/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992-2013 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "dbxout.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "function.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
#include "df.h"
#include "opts.h"

/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
				    reg_class_t);
static int hppa_address_cost (rtx, enum machine_mode mode, addr_space_t, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
static void pa_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
				  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
				 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
				     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
			    const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
					enum machine_mode,
					secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
						   enum machine_mode, int *,
						   const_tree, int);
static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
static unsigned int pa_section_type_flags (tree, const char *, int);
static bool pa_legitimate_address_p (enum machine_mode, rtx, bool);

/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;
static GTY(()) section *som_tm_clone_table_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;

/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS pa_section_type_flags
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P pa_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}* where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}

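/* Illustrative example (not from the original source): with
   -mfixed-range=fr4-fr31, the kernel-mode case mentioned in the comment
   above, fr4 through fr31 are marked fixed and call-used.  If that range
   covers every register from FP_REG_FIRST to FP_REG_LAST, the check at
   the end of fix_range also sets MASK_DISABLE_FPREGS, so the compiler
   stops allocating floating point registers entirely.  */
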
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v
    = (vec<cl_deferred_option> *) pa_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
	switch (opt->opt_index)
	  {
	  case OPT_mfixed_range_:
	    fix_range (opt->arg);
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
	  && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* FIXME: Disable auto increment and decrement processing until reload
     is completed.  See PR middle-end 56791.  */
  flag_auto_inc_dec = reload_completed;

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
	      "-freorder-blocks-and-partition does not work "
	      "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
		      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
						 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
				   PA_BUILTIN_FABSQ, BUILT_IN_MD,
				   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
					long_double_type_node,
					long_double_type_node,
					NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
				   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
				   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
				   PA_BUILTIN_INFQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
				   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
				   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}

static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
	enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
	REAL_VALUE_TYPE inf;
	rtx tmp;

	real_inf (&inf);
	tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

	tmp = validize_mem (force_const_mem (target_mode, tmp));

	if (target == 0)
	  target = gen_reg_rtx (target_mode);

	emit_move_insn (target, tmp);
	return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
pa_symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return symbolic_operand (x, VOIDmode);
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
pa_cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
	  || pa_ldil_cint_p (ival)
	  || pa_zdepi_cint_p (ival));
}

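/* For example: 0x1fff fits in 14 bits and can be loaded with a single
   ldo; 0x12345000 has its low 11 bits clear and can be loaded with a
   single ldil; 0x7f0 is one contiguous run of set bits and can be
   formed with a single zdepi.  A value like 0x12345678 satisfies none
   of the three tests and needs a multi-instruction sequence.  */
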
/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
pa_ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}

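/* Worked example: for ival == 0x12345000 the masked value X is zero
   (bits 0-10 and bits 31 and up are all clear), so ldil works.  For
   ival == 0x12345678, bits 0-10 are nonzero and the test fails.  On a
   64-bit host, ival == 0x80000000 also fails: X is 0x80000000, which
   matches neither 0 nor the sign-extended pattern, reflecting the fact
   that the value changes sign when widened from 32 to 64 bits.  */
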
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
pa_zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}

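/* Worked example: x = 0x7f0 (seven 1s starting at bit 4) gives
   lsb_mask = 0x10, then t = ((0x7f0 >> 4) + 0x10) & ~0xf = 0x80, a
   power of two, so zdepi can form the value.  x = 0x21 (bits 0 and 5)
   gives lsb_mask = 0x1 and t = 0x3, not a power of two: bit 5 does not
   match the sign copy of bit 4, so no 5-bit signed field works.  */
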
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
pa_and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
pa_ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

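/* Worked example: the test accepts masks whose set bits form one
   contiguous block.  For pa_ior_mask_p, mask = 0x0ff0 gives
   mask & -mask = 0x10, and 0x0ff0 + 0x10 = 0x1000, a power of two, so
   depi can set those bits in one insn.  mask = 0x0ff1 becomes 0x0ff2
   after the addition, which still has several bits set, so it is
   rejected.  pa_and_mask_p runs the same check on ~mask, accepting
   masks that clear one contiguous block.  */
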
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
	 and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
	{
	  /* Extract LABEL_REF.  */
	  if (GET_CODE (orig) == CONST)
	    orig = XEXP (XEXP (orig, 0), 0);
	  /* Extract CODE_LABEL.  */
	  orig = XEXP (orig, 0);
	  add_reg_note (insn, REG_LABEL_OPERAND, orig);
	  /* Make sure we have label and not a note.  */
	  if (LABEL_P (orig))
	    LABEL_NUSES (orig)++;
	}
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
	{
	  /* Force function label into memory in word mode.  */
	  orig = XEXP (force_const_mem (word_mode, orig), 0);
	  /* Load plabel address from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	  emit_move_insn (reg, pic_ref);
	  /* Now load address of function descriptor.  */
	  pic_ref = gen_rtx_MEM (Pmode, reg);
	}
      else
	{
	  /* Load symbol reference from DLT.  */
	  emit_move_insn (tmp_reg,
			  gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
					gen_rtx_HIGH (word_mode, orig)));
	  pic_ref
	    = gen_const_mem (Pmode,
			     gen_rtx_LO_SUM (Pmode, tmp_reg,
					     gen_rtx_UNSPEC (Pmode,
							     gen_rtvec (1, orig),
							     UNSPEC_DLTIND14R)));
	}

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (Pmode, base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      if (flag_pic)
	emit_insn (gen_tgd_load_pic (tmp, addr));
      else
	emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      if (flag_pic)
	emit_insn (gen_tld_load_pic (tmp, addr));
      else
	emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      if (flag_pic)
	emit_insn (gen_tie_load_pic (tmp, addr));
      else
	emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */

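/* Worked example of the rounding above: for memory (X + 0x6003) in an
   integer mode the mask is 0x3fff, and 0x6003 & 0x3fff = 0x2003, which
   is at least halfway to the next 0x4000 boundary, so we round up:
   Y = 0x8000, Z = X + 0x8000, and the reference becomes
   memory (Z + (0x6003 - 0x8000)) = memory (Z - 0x1ffd), whose
   displacement again fits in 14 bits.  */
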
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      && !INT14_OK_STRICT ? 0x1f : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (Pmode, XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (Pmode, ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then pa_emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && pa_symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && pa_shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
	   || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}

/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as pa_legitimate_address_p.  */

static int
hppa_address_cost (rtx X, enum machine_mode mode ATTRIBUTE_UNUSED,
		   addr_space_t as ATTRIBUTE_UNUSED,
		   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case SYMBOL_REF:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int factor;

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = factor * factor * COSTS_N_INSNS (8);
      else
	*total = factor * factor * COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      /* A mode size N times larger than SImode needs O(N*N) more insns.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / 4;
      if (factor == 0)
	factor = 1;

      *total = factor * factor * COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (3);
	  return true;
	}

      /* A size N times larger than UNITS_PER_WORD needs N times as
	 many insns, taking N times as long.  */
      factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      *total = factor * COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

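/* Illustrative costing (not from the original source): on a 32-bit
   target, a DImode multiply has factor = 8 / 4 = 2, so with PA 1.1 FP
   multiplies available it is costed at 2 * 2 * COSTS_N_INSNS (8),
   reflecting the O(N*N) insn count for multi-word multiplication.  */
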
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */

static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return pa_tls_referenced_p (x);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
pa_emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && (MEM_P (operand1)
	  || (GET_CODE (operand1) == SUBREG
	      && MEM_P (XEXP (operand1, 0))))
      && !floating_point_store_memory_operand (operand1, mode))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch_reg.  */
      if (reg_plus_base_memory_operand (operand1, mode)
	  && !(TARGET_PA_20
	       && !TARGET_ELF32
	       && INT_14_BITS (XEXP (XEXP (operand1, 0), 1))))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && (MEM_P (operand0)
	       || (GET_CODE (operand0) == SUBREG
		   && MEM_P (XEXP (operand0, 0))))
	   && !floating_point_store_memory_operand (operand0, mode))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch_reg.  */
      if (reg_plus_base_memory_operand (operand0, mode)
	  && !(TARGET_PA_20
	       && !TARGET_ELF32
	       && INT_14_BITS (XEXP (XEXP (operand0, 0), 1))))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.  For the most part,
     this is only necessary for SImode and DImode.

     Use scratch_reg to hold the address of the memory location.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      if (operand1 == CONST0_RTX (mode))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      pa_emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch_reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      /* Legitimize TLS symbol references.  This happens for references
	 that aren't a legitimate constant.  */
      if (PA_SYMBOL_REF_TLS_P (operand1))
	operand1 = legitimize_tls_address (operand1);

      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && pa_cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  */
	      if (decl)
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  type = strip_array_types (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, VOIDmode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0),
					  VOIDmode))
	      || function_label_operand (operand1, VOIDmode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      pa_emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      pa_emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }
2003 if (reload_in_progress || reload_completed)
2005 temp = scratch_reg ? scratch_reg : operand0;
2006 /* TEMP will hold an address and maybe the actual
2007 data. We want it in WORD_MODE regardless of what mode it
2008 was originally given to us. */
2009 temp = force_mode (word_mode, temp);
2012 temp = gen_reg_rtx (Pmode);
2014 /* (const (plus (symbol) (const_int))) must be forced to
2015 memory during/after reload if the const_int will not fit in 14 bits. */
2017 if (GET_CODE (operand1) == CONST
2018 && GET_CODE (XEXP (operand1, 0)) == PLUS
2019 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
2020 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
2021 && (reload_completed || reload_in_progress)
2024 rtx const_mem = force_const_mem (mode, operand1);
2025 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
2027 operands[1] = replace_equiv_address (const_mem, operands[1]);
2028 pa_emit_move_sequence (operands, mode, temp);
2032 operands[1] = legitimize_pic_address (operand1, mode, temp);
2033 if (REG_P (operand0) && REG_P (operands[1]))
2034 copy_reg_pointer (operand0, operands[1]);
2035 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2038 /* On the HPPA, references to data space are supposed to use dp,
2039 register 27, but showing it in the RTL inhibits various cse
2040 and loop optimizations. */
2045 if (reload_in_progress || reload_completed)
2047 temp = scratch_reg ? scratch_reg : operand0;
2048 /* TEMP will hold an address and maybe the actual
2049 data. We want it in WORD_MODE regardless of what mode it
2050 was originally given to us. */
2051 temp = force_mode (word_mode, temp);
2054 temp = gen_reg_rtx (mode);
2056 /* Loading a SYMBOL_REF into a register makes that register
2057 safe to be used as the base in an indexed address.
2059 Don't mark hard registers though. That loses. */
2060 if (GET_CODE (operand0) == REG
2061 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2062 mark_reg_pointer (operand0, BITS_PER_UNIT);
2063 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2064 mark_reg_pointer (temp, BITS_PER_UNIT);
2067 set = gen_rtx_SET (mode, operand0, temp);
2069 set = gen_rtx_SET (VOIDmode,
2071 gen_rtx_LO_SUM (mode, temp, operand1));
2073 emit_insn (gen_rtx_SET (VOIDmode,
2075 gen_rtx_HIGH (mode, operand1)));
2081 else if (pa_tls_referenced_p (operand1))
2086 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2088 addend = XEXP (XEXP (tmp, 0), 1);
2089 tmp = XEXP (XEXP (tmp, 0), 0);
2092 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2093 tmp = legitimize_tls_address (tmp);
2096 tmp = gen_rtx_PLUS (mode, tmp, addend);
2097 tmp = force_operand (tmp, operands[0]);
2101 else if (GET_CODE (operand1) != CONST_INT
2102 || !pa_cint_ok_for_move (INTVAL (operand1)))
2106 HOST_WIDE_INT value = 0;
2107 HOST_WIDE_INT insv = 0;
2110 if (GET_CODE (operand1) == CONST_INT)
2111 value = INTVAL (operand1);
2114 && GET_CODE (operand1) == CONST_INT
2115 && HOST_BITS_PER_WIDE_INT > 32
2116 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2120 /* Extract the low order 32 bits of the value and sign extend.
2121 If the new value is the same as the original value, we can
2122 use the original value as-is. If the new value is
2123 different, we use it and insert the most significant 32 bits
2124 of the original value into the final result. */
2125 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2126 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
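/* Illustrative arithmetic (not part of the original code): for
   value = 0x23456789 bit 31 is clear, so the XOR/subtract pair above
   leaves the value unchanged and it can be loaded as-is. For
   value = 0xffffffff the low half sign extends to nval = -1, which no
   longer equals VALUE, so the sequence loads -1 and must later insert
   insv = 0 into the upper 32 bits. */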
2129 #if HOST_BITS_PER_WIDE_INT > 32
2130 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
2134 operand1 = GEN_INT (nval);
2138 if (reload_in_progress || reload_completed)
2139 temp = scratch_reg ? scratch_reg : operand0;
2141 temp = gen_reg_rtx (mode);
2143 /* We don't directly split DImode constants on 32-bit targets
2144 because PLUS uses an 11-bit immediate and the insn sequence
2145 generated is not as efficient as the one using HIGH/LO_SUM. */
2146 if (GET_CODE (operand1) == CONST_INT
2147 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2148 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2151 /* Directly break constant into high and low parts. This
2152 provides better optimization opportunities because various
2153 passes recognize constants split with PLUS but not LO_SUM.
2154 We use a 14-bit signed low part except when the addition
2155 of 0x4000 to the high part might change the sign of the high part. */
2157 HOST_WIDE_INT low = value & 0x3fff;
2158 HOST_WIDE_INT high = value & ~ 0x3fff;
2162 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2170 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2171 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
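/* Worked example (illustration only): value = 0x12347001 splits as
   high = 0x12344000, low = 0x3001. Because low >= 0x2000, high is
   bumped by 0x4000 to 0x12348000, leaving the signed 14-bit low part
   0x12347001 - 0x12348000 = -0xfff. The 0x7fffc000 test above guards
   the one value where that bump would flip the sign of the high part. */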
2175 emit_insn (gen_rtx_SET (VOIDmode, temp,
2176 gen_rtx_HIGH (mode, operand1)));
2177 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2180 insn = emit_move_insn (operands[0], operands[1]);
2182 /* Now insert the most significant 32 bits of the value
2183 into the register. When we don't have a second register
2184 available, it could take up to nine instructions to load
2185 a 64-bit integer constant. Prior to reload, we force
2186 constants that would take more than three instructions
2187 to load to the constant pool. During and after reload,
2188 we have to handle all possible values. */
2191 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2192 register and the value to be inserted is outside the
2193 range that can be loaded with three depdi instructions. */
2194 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2196 operand1 = GEN_INT (insv);
2198 emit_insn (gen_rtx_SET (VOIDmode, temp,
2199 gen_rtx_HIGH (mode, operand1)));
2200 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2202 emit_insn (gen_insvdi (operand0, GEN_INT (32),
2205 emit_insn (gen_insvsi (operand0, GEN_INT (32),
2210 int len = 5, pos = 27;
2212 /* Insert the bits using the depdi instruction. */
2215 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2216 HOST_WIDE_INT sign = v5 < 0;
2218 /* Left extend the insertion. */
2219 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2220 while (pos > 0 && (insv & 1) == sign)
2222 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2228 emit_insn (gen_insvdi (operand0, GEN_INT (len),
2229 GEN_INT (pos), GEN_INT (v5)));
2231 emit_insn (gen_insvsi (operand0, GEN_INT (len),
2232 GEN_INT (pos), GEN_INT (v5)));
2234 len = pos > 0 && pos < 5 ? pos : 5;
2240 set_unique_reg_note (insn, REG_EQUAL, op1);
2245 /* Now have insn-emit do whatever it normally does. */
2249 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2250 it will need a link/runtime reloc). */
2253 pa_reloc_needed (tree exp)
2257 switch (TREE_CODE (exp))
2262 case POINTER_PLUS_EXPR:
2265 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2266 reloc |= pa_reloc_needed (TREE_OPERAND (exp, 1));
2270 case NON_LVALUE_EXPR:
2271 reloc = pa_reloc_needed (TREE_OPERAND (exp, 0));
2277 unsigned HOST_WIDE_INT ix;
2279 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2281 reloc |= pa_reloc_needed (value);
2295 /* Return the best assembler insn template
2296 for moving operands[1] into operands[0] as a fullword. */
2298 pa_singlemove_string (rtx *operands)
2300 HOST_WIDE_INT intval;
2302 if (GET_CODE (operands[0]) == MEM)
2303 return "stw %r1,%0";
2304 if (GET_CODE (operands[1]) == MEM)
return "ldw %1,%0";
2306 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2311 gcc_assert (GET_MODE (operands[1]) == SFmode);
2313 /* Translate the CONST_DOUBLE to a CONST_INT with the same target bit pattern. */
2315 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2316 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2318 operands[1] = GEN_INT (i);
2319 /* Fall through to CONST_INT case. */
2321 if (GET_CODE (operands[1]) == CONST_INT)
2323 intval = INTVAL (operands[1]);
2325 if (VAL_14_BITS_P (intval))
return "ldi %1,%0";
2327 else if ((intval & 0x7ff) == 0)
2328 return "ldil L'%1,%0";
2329 else if (pa_zdepi_cint_p (intval))
2330 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2332 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2334 return "copy %1,%0";
2338 /* Compute position (in OP[1]) and width (in OP[2])
2339 useful for copying IMM to a register using the zdepi
2340 instructions. Store the immediate value to insert in OP[0]. */
2342 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2346 /* Find the least significant set bit in IMM. */
2347 for (lsb = 0; lsb < 32; lsb++)
2354 /* Choose variants based on *sign* of the 5-bit field. */
2355 if ((imm & 0x10) == 0)
2356 len = (lsb <= 28) ? 4 : 32 - lsb;
2359 /* Find the width of the bitstring in IMM. */
2360 for (len = 5; len < 32 - lsb; len++)
2362 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2366 /* Sign extend IMM as a 5-bit value. */
2367 imm = (imm & 0xf) - 0x10;
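/* Worked examples (illustration only): IMM = 0x3800 shifts down by
   lsb = 11 to 7; bit 4 of 7 is clear, so the positive variant applies
   with len = 4 and the constant is rebuilt by depositing the 4-bit
   value 7 at a left shift of 11. For IMM = -2048 (0xfffff800 in 32
   bits), lsb = 11 leaves a value ending in ...11111; the 5-bit field
   is negative, the width scan stops at len = 21, and the sign-extended
   deposit value becomes 0xf - 0x10 = -1, i.e. depositing 21 one bits
   11 bits up recreates the constant. */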
2375 /* Compute position (in OP[1]) and width (in OP[2])
2376 useful for copying IMM to a register using the depdi,z
2377 instructions. Store the immediate value to insert in OP[0]. */
2380 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2382 int lsb, len, maxlen;
2384 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2386 /* Find the least significant set bit in IMM. */
2387 for (lsb = 0; lsb < maxlen; lsb++)
2394 /* Choose variants based on *sign* of the 5-bit field. */
2395 if ((imm & 0x10) == 0)
2396 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2399 /* Find the width of the bitstring in IMM. */
2400 for (len = 5; len < maxlen - lsb; len++)
2402 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2406 /* Extend length if host is narrow and IMM is negative. */
2407 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2410 /* Sign extend IMM as a 5-bit value. */
2411 imm = (imm & 0xf) - 0x10;
2419 /* Output assembler code to perform a doubleword move insn
2420 with operands OPERANDS. */
2423 pa_output_move_double (rtx *operands)
2425 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2427 rtx addreg0 = 0, addreg1 = 0;
2429 /* First classify both operands. */
2431 if (REG_P (operands[0]))
2433 else if (offsettable_memref_p (operands[0]))
2435 else if (GET_CODE (operands[0]) == MEM)
2440 if (REG_P (operands[1]))
2442 else if (CONSTANT_P (operands[1]))
2444 else if (offsettable_memref_p (operands[1]))
2446 else if (GET_CODE (operands[1]) == MEM)
2451 /* Check for the cases that the operand constraints are not
2452 supposed to allow. */
2453 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2455 /* Handle copies between general and floating registers. */
2457 if (optype0 == REGOP && optype1 == REGOP
2458 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2460 if (FP_REG_P (operands[0]))
2462 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2463 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2464 return "{fldds|fldd} -16(%%sp),%0";
2468 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2469 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2470 return "{ldws|ldw} -12(%%sp),%R0";
2474 /* Handle auto decrementing and incrementing loads and stores
2475 specifically, since the structure of the function doesn't work
2476 for them without major modification. Do it better when we teach
2477 this port about the general inc/dec addressing of the PA.
2478 (This was written by tege. Chide him if it doesn't work.) */
2480 if (optype0 == MEMOP)
2482 /* We have to output the address syntax ourselves, since print_operand
2483 doesn't deal with the addresses we want to use. Fix this later. */
2485 rtx addr = XEXP (operands[0], 0);
2486 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2488 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2490 operands[0] = XEXP (addr, 0);
2491 gcc_assert (GET_CODE (operands[1]) == REG
2492 && GET_CODE (operands[0]) == REG);
2494 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2496 /* No overlap between high target register and address
2497 register. (We do this in a non-obvious way to
2498 save a register file writeback) */
2499 if (GET_CODE (addr) == POST_INC)
2500 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2501 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2503 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2505 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2507 operands[0] = XEXP (addr, 0);
2508 gcc_assert (GET_CODE (operands[1]) == REG
2509 && GET_CODE (operands[0]) == REG);
2511 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2512 /* No overlap between high target register and address
2513 register. (We do this in a non-obvious way to save a
2514 register file writeback) */
2515 if (GET_CODE (addr) == PRE_INC)
2516 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2517 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2520 if (optype1 == MEMOP)
2522 /* We have to output the address syntax ourselves, since print_operand
2523 doesn't deal with the addresses we want to use. Fix this later. */
2525 rtx addr = XEXP (operands[1], 0);
2526 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2528 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2530 operands[1] = XEXP (addr, 0);
2531 gcc_assert (GET_CODE (operands[0]) == REG
2532 && GET_CODE (operands[1]) == REG);
2534 if (!reg_overlap_mentioned_p (high_reg, addr))
2536 /* No overlap between high target register and address
2537 register. (We do this in a non-obvious way to
2538 save a register file writeback) */
2539 if (GET_CODE (addr) == POST_INC)
2540 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2541 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2545 /* This is an undefined situation. We should load into the
2546 address register *and* update that register. Probably
2547 we don't need to handle this at all. */
2548 if (GET_CODE (addr) == POST_INC)
2549 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2550 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2553 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2555 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2557 operands[1] = XEXP (addr, 0);
2558 gcc_assert (GET_CODE (operands[0]) == REG
2559 && GET_CODE (operands[1]) == REG);
2561 if (!reg_overlap_mentioned_p (high_reg, addr))
2563 /* No overlap between high target register and address
2564 register. (We do this in a non-obvious way to
2565 save a register file writeback) */
2566 if (GET_CODE (addr) == PRE_INC)
2567 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2568 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2572 /* This is an undefined situation. We should load into the
2573 address register *and* update that register. Probably
2574 we don't need to handle this at all. */
2575 if (GET_CODE (addr) == PRE_INC)
2576 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2577 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2580 else if (GET_CODE (addr) == PLUS
2581 && GET_CODE (XEXP (addr, 0)) == MULT)
2584 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2586 if (!reg_overlap_mentioned_p (high_reg, addr))
2588 xoperands[0] = high_reg;
2589 xoperands[1] = XEXP (addr, 1);
2590 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2591 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2592 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2594 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2598 xoperands[0] = high_reg;
2599 xoperands[1] = XEXP (addr, 1);
2600 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2601 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2602 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2604 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2609 /* If an operand is an unoffsettable memory ref, find a register
2610 we can increment temporarily to make it refer to the second word. */
2612 if (optype0 == MEMOP)
2613 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2615 if (optype1 == MEMOP)
2616 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2618 /* Ok, we can do one word at a time.
2619 Normally we do the low-numbered word first.
2621 In either case, set up in LATEHALF the operands to use
2622 for the high-numbered word and in some cases alter the
2623 operands in OPERANDS to be suitable for the low-numbered word. */
2625 if (optype0 == REGOP)
2626 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2627 else if (optype0 == OFFSOP)
2628 latehalf[0] = adjust_address (operands[0], SImode, 4);
2630 latehalf[0] = operands[0];
2632 if (optype1 == REGOP)
2633 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2634 else if (optype1 == OFFSOP)
2635 latehalf[1] = adjust_address (operands[1], SImode, 4);
2636 else if (optype1 == CNSTOP)
2637 split_double (operands[1], &operands[1], &latehalf[1]);
2639 latehalf[1] = operands[1];
2641 /* If the first move would clobber the source of the second one,
2642 do them in the other order.
2644 This can happen in two cases:
2646 mem -> register where the first half of the destination register
2647 is the same register used in the memory's address. Reload
2648 can create such insns.
2650 mem in this case will be either register indirect or register
2651 indirect plus a valid offset.
2653 register -> register move where REGNO(dst) == REGNO(src) + 1.
2654 Someone (Tim/Tege?) claimed this can happen for parameter loads.
2656 Handle mem -> register case first. */
2657 if (optype0 == REGOP
2658 && (optype1 == MEMOP || optype1 == OFFSOP)
2659 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2662 /* Do the late half first. */
2664 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2665 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2669 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2670 return pa_singlemove_string (operands);
2673 /* Now handle register -> register case. */
2674 if (optype0 == REGOP && optype1 == REGOP
2675 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2677 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2678 return pa_singlemove_string (operands);
2681 /* Normal case: do the two words, low-numbered first. */
2683 output_asm_insn (pa_singlemove_string (operands), operands);
2685 /* Make any unoffsettable addresses point at high-numbered word. */
2687 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2689 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2692 output_asm_insn (pa_singlemove_string (latehalf), latehalf);
2694 /* Undo the adds we just did. */
2696 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2698 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2704 pa_output_fp_move_double (rtx *operands)
2706 if (FP_REG_P (operands[0]))
2708 if (FP_REG_P (operands[1])
2709 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2710 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2712 output_asm_insn ("fldd%F1 %1,%0", operands);
2714 else if (FP_REG_P (operands[1]))
2716 output_asm_insn ("fstd%F0 %1,%0", operands);
2722 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2724 /* This is a pain. You have to be prepared to deal with an
2725 arbitrary address here, including pre/post increment/decrement,
2727 so avoid this in the MD. */
2728 gcc_assert (GET_CODE (operands[0]) == REG);
2730 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2731 xoperands[0] = operands[0];
2732 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2737 /* Return a REG that occurs in ADDR with coefficient 1.
2738 ADDR can be effectively incremented by incrementing REG. */
2741 find_addr_reg (rtx addr)
2743 while (GET_CODE (addr) == PLUS)
2745 if (GET_CODE (XEXP (addr, 0)) == REG)
2746 addr = XEXP (addr, 0);
2747 else if (GET_CODE (XEXP (addr, 1)) == REG)
2748 addr = XEXP (addr, 1);
2749 else if (CONSTANT_P (XEXP (addr, 0)))
2750 addr = XEXP (addr, 1);
2751 else if (CONSTANT_P (XEXP (addr, 1)))
2752 addr = XEXP (addr, 0);
2756 gcc_assert (GET_CODE (addr) == REG);
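/* For instance, given ADDR = (plus (reg %r26) (const_int 20)) the loop
   above discards the constant term and returns the %r26 REG, which the
   doubleword code then bumps with "ldo 4(%r26),%r26" to reach the high
   word. (Illustration only; the register is hypothetical.) */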
2760 /* Emit code to perform a block move.
2762 OPERANDS[0] is the destination pointer as a REG, clobbered.
2763 OPERANDS[1] is the source pointer as a REG, clobbered.
2764 OPERANDS[2] is a register for temporary storage.
2765 OPERANDS[3] is a register for temporary storage.
2766 OPERANDS[4] is the size as a CONST_INT.
2767 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2768 OPERANDS[6] is another temporary register. */
2771 pa_output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2773 int align = INTVAL (operands[5]);
2774 unsigned long n_bytes = INTVAL (operands[4]);
2776 /* We can't move more than a word at a time because the PA
2777 has no integer move insns longer than a word. (Could use fp mem ops?) */
2778 if (align > (TARGET_64BIT ? 8 : 4))
2779 align = (TARGET_64BIT ? 8 : 4);
2781 /* Note that we know each loop below will execute at least twice
2782 (else we would have open-coded the copy). */
2786 /* Pre-adjust the loop counter. */
2787 operands[4] = GEN_INT (n_bytes - 16);
2788 output_asm_insn ("ldi %4,%2", operands);
2791 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2792 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2793 output_asm_insn ("std,ma %3,8(%0)", operands);
2794 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2795 output_asm_insn ("std,ma %6,8(%0)", operands);
2797 /* Handle the residual. There could be up to 15 bytes of
2798 residual to copy! */
2799 if (n_bytes % 16 != 0)
2801 operands[4] = GEN_INT (n_bytes % 8);
2802 if (n_bytes % 16 >= 8)
2803 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2804 if (n_bytes % 8 != 0)
2805 output_asm_insn ("ldd 0(%1),%6", operands);
2806 if (n_bytes % 16 >= 8)
2807 output_asm_insn ("std,ma %3,8(%0)", operands);
2808 if (n_bytes % 8 != 0)
2809 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2814 /* Pre-adjust the loop counter. */
2815 operands[4] = GEN_INT (n_bytes - 8);
2816 output_asm_insn ("ldi %4,%2", operands);
2819 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2820 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2821 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2822 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2823 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2825 /* Handle the residual. There could be up to 7 bytes of
2826 residual to copy! */
2827 if (n_bytes % 8 != 0)
2829 operands[4] = GEN_INT (n_bytes % 4);
2830 if (n_bytes % 8 >= 4)
2831 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2832 if (n_bytes % 4 != 0)
2833 output_asm_insn ("ldw 0(%1),%6", operands);
2834 if (n_bytes % 8 >= 4)
2835 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2836 if (n_bytes % 4 != 0)
2837 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2842 /* Pre-adjust the loop counter. */
2843 operands[4] = GEN_INT (n_bytes - 4);
2844 output_asm_insn ("ldi %4,%2", operands);
2847 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2848 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2849 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2850 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2851 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2853 /* Handle the residual. */
2854 if (n_bytes % 4 != 0)
2856 if (n_bytes % 4 >= 2)
2857 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2858 if (n_bytes % 2 != 0)
2859 output_asm_insn ("ldb 0(%1),%6", operands);
2860 if (n_bytes % 4 >= 2)
2861 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2862 if (n_bytes % 2 != 0)
2863 output_asm_insn ("stb %6,0(%0)", operands);
2868 /* Pre-adjust the loop counter. */
2869 operands[4] = GEN_INT (n_bytes - 2);
2870 output_asm_insn ("ldi %4,%2", operands);
2873 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2874 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2875 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2876 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2877 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2879 /* Handle the residual. */
2880 if (n_bytes % 2 != 0)
2882 output_asm_insn ("ldb 0(%1),%3", operands);
2883 output_asm_insn ("stb %3,0(%0)", operands);
2892 /* Count the number of insns necessary to handle this block move.
2894 Basic structure is the same as pa_output_block_move, except that we
2895 count insns rather than emit them. */
2898 compute_movmem_length (rtx insn)
2900 rtx pat = PATTERN (insn);
2901 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2902 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2903 unsigned int n_insns = 0;
2905 /* We can't move more than a word at a time because the PA
2906 has no integer move insns longer than a word. (Could use fp mem ops?) */
2907 if (align > (TARGET_64BIT ? 8 : 4))
2908 align = (TARGET_64BIT ? 8 : 4);
2910 /* The basic copying loop. */
2914 if (n_bytes % (2 * align) != 0)
2916 if ((n_bytes % (2 * align)) >= align)
2919 if ((n_bytes % align) != 0)
2923 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
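/* Worked example (assuming the basic copying loop counts as six insns):
   align = 4 and n_bytes = 14 gives 14 % 8 = 6 >= 4, adding two insns
   for the word residual, and 14 % 4 = 2 != 0, adding two more for the
   byte residual; 10 insns at 4 bytes each yields a length of 40. */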
2927 /* Emit code to perform a block clear.
2929 OPERANDS[0] is the destination pointer as a REG, clobbered.
2930 OPERANDS[1] is a register for temporary storage.
2931 OPERANDS[2] is the size as a CONST_INT.
2932 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2935 pa_output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2937 int align = INTVAL (operands[3]);
2938 unsigned long n_bytes = INTVAL (operands[2]);
2940 /* We can't clear more than a word at a time because the PA
2941 has no integer move insns longer than a word. */
2942 if (align > (TARGET_64BIT ? 8 : 4))
2943 align = (TARGET_64BIT ? 8 : 4);
2945 /* Note that we know each loop below will execute at least twice
2946 (else we would have open-coded the copy). */
2950 /* Pre-adjust the loop counter. */
2951 operands[2] = GEN_INT (n_bytes - 16);
2952 output_asm_insn ("ldi %2,%1", operands);
2955 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2956 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2957 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2959 /* Handle the residual. There could be up to 15 bytes of
2960 residual to clear! */
2961 if (n_bytes % 16 != 0)
2963 operands[2] = GEN_INT (n_bytes % 8);
2964 if (n_bytes % 16 >= 8)
2965 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2966 if (n_bytes % 8 != 0)
2967 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2972 /* Pre-adjust the loop counter. */
2973 operands[2] = GEN_INT (n_bytes - 8);
2974 output_asm_insn ("ldi %2,%1", operands);
2977 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2978 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2979 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2981 /* Handle the residual. There could be up to 7 bytes of
2982 residual to clear! */
2983 if (n_bytes % 8 != 0)
2985 operands[2] = GEN_INT (n_bytes % 4);
2986 if (n_bytes % 8 >= 4)
2987 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2988 if (n_bytes % 4 != 0)
2989 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2994 /* Pre-adjust the loop counter. */
2995 operands[2] = GEN_INT (n_bytes - 4);
2996 output_asm_insn ("ldi %2,%1", operands);
2999 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3000 output_asm_insn ("addib,>= -4,%1,.-4", operands);
3001 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3003 /* Handle the residual. */
3004 if (n_bytes % 4 != 0)
3006 if (n_bytes % 4 >= 2)
3007 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
3008 if (n_bytes % 2 != 0)
3009 output_asm_insn ("stb %%r0,0(%0)", operands);
3014 /* Pre-adjust the loop counter. */
3015 operands[2] = GEN_INT (n_bytes - 2);
3016 output_asm_insn ("ldi %2,%1", operands);
3019 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3020 output_asm_insn ("addib,>= -2,%1,.-4", operands);
3021 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
3023 /* Handle the residual. */
3024 if (n_bytes % 2 != 0)
3025 output_asm_insn ("stb %%r0,0(%0)", operands);
3034 /* Count the number of insns necessary to handle this block clear.
3036 Basic structure is the same as pa_output_block_clear, except that we
3037 count insns rather than emit them. */
3040 compute_clrmem_length (rtx insn)
3042 rtx pat = PATTERN (insn);
3043 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
3044 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3045 unsigned int n_insns = 0;
3047 /* We can't clear more than a word at a time because the PA
3048 has no longer integer move insns. */
3049 if (align > (TARGET_64BIT ? 8 : 4))
3050 align = (TARGET_64BIT ? 8 : 4);
3052 /* The basic loop. */
3056 if (n_bytes % (2 * align) != 0)
3058 if ((n_bytes % (2 * align)) >= align)
3061 if ((n_bytes % align) != 0)
3065 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3071 pa_output_and (rtx *operands)
3073 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3075 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3076 int ls0, ls1, ms0, p, len;
3078 for (ls0 = 0; ls0 < 32; ls0++)
3079 if ((mask & (1 << ls0)) == 0)
3082 for (ls1 = ls0; ls1 < 32; ls1++)
3083 if ((mask & (1 << ls1)) != 0)
3086 for (ms0 = ls1; ms0 < 32; ms0++)
3087 if ((mask & (1 << ms0)) == 0)
3090 gcc_assert (ms0 == 32);
3098 operands[2] = GEN_INT (len);
3099 return "{extru|extrw,u} %1,31,%2,%0";
3103 /* We could use this `depi' for the case above as well, but `depi'
3104 requires one more register file access than an `extru'. */
3109 operands[2] = GEN_INT (p);
3110 operands[3] = GEN_INT (len);
3111 return "{depi|depwi} 0,%2,%3,%0";
3115 return "and %1,%2,%0";
3118 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3119 storing the result in operands[0]. */
3121 pa_output_64bit_and (rtx *operands)
3123 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3125 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3126 int ls0, ls1, ms0, p, len;
3128 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3129 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3132 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3133 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3136 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3137 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3140 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3142 if (ls1 == HOST_BITS_PER_WIDE_INT)
3148 operands[2] = GEN_INT (len);
3149 return "extrd,u %1,63,%2,%0";
3153 /* We could use this `depi' for the case above as well, but `depi'
3154 requires one more register file access than an `extru'. */
3159 operands[2] = GEN_INT (p);
3160 operands[3] = GEN_INT (len);
3161 return "depdi 0,%2,%3,%0";
3165 return "and %1,%2,%0";
3169 pa_output_ior (rtx *operands)
3171 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3172 int bs0, bs1, p, len;
3174 if (INTVAL (operands[2]) == 0)
3175 return "copy %1,%0";
3177 for (bs0 = 0; bs0 < 32; bs0++)
3178 if ((mask & (1 << bs0)) != 0)
3181 for (bs1 = bs0; bs1 < 32; bs1++)
3182 if ((mask & (1 << bs1)) == 0)
3185 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3190 operands[2] = GEN_INT (p);
3191 operands[3] = GEN_INT (len);
3192 return "{depi|depwi} -1,%2,%3,%0";
3195 /* Return a string to perform a bitwise inclusive-or of operands[1] with
3196 operands[2], storing the result in operands[0]. */
3198 pa_output_64bit_ior (rtx *operands)
3200 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3201 int bs0, bs1, p, len;
3203 if (INTVAL (operands[2]) == 0)
3204 return "copy %1,%0";
3206 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3207 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3210 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3211 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3214 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3215 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3220 operands[2] = GEN_INT (p);
3221 operands[3] = GEN_INT (len);
3222 return "depdi -1,%2,%3,%0";
3225 /* Target hook for assembling integer objects. This code handles
3226 aligned SI and DI integers specially since function references
3227 must be preceded by P%. */
3230 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3232 if (size == UNITS_PER_WORD
3234 && function_label_operand (x, VOIDmode))
3236 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3237 output_addr_const (asm_out_file, x);
3238 fputc ('\n', asm_out_file);
3241 return default_assemble_integer (x, size, aligned_p);
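/* For example (illustration only), a word-sized reference to function
   foo is emitted as

	.word P%foo

   so the linker resolves it to a plabel (procedure label) usable as a
   function pointer; all other cases defer to default_assemble_integer. */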
3244 /* Output an ascii string. */
3246 pa_output_ascii (FILE *file, const char *p, int size)
3250 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3252 /* The HP assembler can only take strings of 256 characters at one
3253 time. This is a limitation on input line length, *not* the
3254 length of the string. Sigh. Even worse, it seems that the
3255 restriction is in number of input characters (see \xnn &
3256 \whatever). So we have to do this very carefully. */
3258 fputs ("\t.STRING \"", file);
3261 for (i = 0; i < size; i += 4)
3265 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3267 register unsigned int c = (unsigned char) p[i + io];
3269 if (c == '\"' || c == '\\')
3270 partial_output[co++] = '\\';
3271 if (c >= ' ' && c < 0177)
3272 partial_output[co++] = c;
3276 partial_output[co++] = '\\';
3277 partial_output[co++] = 'x';
3278 hexd = c / 16 - 0 + '0';
3280 hexd -= '9' - 'a' + 1;
3281 partial_output[co++] = hexd;
3282 hexd = c % 16 - 0 + '0';
3284 hexd -= '9' - 'a' + 1;
3285 partial_output[co++] = hexd;
3288 if (chars_output + co > 243)
3290 fputs ("\"\n\t.STRING \"", file);
3293 fwrite (partial_output, 1, (size_t) co, file);
3297 fputs ("\"\n", file);
3300 /* Try to rewrite floating point comparisons & branches to avoid
3301 useless add,tr insns.
3303 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3304 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3305 first attempt to remove useless add,tr insns. It is zero
3306 for the second pass as reorg sometimes leaves bogus REG_DEAD notes lying around.
3309 When CHECK_NOTES is zero we can only eliminate add,tr insns
3310 when there's a 1:1 correspondence between fcmp and ftest/fbranch insns. */
3313 remove_useless_addtr_insns (int check_notes)
3316 static int pass = 0;
3318 /* This is fairly cheap, so always run it when optimizing. */
3322 int fbranch_count = 0;
3324 /* Walk all the insns in this function looking for fcmp & fbranch
3325 instructions. Keep track of how many of each we find. */
3326 for (insn = get_insns (); insn; insn = next_insn (insn))
3330 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3331 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3334 tmp = PATTERN (insn);
3336 /* It must be a set. */
3337 if (GET_CODE (tmp) != SET)
3340 /* If the destination is CCFP, then we've found an fcmp insn. */
3341 tmp = SET_DEST (tmp);
3342 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3348 tmp = PATTERN (insn);
3349 /* If this is an fbranch instruction, bump the fbranch counter. */
3350 if (GET_CODE (tmp) == SET
3351 && SET_DEST (tmp) == pc_rtx
3352 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3353 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3354 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3355 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3363 /* Find all floating point compare + branch insns. If possible,
3364 reverse the comparison & the branch to avoid add,tr insns. */
3365 for (insn = get_insns (); insn; insn = next_insn (insn))
3369 /* Ignore anything that isn't an INSN. */
3370 if (GET_CODE (insn) != INSN)
3373 tmp = PATTERN (insn);
3375 /* It must be a set. */
3376 if (GET_CODE (tmp) != SET)
3379 /* The destination must be CCFP, which is register zero. */
3380 tmp = SET_DEST (tmp);
3381 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3384 /* INSN should be a set of CCFP.
3386 See if the result of this insn is used in a reversed FP
3387 conditional branch. If so, reverse our condition and
3388 the branch. Doing so avoids useless add,tr insns. */
3389 next = next_insn (insn);
3392 /* Jumps, calls and labels stop our search. */
3393 if (GET_CODE (next) == JUMP_INSN
3394 || GET_CODE (next) == CALL_INSN
3395 || GET_CODE (next) == CODE_LABEL)
3398 /* As does another fcmp insn. */
3399 if (GET_CODE (next) == INSN
3400 && GET_CODE (PATTERN (next)) == SET
3401 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3402 && REGNO (SET_DEST (PATTERN (next))) == 0)
3405 next = next_insn (next);
3408 /* Is NEXT_INSN a branch? */
3410 if (next && GET_CODE (next) == JUMP_INSN)
3412 rtx pattern = PATTERN (next);
3414 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3415 and CCFP dies, then reverse our conditional and the branch
3416 to avoid the add,tr. */
3417 if (GET_CODE (pattern) == SET
3418 && SET_DEST (pattern) == pc_rtx
3419 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3420 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3421 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3422 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3423 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3424 && (fcmp_count == fbranch_count
3426 || (check_notes && find_regno_note (next, REG_DEAD, 0))))
3428 /* Reverse the branch. */
3429 tmp = XEXP (SET_SRC (pattern), 1);
3430 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3431 XEXP (SET_SRC (pattern), 2) = tmp;
3432 INSN_CODE (next) = -1;
3434 /* Reverse our condition. */
3435 tmp = PATTERN (insn);
3436 PUT_CODE (XEXP (tmp, 1),
3437 (reverse_condition_maybe_unordered
3438 (GET_CODE (XEXP (tmp, 1)))));
3448 /* You may have trouble believing this, but this is the 32 bit HP-PA
stack layout.

3453 Variable arguments (optional; any number may be allocated)

3455 SP-(4*(N+9))   arg word N

3460 Fixed arguments (must be allocated; may remain unused)

3469 SP-32    External Data Pointer (DP)
3471 SP-24    External/stub RP (RP')
3475 SP-8     Calling Stub RP (RP'')
3480 SP-0     Stack Pointer (points to next available address) */
3484 /* This function saves registers as follows. Registers marked with ' are
3485 this function's registers (as opposed to the previous function's).
3486 If a frame_pointer isn't needed, r4 is saved as a general register;
3487 the space for the frame pointer is still allocated, though, to keep
the frame layout consistent.

3493 SP (FP')       Previous FP
3494 SP + 4         Alignment filler (sigh)
3495 SP + 8         Space for locals reserved here.
3499 SP + n         All call saved registers used.
3503 SP + o         All call saved fp registers used.
3507 SP + p (SP')   points to next available address. */
3511 /* Global variables set by output_function_prologue(). */
3512 /* Size of frame. Need to know this to emit return insns from leaf procedures. */
3514 static HOST_WIDE_INT actual_fsize, local_fsize;
3515 static int save_fregs;
3517 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3518 Handle case where DISP > 8k by using the add_high_const patterns.
3520 Note that in the DISP > 8k case, we will leave the high part of the address
3521 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
3524 store_reg (int reg, HOST_WIDE_INT disp, int base)
3526 rtx insn, dest, src, basereg;
3528 src = gen_rtx_REG (word_mode, reg);
3529 basereg = gen_rtx_REG (Pmode, base);
3530 if (VAL_14_BITS_P (disp))
3532 dest = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
3533 insn = emit_move_insn (dest, src);
3535 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3537 rtx delta = GEN_INT (disp);
3538 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3540 emit_move_insn (tmpreg, delta);
3541 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3544 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3545 gen_rtx_SET (VOIDmode, tmpreg,
3546 gen_rtx_PLUS (Pmode, basereg, delta)));
3547 RTX_FRAME_RELATED_P (insn) = 1;
3549 dest = gen_rtx_MEM (word_mode, tmpreg);
3550 insn = emit_move_insn (dest, src);
3554 rtx delta = GEN_INT (disp);
3555 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3556 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3558 emit_move_insn (tmpreg, high);
3559 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3560 insn = emit_move_insn (dest, src);
3562 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3563 gen_rtx_SET (VOIDmode,
3564 gen_rtx_MEM (word_mode,
3565 gen_rtx_PLUS (word_mode,
3572 RTX_FRAME_RELATED_P (insn) = 1;
3575 /* Emit RTL to store REG at the memory location specified by BASE and then
3576 add MOD to BASE. MOD must be <= 8k. */
3579 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3581 rtx insn, basereg, srcreg, delta;
3583 gcc_assert (VAL_14_BITS_P (mod));
3585 basereg = gen_rtx_REG (Pmode, base);
3586 srcreg = gen_rtx_REG (word_mode, reg);
3587 delta = GEN_INT (mod);
3589 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3592 RTX_FRAME_RELATED_P (insn) = 1;
3594 /* RTX_FRAME_RELATED_P must be set on each frame related set
3595 in a parallel with more than one element. */
3596 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3597 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3601 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3602 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3603 whether to add a frame note or not.
3605 In the DISP > 8k case, we leave the high part of the address in %r1.
3606 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3609 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3613 if (VAL_14_BITS_P (disp))
3615 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3616 plus_constant (Pmode,
3617 gen_rtx_REG (Pmode, base), disp));
3619 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3621 rtx basereg = gen_rtx_REG (Pmode, base);
3622 rtx delta = GEN_INT (disp);
3623 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3625 emit_move_insn (tmpreg, delta);
3626 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3627 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3629 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3630 gen_rtx_SET (VOIDmode, tmpreg,
3631 gen_rtx_PLUS (Pmode, basereg, delta)));
3635 rtx basereg = gen_rtx_REG (Pmode, base);
3636 rtx delta = GEN_INT (disp);
3637 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3639 emit_move_insn (tmpreg,
3640 gen_rtx_PLUS (Pmode, basereg,
3641 gen_rtx_HIGH (Pmode, delta)));
3642 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3643 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3646 if (DO_FRAME_NOTES && note)
3647 RTX_FRAME_RELATED_P (insn) = 1;
3651 pa_compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3656 /* The code in pa_expand_prologue and pa_expand_epilogue must
3657 be consistent with the rounding and size calculation done here.
3658 Change them at the same time. */
3660 /* We do our own stack alignment. First, round the size of the
3661 stack locals up to a word boundary. */
3662 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3664 /* Space for previous frame pointer + filler. If any frame is
3665 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3666 waste some space here for the sake of HP compatibility. The
3667 first slot is only used when the frame pointer is needed. */
3668 if (size || frame_pointer_needed)
3669 size += STARTING_FRAME_OFFSET;
3671 /* If the current function calls __builtin_eh_return, then we need
3672 to allocate stack space for registers that will hold data for
3673 the exception handler. */
3674 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3678 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3680 size += i * UNITS_PER_WORD;
3683 /* Account for space used by the callee general register saves. */
3684 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3685 if (df_regs_ever_live_p (i))
3686 size += UNITS_PER_WORD;
3688 /* Account for space used by the callee floating point register saves. */
3689 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3690 if (df_regs_ever_live_p (i)
3691 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3695 /* We always save both halves of the FP register, so always
3696 increment the frame size by 8 bytes. */
3700 /* If any of the floating registers are saved, account for the
3701 alignment needed for the floating point register save block. */
3704 size = (size + 7) & ~7;
3709 /* The various ABIs include space for the outgoing parameters in the
3710 size of the current function's stack frame. We don't need to align
3711 for the outgoing arguments as their alignment is set by the final
3712 rounding for the frame as a whole. */
3713 size += crtl->outgoing_args_size;
3715 /* Allocate space for the fixed frame marker. This space must be
3716 allocated for any function that makes calls or allocates stack space. */
3718 if (!crtl->is_leaf || size)
3719 size += TARGET_64BIT ? 48 : 32;
3721 /* Finally, round to the preferred stack boundary. */
3722 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3723 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
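/* Worked example (illustration only, assuming the 32-bit ABI values
   STARTING_FRAME_OFFSET = 8 and a 64-byte preferred stack boundary):
   40 bytes of locals round to 40, the frame pointer slot and filler
   add 8 for 48, and a non-leaf function adds the 32-byte fixed frame
   marker for 80, which the final rounding raises to a 128-byte frame. */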
3726 /* Generate the assembly code for function entry. FILE is a stdio
3727 stream to output the code to. SIZE is an int: how many units of
3728 temporary storage to allocate.
3730 Refer to the array `regs_ever_live' to determine which registers to
3731 save; `regs_ever_live[I]' is nonzero if register number I is ever
3732 used in the function. This function is responsible for knowing
3733 which registers should not be saved even if used. */
3735 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3736 of memory. If any fpu reg is used in the function, we allocate
3737 such a block here, at the bottom of the frame, just in case it's needed.
3739 If this function is a leaf procedure, then we may choose not
3740 to do a "save" insn. The decision about whether or not
3741 to do this is made in regclass.c. */
3744 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3746 /* The function's label and associated .PROC must never be
3747 separated and must be output *after* any profiling declarations
3748 to avoid changing spaces/subspaces within a procedure. */
3749 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3750 fputs ("\t.PROC\n", file);
3752 /* pa_expand_prologue does the dirty work now. We just need
3753 to output the assembler directives which denote the start of a function. */
3755 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3757 fputs (",NO_CALLS", file);
3759 fputs (",CALLS", file);
3761 fputs (",SAVE_RP", file);
3763 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3764 at the beginning of the frame and that it is used as the frame
3765 pointer for the frame. We do this because our current frame
3766 layout doesn't conform to that specified in the HP runtime
3767 documentation and we need a way to indicate to programs such as
3768 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3769 isn't used by HP compilers but is supported by the assembler.
3770 However, SAVE_SP is supposed to indicate that the previous stack
3771 pointer has been saved in the frame marker. */
3772 if (frame_pointer_needed)
3773 fputs (",SAVE_SP", file);
3775 /* Pass on information about the number of callee register saves
3776 performed in the prologue.
3778 The compiler is supposed to pass the highest register number
3779 saved, the assembler then has to adjust that number before
3780 entering it into the unwind descriptor (to account for any
3781 caller saved registers with lower register numbers than the
3782 first callee saved register). */
3784 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3787 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3789 fputs ("\n\t.ENTRY\n", file);
3791 remove_useless_addtr_insns (0);
3795 pa_expand_prologue (void)
3797 int merge_sp_adjust_with_store = 0;
3798 HOST_WIDE_INT size = get_frame_size ();
3799 HOST_WIDE_INT offset;
3807 /* Compute total size for frame pointer, filler, locals and rounding to
3808 the next word boundary. Similar code appears in pa_compute_frame_size
3809 and must be changed in tandem with this code. */
3810 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3811 if (local_fsize || frame_pointer_needed)
3812 local_fsize += STARTING_FRAME_OFFSET;
3814 actual_fsize = pa_compute_frame_size (size, &save_fregs);
3815 if (flag_stack_usage_info)
3816 current_function_static_stack_size = actual_fsize;
3818 /* Compute a few things we will use often. */
3819 tmpreg = gen_rtx_REG (word_mode, 1);
3821 /* Save RP first. The calling conventions manual states RP will
3822 always be stored into the caller's frame at sp - 20 or sp - 16
3823 depending on which ABI is in use. */
3824 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3826 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3832 /* Allocate the local frame and set up the frame pointer if needed. */
3833 if (actual_fsize != 0)
3835 if (frame_pointer_needed)
3837 /* Copy the old frame pointer temporarily into %r1. Set up the
3838 new stack pointer, then store away the saved old frame pointer
3839 into the stack at sp and at the same time update the stack
3840 pointer by actual_fsize bytes. Two versions, the first
3841 handles small (<8k) frames. The second handles large (>=8k) frames. */
3843 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3845 RTX_FRAME_RELATED_P (insn) = 1;
3847 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3849 RTX_FRAME_RELATED_P (insn) = 1;
3851 if (VAL_14_BITS_P (actual_fsize))
3852 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3855 /* It is incorrect to store the saved frame pointer at *sp,
3856 then increment sp (writes beyond the current stack boundary).
3858 So instead use stwm to store at *sp and post-increment the
3859 stack pointer as an atomic operation. Then increment sp to
3860 finish allocating the new frame. */
3861 HOST_WIDE_INT adjust1 = 8192 - 64;
3862 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3864 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3865 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3869 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3870 we need to store the previous stack pointer (frame pointer)
3871 into the frame marker on targets that use the HP unwind
3872 library. This allows the HP unwind library to be used to
3873 unwind GCC frames. However, we are not fully compatible
3874 with the HP library because our frame layout differs from
3875 that specified in the HP runtime specification.
3877 We don't want a frame note on this instruction as the frame
3878 marker moves during dynamic stack allocation.
3880 This instruction also serves as a blockage to prevent
3881 register spills from being scheduled before the stack
3882 pointer is raised. This is necessary as we store
3883 registers using the frame pointer as a base register,
3884 and the frame pointer is set before sp is raised. */
3885 if (TARGET_HPUX_UNWIND_LIBRARY)
3887 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3888 GEN_INT (TARGET_64BIT ? -8 : -4));
3890 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3891 hard_frame_pointer_rtx);
3894 emit_insn (gen_blockage ());
3896 /* no frame pointer needed. */
3899 /* In some cases we can perform the first callee register save
3900 and allocate the stack frame at the same time. If so, just
3901 make a note of it and defer allocating the frame until saving
3902 the callee registers. */
3903 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3904 merge_sp_adjust_with_store = 1;
3905 /* Cannot optimize. Adjust the stack frame by actual_fsize bytes. */
3908 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3913 /* Normal register save.
3915 Do not save the frame pointer in the frame_pointer_needed case. It
3916 was done earlier. */
3917 if (frame_pointer_needed)
3919 offset = local_fsize;
3921 /* Saving the EH return data registers in the frame is the simplest
3922 way to get the frame unwind information emitted. We put them
3923 just before the general registers. */
3924 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3926 unsigned int i, regno;
3930 regno = EH_RETURN_DATA_REGNO (i);
3931 if (regno == INVALID_REGNUM)
3934 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3935 offset += UNITS_PER_WORD;
3939 for (i = 18; i >= 4; i--)
3940 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3942 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3943 offset += UNITS_PER_WORD;
3946 /* Account for %r3 which is saved in a special place. */
3949 /* No frame pointer needed. */
3952 offset = local_fsize - actual_fsize;
3954 /* Saving the EH return data registers in the frame is the simplest
3955 way to get the frame unwind information emitted. */
3956 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3958 unsigned int i, regno;
3962 regno = EH_RETURN_DATA_REGNO (i);
3963 if (regno == INVALID_REGNUM)
3966 /* If merge_sp_adjust_with_store is nonzero, then we can
3967 optimize the first save. */
3968 if (merge_sp_adjust_with_store)
3970 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3971 merge_sp_adjust_with_store = 0;
3974 store_reg (regno, offset, STACK_POINTER_REGNUM);
3975 offset += UNITS_PER_WORD;
3979 for (i = 18; i >= 3; i--)
3980 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3982 /* If merge_sp_adjust_with_store is nonzero, then we can
3983 optimize the first GR save. */
3984 if (merge_sp_adjust_with_store)
3986 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3987 merge_sp_adjust_with_store = 0;
3990 store_reg (i, offset, STACK_POINTER_REGNUM);
3991 offset += UNITS_PER_WORD;
3995 /* If we wanted to merge the SP adjustment with a GR save, but we never
3996 did any GR saves, then just emit the adjustment here. */
3997 if (merge_sp_adjust_with_store)
3998 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4002 /* The hppa calling conventions say that %r19, the pic offset
4003 register, is saved at sp - 32 (in this function's frame)
4004 when generating PIC code. FIXME: What is the correct thing
4005 to do for functions which make no calls and allocate no
4006 frame? Do we need to allocate a frame, or can we just omit
4007 the save? For now we'll just omit the save.
4009 We don't want a note on this insn as the frame marker can
4010 move if there is a dynamic stack allocation. */
4011 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
4013 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
4015 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
4019 /* Align pointer properly (doubleword boundary). */
4020 offset = (offset + 7) & ~7;
4022 /* Floating point register store. */
4027 /* First get the frame or stack pointer to the start of the FP register save area. */
4029 if (frame_pointer_needed)
4031 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4032 base = hard_frame_pointer_rtx;
4036 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4037 base = stack_pointer_rtx;
4040 /* Now actually save the FP registers. */
4041 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4043 if (df_regs_ever_live_p (i)
4044 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4046 rtx addr, insn, reg;
4047 addr = gen_rtx_MEM (DFmode,
4048 gen_rtx_POST_INC (word_mode, tmpreg));
4049 reg = gen_rtx_REG (DFmode, i);
4050 insn = emit_move_insn (addr, reg);
4053 RTX_FRAME_RELATED_P (insn) = 1;
4056 rtx mem = gen_rtx_MEM (DFmode,
4057 plus_constant (Pmode, base,
4059 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4060 gen_rtx_SET (VOIDmode, mem, reg));
4064 rtx meml = gen_rtx_MEM (SFmode,
4065 plus_constant (Pmode, base,
4067 rtx memr = gen_rtx_MEM (SFmode,
4068 plus_constant (Pmode, base,
4070 rtx regl = gen_rtx_REG (SFmode, i);
4071 rtx regr = gen_rtx_REG (SFmode, i + 1);
4072 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4073 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4076 RTX_FRAME_RELATED_P (setl) = 1;
4077 RTX_FRAME_RELATED_P (setr) = 1;
4078 vec = gen_rtvec (2, setl, setr);
4079 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4080 gen_rtx_SEQUENCE (VOIDmode, vec));
4083 offset += GET_MODE_SIZE (DFmode);
4090 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4091 Handle case where DISP > 8k by using the add_high_const patterns. */
4094 load_reg (int reg, HOST_WIDE_INT disp, int base)
4096 rtx dest = gen_rtx_REG (word_mode, reg);
4097 rtx basereg = gen_rtx_REG (Pmode, base);
4100 if (VAL_14_BITS_P (disp))
4101 src = gen_rtx_MEM (word_mode, plus_constant (Pmode, basereg, disp));
4102 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4104 rtx delta = GEN_INT (disp);
4105 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4107 emit_move_insn (tmpreg, delta);
4108 if (TARGET_DISABLE_INDEXING)
4110 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4111 src = gen_rtx_MEM (word_mode, tmpreg);
4114 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4118 rtx delta = GEN_INT (disp);
4119 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4120 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4122 emit_move_insn (tmpreg, high);
4123 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4126 emit_move_insn (dest, src);
4129 /* Update the total code bytes output to the text section. */
4132 update_total_code_bytes (unsigned int nbytes)
4134 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4135 && !IN_NAMED_SECTION_P (cfun->decl))
4137 unsigned int old_total = total_code_bytes;
4139 total_code_bytes += nbytes;
4141 /* Be prepared to handle overflows. */
4142 if (old_total > total_code_bytes)
4143 total_code_bytes = UINT_MAX;
4147 /* This function generates the assembly code for function exit.
4148 Args are as for output_function_prologue ().
4150 The function epilogue should not depend on the current stack
4151 pointer! It should use the frame pointer only. This is mandatory
4152 because of alloca; we also take advantage of it to omit stack
4153 adjustments before returning. */
4156 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4158 rtx insn = get_last_insn ();
4162 /* pa_expand_epilogue does the dirty work now. We just need
4163 to output the assembler directives which denote the end of the function.
4166 To make debuggers happy, emit a nop if the epilogue was completely
4167 eliminated due to a volatile call as the last insn in the
4168 current function. That way the return address (in %r2) will
4169 always point to a valid instruction in the current function. */
4171 /* Get the last real insn. */
4172 if (GET_CODE (insn) == NOTE)
4173 insn = prev_real_insn (insn);
4175 /* If it is a sequence, then look inside. */
4176 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4177 insn = XVECEXP (PATTERN (insn), 0, 0);
4179 /* If insn is a CALL_INSN, then it must be a call to a volatile
4180 function (otherwise there would be epilogue insns). */
4181 if (insn && GET_CODE (insn) == CALL_INSN)
4183 fputs ("\tnop\n", file);
4187 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4189 if (TARGET_SOM && TARGET_GAS)
4191 /* We're done with this subspace except possibly for some additional
4192 debug information. Forget that we are in this subspace to ensure
4193 that the next function is output in its own subspace. */
4195 cfun->machine->in_nsubspa = 2;
4198 if (INSN_ADDRESSES_SET_P ())
4200 insn = get_last_nonnote_insn ();
4201 last_address += INSN_ADDRESSES (INSN_UID (insn));
4203 last_address += insn_default_length (insn);
4204 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4205 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4208 last_address = UINT_MAX;
4210 /* Finally, update the total number of code bytes output so far. */
4211 update_total_code_bytes (last_address);
4215 pa_expand_epilogue (void)
4218 HOST_WIDE_INT offset;
4219 HOST_WIDE_INT ret_off = 0;
4221 int merge_sp_adjust_with_load = 0;
4223 /* We will use this often. */
4224 tmpreg = gen_rtx_REG (word_mode, 1);
4226 /* Try to restore RP early to avoid load/use interlocks when
4227 RP gets used in the return (bv) instruction. This appears to still
4228 be necessary even when we schedule the prologue and epilogue. */
4231 ret_off = TARGET_64BIT ? -16 : -20;
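/* These offsets correspond to the slot in which the prologue saved the return pointer: -20 from the frame pointer, or -16 in the 64-bit runtime.  */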
4232 if (frame_pointer_needed)
4234 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4239 /* No frame pointer, and stack is smaller than 8k. */
4240 if (VAL_14_BITS_P (ret_off - actual_fsize))
4242 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4248 /* General register restores. */
4249 if (frame_pointer_needed)
4251 offset = local_fsize;
4253 /* If the current function calls __builtin_eh_return, then we need
4254 to restore the saved EH data registers. */
4255 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4257 unsigned int i, regno;
4261 regno = EH_RETURN_DATA_REGNO (i);
4262 if (regno == INVALID_REGNUM)
4265 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4266 offset += UNITS_PER_WORD;
4270 for (i = 18; i >= 4; i--)
4271 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4273 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4274 offset += UNITS_PER_WORD;
4279 offset = local_fsize - actual_fsize;
4281 /* If the current function calls __builtin_eh_return, then we need
4282 to restore the saved EH data registers. */
4283 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4285 unsigned int i, regno;
4289 regno = EH_RETURN_DATA_REGNO (i);
4290 if (regno == INVALID_REGNUM)
4293 /* Only for the first load.
4294 merge_sp_adjust_with_load holds the register load
4295 with which we will merge the sp adjustment. */
4296 if (merge_sp_adjust_with_load == 0
4298 && VAL_14_BITS_P (-actual_fsize))
4299 merge_sp_adjust_with_load = regno;
4301 load_reg (regno, offset, STACK_POINTER_REGNUM);
4302 offset += UNITS_PER_WORD;
4306 for (i = 18; i >= 3; i--)
4308 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4310 /* Only for the first load.
4311 merge_sp_adjust_with_load holds the register load
4312 with which we will merge the sp adjustment. */
4313 if (merge_sp_adjust_with_load == 0
4315 && VAL_14_BITS_P (-actual_fsize))
4316 merge_sp_adjust_with_load = i;
4318 load_reg (i, offset, STACK_POINTER_REGNUM);
4319 offset += UNITS_PER_WORD;
4324 /* Align pointer properly (doubleword boundary). */
4325 offset = (offset + 7) & ~7;
4327 /* FP register restores. */
4330 /* Adjust the register to index off of. */
4331 if (frame_pointer_needed)
4332 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4334 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4336 /* Actually do the restores now. */
4337 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4338 if (df_regs_ever_live_p (i)
4339 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4341 rtx src = gen_rtx_MEM (DFmode,
4342 gen_rtx_POST_INC (word_mode, tmpreg));
4343 rtx dest = gen_rtx_REG (DFmode, i);
4344 emit_move_insn (dest, src);
4348 /* Emit a blockage insn here to keep these insns from being moved to
4349 an earlier spot in the epilogue, or into the main instruction stream.
4351 This is necessary as we must not cut the stack back before all the
4352 restores are finished. */
4353 emit_insn (gen_blockage ());
4355 /* Reset stack pointer (and possibly frame pointer). The stack
4356 pointer is initially set to fp + 64 to avoid a race condition. */
4357 if (frame_pointer_needed)
4359 rtx delta = GEN_INT (-64);
4361 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4362 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4363 stack_pointer_rtx, delta));
4365 /* If we were deferring a callee register restore, do it now. */
4366 else if (merge_sp_adjust_with_load)
4368 rtx delta = GEN_INT (-actual_fsize);
4369 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4371 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
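/* The pre_load pattern restores the register and performs the deferred stack adjustment in a single load-with-modify instruction.  */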
4373 else if (actual_fsize != 0)
4374 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4377 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4378 frame greater than 8k), do so now. */
4380 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4382 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4384 rtx sa = EH_RETURN_STACKADJ_RTX;
4386 emit_insn (gen_blockage ());
4387 emit_insn (TARGET_64BIT
4388 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4389 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4394 pa_can_use_return_insn (void)
4396 if (!reload_completed)
4399 if (frame_pointer_needed)
4402 if (df_regs_ever_live_p (2))
4408 return pa_compute_frame_size (get_frame_size (), 0) == 0;
4412 hppa_pic_save_rtx (void)
4414 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4417 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4418 #define NO_DEFERRED_PROFILE_COUNTERS 0
4422 /* Vector of funcdef numbers. */
4423 static vec<int> funcdef_nos;
4425 /* Output deferred profile counters. */
4427 output_deferred_profile_counters (void)
4432 if (funcdef_nos.is_empty ())
4435 switch_to_section (data_section);
4436 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4437 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4439 for (i = 0; funcdef_nos.iterate (i, &n); i++)
4441 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4442 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4445 funcdef_nos.release ();
4449 hppa_profile_hook (int label_no)
4451 /* We use SImode for the address of the function in both 32 and
4452 64-bit code to avoid having to provide DImode versions of the
4453 lcla2 and load_offset_label_address insn patterns. */
4454 rtx reg = gen_reg_rtx (SImode);
4455 rtx label_rtx = gen_label_rtx ();
4456 rtx begin_label_rtx, call_insn;
4457 char begin_label_name[16];
4459 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4461 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4464 emit_move_insn (arg_pointer_rtx,
4465 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4468 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4470 /* The address of the function is loaded into %r25 with an instruction-
4471 relative sequence that avoids the use of relocations. The sequence
4472 is split so that the load_offset_label_address instruction can
4473 occupy the delay slot of the call to _mcount. */
4475 emit_insn (gen_lcla2 (reg, label_rtx));
4477 emit_insn (gen_lcla1 (reg, label_rtx));
4479 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4480 reg, begin_label_rtx, label_rtx));
4482 #if !NO_DEFERRED_PROFILE_COUNTERS
4484 rtx count_label_rtx, addr, r24;
4485 char count_label_name[16];
4487 funcdef_nos.safe_push (label_no);
4488 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4489 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4491 addr = force_reg (Pmode, count_label_rtx);
4492 r24 = gen_rtx_REG (Pmode, 24);
4493 emit_move_insn (r24, addr);
4496 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4497 gen_rtx_SYMBOL_REF (Pmode,
4499 GEN_INT (TARGET_64BIT ? 24 : 12)));
4501 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4506 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4507 gen_rtx_SYMBOL_REF (Pmode,
4509 GEN_INT (TARGET_64BIT ? 16 : 8)));
4513 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4514 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4516 /* Indicate the _mcount call cannot throw, nor will it execute a non-local goto.  */
4518 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4521 /* Fetch the return address for the frame COUNT steps up from
4522 the current frame, after the prologue. FRAMEADDR is the
4523 frame pointer of the COUNT frame.
4525 We want to ignore any export stub remnants here. To handle this,
4526 we examine the code at the return address, and if it is an export
4527 stub, we return a memory rtx for the stub return address stored at frame-24.
4530 The value returned is used in two different ways:
4532 1. To find a function's caller.
4534 2. To change the return address for a function.
4536 This function handles most instances of case 1; however, it will
4537 fail if there are two levels of stubs to execute on the return
4538 path. The only way I believe that can happen is if the return value
4539 needs a parameter relocation, which never happens for C code.
4541 This function handles most instances of case 2; however, it will
4542 fail if we did not originally have stub code on the return path
4543 but will need stub code on the new return path. This can happen if
4544 the caller & callee are both in the main program, but the new
4545 return location is in a shared library. */
4548 pa_return_addr_rtx (int count, rtx frameaddr)
4555 /* The instruction stream at the return address of a PA1.X export stub is:
4557 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4558 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4559 0x00011820 | stub+16: mtsp r1,sr0
4560 0xe0400002 | stub+20: be,n 0(sr0,rp)
4562 0xe0400002 must be specified as -532676606 so that it won't be
4563 rejected as an invalid immediate operand on 64-bit hosts.
4565 The instruction stream at the return address of a PA2.0 export stub is:
4567 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4568 0xe840d002 | stub+12: bve,n (rp)
4571 HOST_WIDE_INT insns[4];
4577 rp = get_hard_reg_initial_val (Pmode, 2);
4579 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4582 /* If there is no export stub then just use the value saved from
4583 the return pointer register. */
4585 saved_rp = gen_reg_rtx (Pmode);
4586 emit_move_insn (saved_rp, rp);
4588 /* Get pointer to the instruction stream. We have to mask out the
4589 privilege level from the two low order bits of the return address
4590 pointer here so that ins will point to the start of the first
4591 instruction that would have been executed if we returned. */
4592 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
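/* MASK_RETURN_ADDR effectively computes ins = rp & ~3, clearing the two privilege-level bits described above.  */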
4593 label = gen_label_rtx ();
4597 insns[0] = 0x4bc23fd1;
4598 insns[1] = -398405630;
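/* -398405630 is the signed 32-bit value of 0xe840d002, the bve,n (rp) word shown above; like -532676606, it avoids rejection as an invalid immediate on 64-bit hosts.  */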
4603 insns[0] = 0x4bc23fd1;
4604 insns[1] = 0x004010a1;
4605 insns[2] = 0x00011820;
4606 insns[3] = -532676606;
4610 /* Check the instruction stream at the normal return address for the
4611 export stub. If it is an export stub, then our return address is
4612 really in -24[frameaddr]. */
4614 for (i = 0; i < len; i++)
4616 rtx op0 = gen_rtx_MEM (SImode, plus_constant (Pmode, ins, i * 4));
4617 rtx op1 = GEN_INT (insns[i]);
4618 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4621 /* Here we know that our return address points to an export
4622 stub. We don't want to return the address of the export stub,
4623 but rather the return address of the export stub. That return
4624 address is stored at -24[frameaddr]. */
4626 emit_move_insn (saved_rp,
4628 memory_address (Pmode,
4629 plus_constant (Pmode, frameaddr,
4638 pa_emit_bcond_fp (rtx operands[])
4640 enum rtx_code code = GET_CODE (operands[0]);
4641 rtx operand0 = operands[1];
4642 rtx operand1 = operands[2];
4643 rtx label = operands[3];
4645 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4646 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4648 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4649 gen_rtx_IF_THEN_ELSE (VOIDmode,
4652 gen_rtx_REG (CCFPmode, 0),
4654 gen_rtx_LABEL_REF (VOIDmode, label),
4659 /* Adjust the cost of a scheduling dependency. Return the new cost of
4660 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4663 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4665 enum attr_type attr_type;
4667 /* Don't adjust costs for a pa8000 chip; likewise, do not adjust any
4668 true dependencies, as they are described with bypasses now. */
4669 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4672 if (! recog_memoized (insn))
4675 attr_type = get_attr_type (insn);
4677 switch (REG_NOTE_KIND (link))
4680 /* Anti dependency; DEP_INSN reads a register that INSN writes some cycles later.  */
4683 if (attr_type == TYPE_FPLOAD)
4685 rtx pat = PATTERN (insn);
4686 rtx dep_pat = PATTERN (dep_insn);
4687 if (GET_CODE (pat) == PARALLEL)
4689 /* This happens for the fldXs,mb patterns. */
4690 pat = XVECEXP (pat, 0, 0);
4692 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4693 /* If this happens, we have to extend this to schedule
4694 optimally. Return 0 for now. */
4697 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4699 if (! recog_memoized (dep_insn))
4701 switch (get_attr_type (dep_insn))
4708 case TYPE_FPSQRTSGL:
4709 case TYPE_FPSQRTDBL:
4710 /* A fpload can't be issued until one cycle before a
4711 preceding arithmetic operation has finished if
4712 the target of the fpload is any of the sources
4713 (or destination) of the arithmetic operation. */
4714 return insn_default_latency (dep_insn) - 1;
4721 else if (attr_type == TYPE_FPALU)
4723 rtx pat = PATTERN (insn);
4724 rtx dep_pat = PATTERN (dep_insn);
4725 if (GET_CODE (pat) == PARALLEL)
4727 /* This happens for the fldXs,mb patterns. */
4728 pat = XVECEXP (pat, 0, 0);
4730 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4731 /* If this happens, we have to extend this to schedule
4732 optimally. Return 0 for now. */
4735 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4737 if (! recog_memoized (dep_insn))
4739 switch (get_attr_type (dep_insn))
4743 case TYPE_FPSQRTSGL:
4744 case TYPE_FPSQRTDBL:
4745 /* An ALU flop can't be issued until two cycles before a
4746 preceding divide or sqrt operation has finished if
4747 the target of the ALU flop is any of the sources
4748 (or destination) of the divide or sqrt operation. */
4749 return insn_default_latency (dep_insn) - 2;
4757 /* For other anti dependencies, the cost is 0. */
4760 case REG_DEP_OUTPUT:
4761 /* Output dependency; DEP_INSN writes a register that INSN writes some cycles later.  */
4763 if (attr_type == TYPE_FPLOAD)
4765 rtx pat = PATTERN (insn);
4766 rtx dep_pat = PATTERN (dep_insn);
4767 if (GET_CODE (pat) == PARALLEL)
4769 /* This happens for the fldXs,mb patterns. */
4770 pat = XVECEXP (pat, 0, 0);
4772 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4773 /* If this happens, we have to extend this to schedule
4774 optimally. Return 0 for now. */
4777 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4779 if (! recog_memoized (dep_insn))
4781 switch (get_attr_type (dep_insn))
4788 case TYPE_FPSQRTSGL:
4789 case TYPE_FPSQRTDBL:
4790 /* A fpload can't be issued until one cycle before a
4791 preceding arithmetic operation has finished if
4792 the target of the fpload is the destination of the
4793 arithmetic operation.
4795 Exception: For PA7100LC, PA7200 and PA7300, the cost
4796 is 3 cycles, unless they bundle together. We also
4797 pay the penalty if the second insn is a fpload. */
4798 return insn_default_latency (dep_insn) - 1;
4805 else if (attr_type == TYPE_FPALU)
4807 rtx pat = PATTERN (insn);
4808 rtx dep_pat = PATTERN (dep_insn);
4809 if (GET_CODE (pat) == PARALLEL)
4811 /* This happens for the fldXs,mb patterns. */
4812 pat = XVECEXP (pat, 0, 0);
4814 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4815 /* If this happens, we have to extend this to schedule
4816 optimally. Return 0 for now. */
4819 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4821 if (! recog_memoized (dep_insn))
4823 switch (get_attr_type (dep_insn))
4827 case TYPE_FPSQRTSGL:
4828 case TYPE_FPSQRTDBL:
4829 /* An ALU flop can't be issued until two cycles before a
4830 preceding divide or sqrt operation has finished if
4831 the target of the ALU flop is also the target of
4832 the divide or sqrt operation. */
4833 return insn_default_latency (dep_insn) - 2;
4841 /* For other output dependencies, the cost is 0. */
4849 /* Adjust scheduling priorities. We use this to try and keep addil
4850 and the next use of %r1 close together. */
4852 pa_adjust_priority (rtx insn, int priority)
4854 rtx set = single_set (insn);
4858 src = SET_SRC (set);
4859 dest = SET_DEST (set);
4860 if (GET_CODE (src) == LO_SUM
4861 && symbolic_operand (XEXP (src, 1), VOIDmode)
4862 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4865 else if (GET_CODE (src) == MEM
4866 && GET_CODE (XEXP (src, 0)) == LO_SUM
4867 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4868 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4871 else if (GET_CODE (dest) == MEM
4872 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4873 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4874 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4880 /* The 700 can only issue a single insn at a time.
4881 The 7XXX processors can issue two insns at a time.
4882 The 8000 can issue 4 insns at a time. */
4884 pa_issue_rate (void)
4888 case PROCESSOR_700: return 1;
4889 case PROCESSOR_7100: return 2;
4890 case PROCESSOR_7100LC: return 2;
4891 case PROCESSOR_7200: return 2;
4892 case PROCESSOR_7300: return 2;
4893 case PROCESSOR_8000: return 4;
4902 /* Return any length plus adjustment needed by INSN which already has
4903 its length computed as LENGTH. Return LENGTH if no adjustment is necessary.
4906 Also compute the length of an inline block move here as it is too
4907 complicated to express as a length attribute in pa.md. */
4909 pa_adjust_insn_length (rtx insn, int length)
4911 rtx pat = PATTERN (insn);
4913 /* If length is negative or undefined, provide initial length. */
4914 if ((unsigned int) length >= INT_MAX)
4916 if (GET_CODE (pat) == SEQUENCE)
4917 insn = XVECEXP (pat, 0, 0);
4919 switch (get_attr_type (insn))
4922 length = pa_attr_length_millicode_call (insn);
4925 length = pa_attr_length_call (insn, 0);
4928 length = pa_attr_length_call (insn, 1);
4931 length = pa_attr_length_indirect_call (insn);
4933 case TYPE_SH_FUNC_ADRS:
4934 length = pa_attr_length_millicode_call (insn) + 20;
4941 /* Jumps inside switch tables which have unfilled delay slots need adjustment.  */
4943 if (GET_CODE (insn) == JUMP_INSN
4944 && GET_CODE (pat) == PARALLEL
4945 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4947 /* Block move pattern. */
4948 else if (GET_CODE (insn) == INSN
4949 && GET_CODE (pat) == PARALLEL
4950 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4951 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4952 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4953 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4954 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4955 length += compute_movmem_length (insn) - 4;
4956 /* Block clear pattern. */
4957 else if (GET_CODE (insn) == INSN
4958 && GET_CODE (pat) == PARALLEL
4959 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4960 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4961 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4962 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4963 length += compute_clrmem_length (insn) - 4;
4964 /* Conditional branch with an unfilled delay slot. */
4965 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4967 /* Adjust a short backwards conditional with an unfilled delay slot. */
4968 if (GET_CODE (pat) == SET
4970 && JUMP_LABEL (insn) != NULL_RTX
4971 && ! forward_branch_p (insn))
4973 else if (GET_CODE (pat) == PARALLEL
4974 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4977 /* Adjust dbra insn with short backwards conditional branch with
4978 unfilled delay slot -- only for case where counter is in a
4979 general register. */
4980 else if (GET_CODE (pat) == PARALLEL
4981 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4982 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4983 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4985 && ! forward_branch_p (insn))
4991 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4994 pa_print_operand_punct_valid_p (unsigned char code)
5005 /* Print operand X (an rtx) in assembler syntax to file FILE.
5006 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
5007 For `%' followed by punctuation, CODE is the punctuation and X is null. */
5010 pa_print_operand (FILE *file, rtx x, int code)
5015 /* Output a 'nop' if there's nothing for the delay slot. */
5016 if (dbr_sequence_length () == 0)
5017 fputs ("\n\tnop", file);
5020 /* Output a nullification completer if there's nothing for the
5021 delay slot or nullification is requested. */
5022 if (dbr_sequence_length () == 0 ||
5024 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
5028 /* Print out the second register name of a register pair.
5029 I.e., R (6) => 7. */
5030 fputs (reg_names[REGNO (x) + 1], file);
5033 /* A register or zero. */
5035 || (x == CONST0_RTX (DFmode))
5036 || (x == CONST0_RTX (SFmode)))
5038 fputs ("%r0", file);
5044 /* A register or zero (floating point). */
5046 || (x == CONST0_RTX (DFmode))
5047 || (x == CONST0_RTX (SFmode)))
5049 fputs ("%fr0", file);
5058 xoperands[0] = XEXP (XEXP (x, 0), 0);
5059 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
5060 pa_output_global_address (file, xoperands[1], 0);
5061 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
5065 case 'C': /* Plain (C)ondition */
5067 switch (GET_CODE (x))
5070 fputs ("=", file); break;
5072 fputs ("<>", file); break;
5074 fputs (">", file); break;
5076 fputs (">=", file); break;
5078 fputs (">>=", file); break;
5080 fputs (">>", file); break;
5082 fputs ("<", file); break;
5084 fputs ("<=", file); break;
5086 fputs ("<<=", file); break;
5088 fputs ("<<", file); break;
5093 case 'N': /* Condition, (N)egated */
5094 switch (GET_CODE (x))
5097 fputs ("<>", file); break;
5099 fputs ("=", file); break;
5101 fputs ("<=", file); break;
5103 fputs ("<", file); break;
5105 fputs ("<<", file); break;
5107 fputs ("<<=", file); break;
5109 fputs (">=", file); break;
5111 fputs (">", file); break;
5113 fputs (">>", file); break;
5115 fputs (">>=", file); break;
5120 /* For floating point comparisons. Note that the output
5121 predicates are the complement of the desired mode. The
5122 conditions for GT, GE, LT, LE and LTGT cause an invalid
5123 operation exception if the result is unordered and this
5124 exception is enabled in the floating-point status register. */
5126 switch (GET_CODE (x))
5129 fputs ("!=", file); break;
5131 fputs ("=", file); break;
5133 fputs ("!>", file); break;
5135 fputs ("!>=", file); break;
5137 fputs ("!<", file); break;
5139 fputs ("!<=", file); break;
5141 fputs ("!<>", file); break;
5143 fputs ("!?<=", file); break;
5145 fputs ("!?<", file); break;
5147 fputs ("!?>=", file); break;
5149 fputs ("!?>", file); break;
5151 fputs ("!?=", file); break;
5153 fputs ("!?", file); break;
5155 fputs ("?", file); break;
5160 case 'S': /* Condition, operands are (S)wapped. */
5161 switch (GET_CODE (x))
5164 fputs ("=", file); break;
5166 fputs ("<>", file); break;
5168 fputs ("<", file); break;
5170 fputs ("<=", file); break;
5172 fputs ("<<=", file); break;
5174 fputs ("<<", file); break;
5176 fputs (">", file); break;
5178 fputs (">=", file); break;
5180 fputs (">>=", file); break;
5182 fputs (">>", file); break;
5187 case 'B': /* Condition, (B)oth swapped and negate. */
5188 switch (GET_CODE (x))
5191 fputs ("<>", file); break;
5193 fputs ("=", file); break;
5195 fputs (">=", file); break;
5197 fputs (">", file); break;
5199 fputs (">>", file); break;
5201 fputs (">>=", file); break;
5203 fputs ("<=", file); break;
5205 fputs ("<", file); break;
5207 fputs ("<<", file); break;
5209 fputs ("<<=", file); break;
5215 gcc_assert (GET_CODE (x) == CONST_INT);
5216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5219 gcc_assert (GET_CODE (x) == CONST_INT);
5220 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5223 gcc_assert (GET_CODE (x) == CONST_INT);
5224 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5227 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5228 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5231 gcc_assert (GET_CODE (x) == CONST_INT);
5232 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5235 gcc_assert (GET_CODE (x) == CONST_INT);
5236 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5239 if (GET_CODE (x) == CONST_INT)
5244 switch (GET_CODE (XEXP (x, 0)))
5248 if (ASSEMBLER_DIALECT == 0)
5249 fputs ("s,mb", file);
5251 fputs (",mb", file);
5255 if (ASSEMBLER_DIALECT == 0)
5256 fputs ("s,ma", file);
5258 fputs (",ma", file);
5261 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5262 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5264 if (ASSEMBLER_DIALECT == 0)
5267 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5268 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5270 if (ASSEMBLER_DIALECT == 0)
5271 fputs ("x,s", file);
5275 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5279 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5285 pa_output_global_address (file, x, 0);
5288 pa_output_global_address (file, x, 1);
5290 case 0: /* Don't do anything special */
5295 compute_zdepwi_operands (INTVAL (x), op);
5296 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5302 compute_zdepdi_operands (INTVAL (x), op);
5303 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5307 /* We can get here from a .vtable_inherit due to our
5308 CONSTANT_ADDRESS_P rejecting perfectly good constant addresses. */
5314 if (GET_CODE (x) == REG)
5316 fputs (reg_names [REGNO (x)], file);
5317 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5323 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5324 && (REGNO (x) & 1) == 0)
5327 else if (GET_CODE (x) == MEM)
5329 int size = GET_MODE_SIZE (GET_MODE (x));
5330 rtx base = NULL_RTX;
5331 switch (GET_CODE (XEXP (x, 0)))
5335 base = XEXP (XEXP (x, 0), 0);
5336 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5340 base = XEXP (XEXP (x, 0), 0);
5341 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5344 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5345 fprintf (file, "%s(%s)",
5346 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5347 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5348 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5349 fprintf (file, "%s(%s)",
5350 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5351 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5352 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5353 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5355 /* Because the REG_POINTER flag can get lost during reload,
5356 pa_legitimate_address_p canonicalizes the order of the
5357 index and base registers in the combined move patterns. */
5358 rtx base = XEXP (XEXP (x, 0), 1);
5359 rtx index = XEXP (XEXP (x, 0), 0);
5361 fprintf (file, "%s(%s)",
5362 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5365 output_address (XEXP (x, 0));
5368 output_address (XEXP (x, 0));
5373 output_addr_const (file, x);
5376 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5379 pa_output_global_address (FILE *file, rtx x, int round_constant)
5382 /* Imagine (high (const (plus ...))). */
5383 if (GET_CODE (x) == HIGH)
5386 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5387 output_addr_const (file, x);
5388 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5390 output_addr_const (file, x);
5391 fputs ("-$global$", file);
5393 else if (GET_CODE (x) == CONST)
5395 const char *sep = "";
5396 int offset = 0; /* assembler wants -$global$ at end */
5397 rtx base = NULL_RTX;
5399 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5402 base = XEXP (XEXP (x, 0), 0);
5403 output_addr_const (file, base);
5406 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5412 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5415 base = XEXP (XEXP (x, 0), 1);
5416 output_addr_const (file, base);
5419 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5425 /* How bogus. The compiler is apparently responsible for
5426 rounding the constant if it uses an LR field selector.
5428 The linker and/or assembler seem a better place since
5429 they have to do this kind of thing already.
5431 If we fail to do this, HP's optimizing linker may eliminate
5432 an addil, but not update the ldw/stw/ldo instruction that
5433 uses the result of the addil. */
5435 offset = ((offset + 0x1000) & ~0x1fff);
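/* That is, round to the nearest multiple of 0x2000; e.g., an offset of 0x2800 becomes (0x2800 + 0x1000) & ~0x1fff == 0x2000.  */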
5437 switch (GET_CODE (XEXP (x, 0)))
5450 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5458 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5459 fputs ("-$global$", file);
5461 fprintf (file, "%s%d", sep, offset);
5464 output_addr_const (file, x);
5467 /* Output boilerplate text to appear at the beginning of the file.
5468 There are several possible versions. */
5469 #define aputs(x) fputs(x, asm_out_file)
5471 pa_file_start_level (void)
5474 aputs ("\t.LEVEL 2.0w\n");
5475 else if (TARGET_PA_20)
5476 aputs ("\t.LEVEL 2.0\n");
5477 else if (TARGET_PA_11)
5478 aputs ("\t.LEVEL 1.1\n");
5480 aputs ("\t.LEVEL 1.0\n");
5484 pa_file_start_space (int sortspace)
5486 aputs ("\t.SPACE $PRIVATE$");
5489 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31");
5491 aputs ("\n\t.SUBSPA $TM_CLONE_TABLE$,QUAD=1,ALIGN=8,ACCESS=31");
5492 aputs ("\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5493 "\n\t.SPACE $TEXT$");
5496 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5497 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5501 pa_file_start_file (int want_version)
5503 if (write_symbols != NO_DEBUG)
5505 output_file_directive (asm_out_file, main_input_filename);
5507 aputs ("\t.version\t\"01.01\"\n");
5512 pa_file_start_mcount (const char *aswhat)
5515 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5519 pa_elf_file_start (void)
5521 pa_file_start_level ();
5522 pa_file_start_mcount ("ENTRY");
5523 pa_file_start_file (0);
5527 pa_som_file_start (void)
5529 pa_file_start_level ();
5530 pa_file_start_space (0);
5531 aputs ("\t.IMPORT $global$,DATA\n"
5532 "\t.IMPORT $$dyncall,MILLICODE\n");
5533 pa_file_start_mcount ("CODE");
5534 pa_file_start_file (0);
5538 pa_linux_file_start (void)
5540 pa_file_start_file (1);
5541 pa_file_start_level ();
5542 pa_file_start_mcount ("CODE");
5546 pa_hpux64_gas_file_start (void)
5548 pa_file_start_level ();
5549 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5551 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5553 pa_file_start_file (1);
5557 pa_hpux64_hpas_file_start (void)
5559 pa_file_start_level ();
5560 pa_file_start_space (1);
5561 pa_file_start_mcount ("CODE");
5562 pa_file_start_file (0);
5566 /* Search the deferred plabel list for SYMBOL and return its internal
5567 label. If an entry for SYMBOL is not found, a new entry is created. */
5570 pa_get_deferred_plabel (rtx symbol)
5572 const char *fname = XSTR (symbol, 0);
5575 /* See if we have already put this function on the list of deferred
5576 plabels. This list is generally small, so a linear search is not
5577 too ugly. If it proves too slow, replace it with something faster. */
5578 for (i = 0; i < n_deferred_plabels; i++)
5579 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5582 /* If the deferred plabel list is empty, or this entry was not found
5583 on the list, create a new entry on the list. */
5584 if (deferred_plabels == NULL || i == n_deferred_plabels)
5588 if (deferred_plabels == 0)
5589 deferred_plabels = ggc_alloc_deferred_plabel ();
5591 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5593 n_deferred_plabels + 1);
5595 i = n_deferred_plabels++;
5596 deferred_plabels[i].internal_label = gen_label_rtx ();
5597 deferred_plabels[i].symbol = symbol;
5599 /* Gross. We have just implicitly taken the address of this
5600 function. Mark it in the same manner as assemble_name. */
5601 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5603 mark_referenced (id);
5606 return deferred_plabels[i].internal_label;
5610 output_deferred_plabels (void)
5614 /* If we have some deferred plabels, then we need to switch into the
5615 data or readonly data section, and align it to a 4 byte (8 byte for TARGET_64BIT) boundary
5616 before outputting the deferred plabels. */
5617 if (n_deferred_plabels)
5619 switch_to_section (flag_pic ? data_section : readonly_data_section);
5620 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5623 /* Now output the deferred plabels. */
5624 for (i = 0; i < n_deferred_plabels; i++)
5626 targetm.asm_out.internal_label (asm_out_file, "L",
5627 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5628 assemble_integer (deferred_plabels[i].symbol,
5629 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5633 /* Initialize optabs to point to emulation routines. */
5636 pa_init_libfuncs (void)
5638 if (HPUX_LONG_DOUBLE_LIBRARY)
5640 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5641 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5642 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5643 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5644 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5645 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5646 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5647 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5648 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5650 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5651 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5652 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5653 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5654 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5655 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5656 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5658 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5659 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5660 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5661 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5663 set_conv_libfunc (sfix_optab, SImode, TFmode,
5664 TARGET_64BIT ? "__U_Qfcnvfxt_quad_to_sgl"
5665 : "_U_Qfcnvfxt_quad_to_sgl");
5666 set_conv_libfunc (sfix_optab, DImode, TFmode,
5667 "_U_Qfcnvfxt_quad_to_dbl");
5668 set_conv_libfunc (ufix_optab, SImode, TFmode,
5669 "_U_Qfcnvfxt_quad_to_usgl");
5670 set_conv_libfunc (ufix_optab, DImode, TFmode,
5671 "_U_Qfcnvfxt_quad_to_udbl");
5673 set_conv_libfunc (sfloat_optab, TFmode, SImode,
5674 "_U_Qfcnvxf_sgl_to_quad");
5675 set_conv_libfunc (sfloat_optab, TFmode, DImode,
5676 "_U_Qfcnvxf_dbl_to_quad");
5677 set_conv_libfunc (ufloat_optab, TFmode, SImode,
5678 "_U_Qfcnvxf_usgl_to_quad");
5679 set_conv_libfunc (ufloat_optab, TFmode, DImode,
5680 "_U_Qfcnvxf_udbl_to_quad");
5683 if (TARGET_SYNC_LIBCALL)
5684 init_sync_libfuncs (UNITS_PER_WORD);
5687 /* HP's millicode routines mean something special to the assembler.
5688 Keep track of which ones we have used. */
5690 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5691 static void import_milli (enum millicodes);
5692 static char imported[(int) end1000];
5693 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5694 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5695 #define MILLI_START 10
5698 import_milli (enum millicodes code)
5700 char str[sizeof (import_string)];
5702 if (!imported[(int) code])
5704 imported[(int) code] = 1;
5705 strcpy (str, import_string);
5706 strncpy (str + MILLI_START, milli_names[(int) code], 4);
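/* For example, import_milli (mulI) outputs ".IMPORT $$mulI,MILLICODE".  */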
5707 output_asm_insn (str, 0);
5711 /* The register constraints have put the operands and return value in
5712 the proper registers. */
5715 pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5717 import_milli (mulI);
5718 return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5721 /* Emit the rtl for doing a division by a constant. */
5723 /* Do magic division millicodes exist for this value? */
5724 const int pa_magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
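/* Nonzero entries mark the divisors (3, 5, 6, 7, 9, 10, 12, 14 and 15) for which $$divI_<n> and $$divU_<n> millicode routines exist.  */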
5726 /* We'll use an array to keep track of the magic millicodes and
5727 whether or not we've used them already. [n][0] is signed, [n][1] is unsigned. */
5730 static int div_milli[16][2];
5733 pa_emit_hpdiv_const (rtx *operands, int unsignedp)
5735 if (GET_CODE (operands[2]) == CONST_INT
5736 && INTVAL (operands[2]) > 0
5737 && INTVAL (operands[2]) < 16
5738 && pa_magic_milli[INTVAL (operands[2])])
5740 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
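/* Millicode calls return through %r31 in the 32-bit runtime (%r2 in the 64-bit runtime); this register is clobbered by the call below.  */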
5742 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5746 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5747 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5749 gen_rtx_REG (SImode, 26),
5751 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5752 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5753 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5754 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5755 gen_rtx_CLOBBER (VOIDmode, ret))));
5756 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5763 pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
5767 /* If the divisor is a constant, try to use one of the special millicode routines.  */
5769 if (GET_CODE (operands[0]) == CONST_INT)
5771 static char buf[100];
5772 divisor = INTVAL (operands[0]);
5773 if (!div_milli[divisor][unsignedp])
5775 div_milli[divisor][unsignedp] = 1;
5777 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5779 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5783 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5784 INTVAL (operands[0]));
5785 return pa_output_millicode_call (insn,
5786 gen_rtx_SYMBOL_REF (SImode, buf));
5790 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5791 INTVAL (operands[0]));
5792 return pa_output_millicode_call (insn,
5793 gen_rtx_SYMBOL_REF (SImode, buf));
5796 /* Divisor isn't a special constant. */
5801 import_milli (divU);
5802 return pa_output_millicode_call (insn,
5803 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5807 import_milli (divI);
5808 return pa_output_millicode_call (insn,
5809 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5814 /* Output a $$rem millicode to do mod. */
5817 pa_output_mod_insn (int unsignedp, rtx insn)
5821 import_milli (remU);
5822 return pa_output_millicode_call (insn,
5823 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5827 import_milli (remI);
5828 return pa_output_millicode_call (insn,
5829 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5834 pa_output_arg_descriptor (rtx call_insn)
5836 const char *arg_regs[4];
5837 enum machine_mode arg_mode;
5839 int i, output_flag = 0;
5842 /* We neither need nor want argument location descriptors for the
5843 64bit runtime environment or the ELF32 environment. */
5844 if (TARGET_64BIT || TARGET_ELF32)
5847 for (i = 0; i < 4; i++)
5850 /* Specify explicitly that no argument relocations should take place
5851 if using the portable runtime calling conventions. */
5852 if (TARGET_PORTABLE_RUNTIME)
5854 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5859 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5860 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5861 link; link = XEXP (link, 1))
5863 rtx use = XEXP (link, 0);
5865 if (! (GET_CODE (use) == USE
5866 && GET_CODE (XEXP (use, 0)) == REG
5867 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5870 arg_mode = GET_MODE (XEXP (use, 0));
5871 regno = REGNO (XEXP (use, 0));
5872 if (regno >= 23 && regno <= 26)
5874 arg_regs[26 - regno] = "GR";
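/* %r26 is ARGW0, %r25 ARGW1, %r24 ARGW2 and %r23 ARGW3.  */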
5875 if (arg_mode == DImode)
5876 arg_regs[25 - regno] = "GR";
5878 else if (regno >= 32 && regno <= 39)
5880 if (arg_mode == SFmode)
5881 arg_regs[(regno - 32) / 2] = "FR";
5884 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5885 arg_regs[(regno - 34) / 2] = "FR";
5886 arg_regs[(regno - 34) / 2 + 1] = "FU";
5888 arg_regs[(regno - 34) / 2] = "FU";
5889 arg_regs[(regno - 34) / 2 + 1] = "FR";
5894 fputs ("\t.CALL ", asm_out_file);
5895 for (i = 0; i < 4; i++)
5900 fputc (',', asm_out_file);
5901 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5904 fputc ('\n', asm_out_file);
5907 /* Inform reload about cases where moving X with a mode MODE to or from
5908 a register in RCLASS requires an extra scratch or immediate register.
5909 Return the class needed for the immediate register. */
5912 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5913 enum machine_mode mode, secondary_reload_info *sri)
5916 enum reg_class rclass = (enum reg_class) rclass_i;
5918 /* Handle the easy stuff first. */
5919 if (rclass == R1_REGS)
5925 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5931 /* If we have something like (mem (mem (...))), we can safely assume the
5932 inner MEM will end up in a general register after reloading, so there's
5933 no need for a secondary reload. */
5934 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5937 /* Trying to load a constant into a FP register during PIC code
5938 generation requires %r1 as a scratch register. For float modes,
5939 the only legitimate constant is CONST0_RTX. However, there are
5940 a few patterns that accept constant double operands. */
5942 && FP_REG_CLASS_P (rclass)
5943 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5948 sri->icode = CODE_FOR_reload_insi_r1;
5952 sri->icode = CODE_FOR_reload_indi_r1;
5956 sri->icode = CODE_FOR_reload_insf_r1;
5960 sri->icode = CODE_FOR_reload_indf_r1;
5969 /* Secondary reloads of symbolic expressions require %r1 as a scratch
5970 register when we're generating PIC code or when the operand isn't readonly. */
5972 if (pa_symbolic_expression_p (x))
5974 if (GET_CODE (x) == HIGH)
5977 if (flag_pic || !read_only_operand (x, VOIDmode))
5982 sri->icode = CODE_FOR_reload_insi_r1;
5986 sri->icode = CODE_FOR_reload_indi_r1;
5996 /* Profiling showed the PA port spends about 1.3% of its compilation
5997 time in true_regnum from calls inside pa_secondary_reload_class. */
5998 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5999 regno = true_regnum (x);
6001 /* Handle reloads for floating point loads and stores. */
6002 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
6003 && FP_REG_CLASS_P (rclass))
6009 /* We don't need an intermediate for indexed and LO_SUM DLT
6010 memory addresses. When INT14_OK_STRICT is true, it might
6011 appear that we could directly allow register indirect
6012 memory addresses. However, this doesn't work because we
6013 don't support SUBREGs in floating-point register copies
6014 and reload doesn't tell us when it's going to use a SUBREG. */
6015 if (IS_INDEX_ADDR_P (x)
6016 || IS_LO_SUM_DLT_ADDR_P (x))
6019 /* Request intermediate general register. */
6020 return GENERAL_REGS;
6023 /* Request a secondary reload with a general scratch register
6024 for everything else. ??? Could symbolic operands be handled
6025 directly when generating non-pic PA 2.0 code? */
6027 ? direct_optab_handler (reload_in_optab, mode)
6028 : direct_optab_handler (reload_out_optab, mode));
6032 /* A SAR<->FP register copy requires an intermediate general register
6033 and secondary memory. We need a secondary reload with a general
6034 scratch register for spills. */
6035 if (rclass == SHIFT_REGS)
6038 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
6041 ? direct_optab_handler (reload_in_optab, mode)
6042 : direct_optab_handler (reload_out_optab, mode));
6046 /* Handle FP copy. */
6047 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
6048 return GENERAL_REGS;
6051 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
6052 && REGNO_REG_CLASS (regno) == SHIFT_REGS
6053 && FP_REG_CLASS_P (rclass))
6054 return GENERAL_REGS;
6059 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
6060 is only marked as live on entry by df-scan when it is a fixed
6061 register. It isn't a fixed register in the 64-bit runtime,
6062 so we need to mark it here. */
6065 pa_extra_live_on_entry (bitmap regs)
6068 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
6071 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
6072 to prevent it from being deleted. */
6075 pa_eh_return_handler_rtx (void)
6079 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
6080 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
6081 tmp = gen_rtx_MEM (word_mode, tmp);
6086 /* In the 32-bit runtime, arguments larger than eight bytes are passed
6087 by invisible reference. As a GCC extension, we also pass anything
6088 with a zero or variable size by reference.
6090 The 64-bit runtime does not describe passing any types by invisible
6091 reference. The internals of GCC can't currently handle passing
6092 empty structures, and zero or variable length arrays when they are
6093 not passed entirely on the stack or by reference. Thus, as a GCC
6094 extension, we pass these types by reference. The HP compiler doesn't
6095 support these types, so hopefully there shouldn't be any compatibility
6096 issues. This may have to be revisited when HP releases a C99 compiler
6097 or updates the ABI. */
6100 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
6101 enum machine_mode mode, const_tree type,
6102 bool named ATTRIBUTE_UNUSED)
6107 size = int_size_in_bytes (type);
6109 size = GET_MODE_SIZE (mode);
6114 return size <= 0 || size > 8;
6118 pa_function_arg_padding (enum machine_mode mode, const_tree type)
6123 && (AGGREGATE_TYPE_P (type)
6124 || TREE_CODE (type) == COMPLEX_TYPE
6125 || TREE_CODE (type) == VECTOR_TYPE)))
6127 /* Return none if justification is not required. */
6129 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6130 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6133 /* The directions set here are ignored when a BLKmode argument larger
6134 than a word is placed in a register. Different code is used for
6135 the stack and registers. This makes it difficult to have a
6136 consistent data representation for both the stack and registers.
6137 For both runtimes, the justification and padding for arguments on
6138 the stack and in registers should be identical. */
6140 /* The 64-bit runtime specifies left justification for aggregates. */
6143 /* The 32-bit runtime architecture specifies right justification.
6144 When the argument is passed on the stack, the argument is padded
6145 with garbage on the left. The HP compiler pads with zeros. */
6149 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6156 /* Do what is necessary for `va_start'. We look at the current function
6157 to determine if stdargs or varargs is used and fill in an initial
6158 va_list. A pointer to this constructor is returned. */
6161 hppa_builtin_saveregs (void)
6164 tree fntype = TREE_TYPE (current_function_decl);
6165 int argadj = ((!stdarg_p (fntype))
6166 ? UNITS_PER_WORD : 0);
6169 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
6171 offset = crtl->args.arg_offset_rtx;
6177 /* Adjust for varargs/stdarg differences. */
6179 offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, -argadj);
6181 offset = crtl->args.arg_offset_rtx;
6183 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6184 from the incoming arg pointer and growing to larger addresses. */
6185 for (i = 26, off = -64; i >= 19; i--, off += 8)
6186 emit_move_insn (gen_rtx_MEM (word_mode,
6187 plus_constant (Pmode,
6188 arg_pointer_rtx, off)),
6189 gen_rtx_REG (word_mode, i));
6191 /* The incoming args pointer points just beyond the flushback area;
6192 normally this is not a serious concern. However, when we are doing
6193 varargs/stdargs we want to make the arg pointer point to the start
6194 of the incoming argument area. */
6195 emit_move_insn (virtual_incoming_args_rtx,
6196 plus_constant (Pmode, arg_pointer_rtx, -64));
6198 /* Now return a pointer to the first anonymous argument. */
6199 return copy_to_reg (expand_binop (Pmode, add_optab,
6200 virtual_incoming_args_rtx,
6201 offset, 0, 0, OPTAB_LIB_WIDEN));
6204 /* Store general registers on the stack. */
6205 dest = gen_rtx_MEM (BLKmode,
6206 plus_constant (Pmode, crtl->args.internal_arg_pointer,
6208 set_mem_alias_set (dest, get_varargs_alias_set ());
6209 set_mem_align (dest, BITS_PER_WORD);
6210 move_block_from_reg (23, dest, 4);
6212 /* move_block_from_reg will emit code to store the argument registers
6213 individually as scalar stores.
6215 However, other insns may later load from the same addresses for
6216 a structure load (passing a struct to a varargs routine).
6218 The alias code assumes that such aliasing can never happen, so we
6219 have to keep memory referencing insns from moving up beyond the
6220 last argument register store. So we emit a blockage insn here. */
6221 emit_insn (gen_blockage ());
6223 return copy_to_reg (expand_binop (Pmode, add_optab,
6224 crtl->args.internal_arg_pointer,
6225 offset, 0, 0, OPTAB_LIB_WIDEN));
6229 hppa_va_start (tree valist, rtx nextarg)
6231 nextarg = expand_builtin_saveregs ();
6232 std_expand_builtin_va_start (valist, nextarg);
6236 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6241 /* Args grow upward. We can use the generic routines. */
6242 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6244 else /* !TARGET_64BIT */
6246 tree ptr = build_pointer_type (type);
6249 unsigned int size, ofs;
6252 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6256 ptr = build_pointer_type (type);
6258 size = int_size_in_bytes (type);
6259 valist_type = TREE_TYPE (valist);
6261 /* Args grow down. Not handled by generic routines. */
6263 u = fold_convert (sizetype, size_in_bytes (type));
6264 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6265 t = fold_build_pointer_plus (valist, u);
6267 /* Align to 4 or 8 byte boundary depending on argument size. */
6269 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6270 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
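/* Since the argument area grows downward, masking with -4 or -8 rounds the pointer down to the required 4 or 8 byte boundary.  */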
6271 t = fold_convert (valist_type, t);
6273 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6275 ofs = (8 - size) % 4;
6277 t = fold_build_pointer_plus_hwi (t, ofs);
6279 t = fold_convert (ptr, t);
6280 t = build_va_arg_indirect_ref (t);
6283 t = build_va_arg_indirect_ref (t);
6289 /* True if MODE is valid for the target. By "valid", we mean able to
6290 be manipulated in non-trivial ways. In particular, this means all
6291 the arithmetic is supported.
6293 Currently, TImode is not valid as the HP 64-bit runtime documentation
6294 doesn't document the alignment and calling conventions for this type.
6295 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6296 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6299 pa_scalar_mode_supported_p (enum machine_mode mode)
6301 int precision = GET_MODE_PRECISION (mode);
6303 switch (GET_MODE_CLASS (mode))
6305 case MODE_PARTIAL_INT:
6307 if (precision == CHAR_TYPE_SIZE)
6309 if (precision == SHORT_TYPE_SIZE)
6311 if (precision == INT_TYPE_SIZE)
6313 if (precision == LONG_TYPE_SIZE)
6315 if (precision == LONG_LONG_TYPE_SIZE)
6320 if (precision == FLOAT_TYPE_SIZE)
6322 if (precision == DOUBLE_TYPE_SIZE)
6324 if (precision == LONG_DOUBLE_TYPE_SIZE)
6328 case MODE_DECIMAL_FLOAT:
6336 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6337 it branches into the delay slot. Otherwise, return FALSE. */
6340 branch_to_delay_slot_p (rtx insn)
6344 if (dbr_sequence_length ())
6347 jump_insn = next_active_insn (JUMP_LABEL (insn));
6350 insn = next_active_insn (insn);
6351 if (jump_insn == insn)
6354 /* We can't rely on the length of asms, so we return FALSE when
6355 the branch is followed by an asm. */
6357 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6358 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6359 || get_attr_length (insn) > 0)
6366 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6368 This occurs when INSN has an unfilled delay slot and is followed
6369 by an asm. Disaster can occur if the asm is empty and the jump
6370 branches into the delay slot. So, we add a nop in the delay slot
6371 when this occurs. */
6374 branch_needs_nop_p (rtx insn)
6378 if (dbr_sequence_length ())
6381 jump_insn = next_active_insn (JUMP_LABEL (insn));
6384 insn = next_active_insn (insn);
6385 if (!insn || jump_insn == insn)
6388 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6389 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6390 && get_attr_length (insn) > 0)
6397 /* Return TRUE if INSN, a forward jump insn, can use nullification
6398 to skip the following instruction. This avoids an extra cycle due
6399 to a mis-predicted branch when we fall through. */
6402 use_skip_p (rtx insn)
6404 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6408 insn = next_active_insn (insn);
6410 /* We can't rely on the length of asms, so we can't skip asms. */
6412 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6413 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6415 if (get_attr_length (insn) == 4
6416 && jump_insn == next_active_insn (insn))
6418 if (get_attr_length (insn) > 0)
6425 /* This routine handles all the normal conditional branch sequences we
6426 might need to generate. It handles compare immediate vs compare
6427 register, nullification of delay slots, varying length branches,
6428 negated branches, and all combinations of the above. It returns the
6429 output appropriate to emit the branch corresponding to all given parameters. */
6433 pa_output_cbranch (rtx *operands, int negated, rtx insn)
6435 static char buf[100];
6437 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6438 int length = get_attr_length (insn);
6441 /* A conditional branch to the following instruction (e.g. the delay slot)
6442 is asking for a disaster. This can happen when not optimizing and
6443 when jump optimization fails.
6445 While it is usually safe to emit nothing, this can fail if the
6446 preceding instruction is a nullified branch with an empty delay
6447 slot and the same branch target as this branch. We could check
6448 for this but jump optimization should eliminate nop jumps. It
6449 is always safe to emit a nop. */
6450 if (branch_to_delay_slot_p (insn))
6453 /* The doubleword form of the cmpib instruction doesn't have the LEU
6454 and GTU conditions while the cmpb instruction does. Since we accept
6455 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6456 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6457 operands[2] = gen_rtx_REG (DImode, 0);
6458 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6459 operands[1] = gen_rtx_REG (DImode, 0);
6461 /* If this is a long branch with its delay slot unfilled, set `nullify'
6462 as it can nullify the delay slot and save a nop. */
6463 if (length == 8 && dbr_sequence_length () == 0)
6466 /* If this is a short forward conditional branch which did not get
6467 its delay slot filled, the delay slot can still be nullified. */
6468 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6469 nullify = forward_branch_p (insn);
6471 /* A forward branch over a single nullified insn can be done with a
6472 comclr instruction. This avoids a single cycle penalty due to
6473 mis-predicted branch if we fall through (branch not taken). */
6474 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6478 /* All short conditional branches except backwards with an unfilled delay slot. */
6482 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6484 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6485 if (GET_MODE (operands[1]) == DImode)
6488 strcat (buf, "%B3");
6490 strcat (buf, "%S3");
6492 strcat (buf, " %2,%r1,%%r0");
6495 if (branch_needs_nop_p (insn))
6496 strcat (buf, ",n %2,%r1,%0%#");
6498 strcat (buf, ",n %2,%r1,%0");
6501 strcat (buf, " %2,%r1,%0");
6504 /* All long conditionals. Note a short backward branch with an
6505 unfilled delay slot is treated just like a long backward branch
6506 with an unfilled delay slot. */
6508 /* Handle weird backwards branch with a filled delay slot
6509 which is nullified. */
6510 if (dbr_sequence_length () != 0
6511 && ! forward_branch_p (insn)
6514 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6515 if (GET_MODE (operands[1]) == DImode)
6518 strcat (buf, "%S3");
6520 strcat (buf, "%B3");
6521 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6523 /* Handle short backwards branch with an unfilled delay slot.
6524 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6525 taken and untaken branches. */
6526 else if (dbr_sequence_length () == 0
6527 && ! forward_branch_p (insn)
6528 && INSN_ADDRESSES_SET_P ()
6529 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6530 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6532 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6533 if (GET_MODE (operands[1]) == DImode)
6536 strcat (buf, "%B3 %2,%r1,%0%#");
6538 strcat (buf, "%S3 %2,%r1,%0%#");
6542 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6543 if (GET_MODE (operands[1]) == DImode)
6546 strcat (buf, "%S3");
6548 strcat (buf, "%B3");
6550 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6552 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6557 /* The reversed conditional branch must branch over one additional
6558 instruction if the delay slot is filled and needs to be extracted
6559 by pa_output_lbranch. If the delay slot is empty or this is a
6560 nullified forward branch, the instruction after the reversed
6561 conditional branch must be nullified. */
6562 if (dbr_sequence_length () == 0
6563 || (nullify && forward_branch_p (insn)))
6567 operands[4] = GEN_INT (length);
6572 operands[4] = GEN_INT (length + 4);
6575 /* Create a reversed conditional branch which branches around
6576 the following insns. */
6577 if (GET_MODE (operands[1]) != DImode)
6583 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6586 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6592 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6595 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6604 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6607 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6613 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6616 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6620 output_asm_insn (buf, operands);
6621 return pa_output_lbranch (operands[0], insn, xdelay);
6626 /* This routine handles output of long unconditional branches that
6627 exceed the maximum range of a simple branch instruction. Since
6628 we don't have a register available for the branch, we save register
6629 %r1 in the frame marker, load the branch destination DEST into %r1,
6630 execute the branch, and restore %r1 in the delay slot of the branch.
6632 Since long branches may have an insn in the delay slot and the
6633 delay slot is used to restore %r1, we in general need to extract
6634 this insn and execute it before the branch. However, to facilitate
6635 use of this function by conditional branches, we also provide an
6636 option to not extract the delay insn so that it will be emitted
6637 after the long branch. So, if there is an insn in the delay slot,
6638 it is extracted if XDELAY is nonzero.
6640 The lengths of the various long-branch sequences are 20, 16 and 24
6641 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
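/* For example, the 16-byte non-PIC sequence emitted below looks like
   this when the return pointer slot is usable ("dest" stands for the
   branch target label):

	stw %r1,-20(%r30)	; save %r1 in the frame marker
	ldil L'dest,%r1		; load the left portion of dest
	be R'dest(%sr4,%r1)	; branch external via %r1
	ldw -20(%r30),%r1	; restore %r1 in the delay slot  */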
6644 pa_output_lbranch (rtx dest, rtx insn, int xdelay)
6648 xoperands[0] = dest;
6650 /* First, free up the delay slot. */
6651 if (xdelay && dbr_sequence_length () != 0)
6653 /* We can't handle a jump in the delay slot. */
6654 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6656 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6659 /* Now delete the delay insn. */
6660 SET_INSN_DELETED (NEXT_INSN (insn));
6663 /* Output an insn to save %r1. The runtime documentation doesn't
6664 specify whether the "Clean Up" slot in the caller's frame can
6665 be clobbered by the callee. It isn't copied by HP's builtin
6666 alloca, so this suggests that it can be clobbered if necessary.
6667 The "Static Link" location is copied by HP builtin alloca, so
6668 we avoid using it. Using the cleanup slot might be a problem
6669 if we have to interoperate with languages that pass cleanup
6670 information. However, it should be possible to handle these
6671 situations with GCC's asm feature.
6673 The "Current RP" slot is reserved for the called procedure, so
6674 we try to use it when we don't have a frame of our own. It's
6675 rather unlikely that we won't have a frame when we need to emit a very long branch.
6678 Really the way to go long term is a register scavenger; go to
6679 the target of the jump and find a register which we can use
6680 as a scratch to hold the value in %r1. Then, we wouldn't have
6681 to free up the delay slot or clobber a slot that may be needed
6682 for other purposes. */
6685 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6686 /* Use the return pointer slot in the frame marker. */
6687 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6689 /* Use the slot at -40 in the frame marker since HP builtin
6690 alloca doesn't copy it. */
6691 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6695 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6696 /* Use the return pointer slot in the frame marker. */
6697 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6699 /* Use the "Clean Up" slot in the frame marker. In GCC,
6700 the only other use of this location is for copying a
6701 floating point double argument from a floating-point
6702 register to two general registers. The copy is done
6703 as an "atomic" operation when outputting a call, so it
6704 won't interfere with our using the location here. */
6705 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6708 if (TARGET_PORTABLE_RUNTIME)
6710 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6711 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6712 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6716 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6717 if (TARGET_SOM || !TARGET_GAS)
6719 xoperands[1] = gen_label_rtx ();
6720 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6721 targetm.asm_out.internal_label (asm_out_file, "L",
6722 CODE_LABEL_NUMBER (xoperands[1]));
6723 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6727 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6728 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6730 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6733 /* Now output a very long branch to the original target. */
6734 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6736 /* Now restore the value of %r1 in the delay slot. */
6739 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6740 return "ldd -16(%%r30),%%r1";
6742 return "ldd -40(%%r30),%%r1";
6746 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6747 return "ldw -20(%%r30),%%r1";
6749 return "ldw -12(%%r30),%%r1";
6753 /* This routine handles all the branch-on-bit conditional branch sequences we
6754 might need to generate. It handles nullification of delay slots,
6755 varying length branches, negated branches and all combinations of the
6756 above. It returns the appropriate output template to emit the branch. */
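/* A typical short form reads (sketch): "bb,< %r4,5,L$0002" branches
   to L$0002 when bit 5 of %r4 (bits are numbered from the most
   significant end) is one; the ">=" completer tests for zero.  */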
6759 pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6761 static char buf[100];
6763 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6764 int length = get_attr_length (insn);
6767 /* A conditional branch to the following instruction (e.g. the delay slot) is
6768 asking for a disaster. I do not think this can happen as this pattern
6769 is only used when optimizing; jump optimization should eliminate the
6770 jump. But be prepared just in case. */
6772 if (branch_to_delay_slot_p (insn))
6775 /* If this is a long branch with its delay slot unfilled, set `nullify'
6776 as it can nullify the delay slot and save a nop. */
6777 if (length == 8 && dbr_sequence_length () == 0)
6780 /* If this is a short forward conditional branch which did not get
6781 its delay slot filled, the delay slot can still be nullified. */
6782 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6783 nullify = forward_branch_p (insn);
6785 /* A forward branch over a single nullified insn can be done with an
6786 extrs instruction. This avoids a single cycle penalty due to a
6787 mis-predicted branch if we fall through (branch not taken). */
6788 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6793 /* All short conditional branches except backwards with an unfilled delay slot. */
6797 strcpy (buf, "{extrs,|extrw,s,}");
6799 strcpy (buf, "bb,");
6800 if (useskip && GET_MODE (operands[0]) == DImode)
6801 strcpy (buf, "extrd,s,*");
6802 else if (GET_MODE (operands[0]) == DImode)
6803 strcpy (buf, "bb,*");
6804 if ((which == 0 && negated)
6805 || (which == 1 && ! negated))
6810 strcat (buf, " %0,%1,1,%%r0");
6811 else if (nullify && negated)
6813 if (branch_needs_nop_p (insn))
6814 strcat (buf, ",n %0,%1,%3%#");
6816 strcat (buf, ",n %0,%1,%3");
6818 else if (nullify && ! negated)
6820 if (branch_needs_nop_p (insn))
6821 strcat (buf, ",n %0,%1,%2%#");
6823 strcat (buf, ",n %0,%1,%2");
6825 else if (! nullify && negated)
6826 strcat (buf, " %0,%1,%3");
6827 else if (! nullify && ! negated)
6828 strcat (buf, " %0,%1,%2");
6831 /* All long conditionals. Note a short backward branch with an
6832 unfilled delay slot is treated just like a long backward branch
6833 with an unfilled delay slot. */
6835 /* Handle weird backwards branch with a filled delay slot
6836 which is nullified. */
6837 if (dbr_sequence_length () != 0
6838 && ! forward_branch_p (insn)
6841 strcpy (buf, "bb,");
6842 if (GET_MODE (operands[0]) == DImode)
6844 if ((which == 0 && negated)
6845 || (which == 1 && ! negated))
6850 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6852 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6854 /* Handle short backwards branch with an unfilled delay slot.
6855 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6856 taken and untaken branches. */
6857 else if (dbr_sequence_length () == 0
6858 && ! forward_branch_p (insn)
6859 && INSN_ADDRESSES_SET_P ()
6860 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6861 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6863 strcpy (buf, "bb,");
6864 if (GET_MODE (operands[0]) == DImode)
6866 if ((which == 0 && negated)
6867 || (which == 1 && ! negated))
6872 strcat (buf, " %0,%1,%3%#");
6874 strcat (buf, " %0,%1,%2%#");
6878 if (GET_MODE (operands[0]) == DImode)
6879 strcpy (buf, "extrd,s,*");
6881 strcpy (buf, "{extrs,|extrw,s,}");
6882 if ((which == 0 && negated)
6883 || (which == 1 && ! negated))
6887 if (nullify && negated)
6888 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6889 else if (nullify && ! negated)
6890 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6892 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6894 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6899 /* The reversed conditional branch must branch over one additional
6900 instruction if the delay slot is filled and needs to be extracted
6901 by pa_output_lbranch. If the delay slot is empty or this is a
6902 nullified forward branch, the instruction after the reversed
6903 conditional branch must be nullified. */
6904 if (dbr_sequence_length () == 0
6905 || (nullify && forward_branch_p (insn)))
6909 operands[4] = GEN_INT (length);
6914 operands[4] = GEN_INT (length + 4);
6917 if (GET_MODE (operands[0]) == DImode)
6918 strcpy (buf, "bb,*");
6920 strcpy (buf, "bb,");
6921 if ((which == 0 && negated)
6922 || (which == 1 && !negated))
6927 strcat (buf, ",n %0,%1,.+%4");
6929 strcat (buf, " %0,%1,.+%4");
6930 output_asm_insn (buf, operands);
6931 return pa_output_lbranch (negated ? operands[3] : operands[2],
6937 /* This routine handles all the branch-on-variable-bit conditional branch
6938 sequences we might need to generate. It handles nullification of delay
6939 slots, varying length branches, negated branches and all combinations
6940 of the above. It returns the appropriate output template to emit the branch. */
6944 pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
6947 static char buf[100];
6949 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6950 int length = get_attr_length (insn);
6953 /* A conditional branch to the following instruction (e.g. the delay slot) is
6954 asking for a disaster. I do not think this can happen as this pattern
6955 is only used when optimizing; jump optimization should eliminate the
6956 jump. But be prepared just in case. */
6958 if (branch_to_delay_slot_p (insn))
6961 /* If this is a long branch with its delay slot unfilled, set `nullify'
6962 as it can nullify the delay slot and save a nop. */
6963 if (length == 8 && dbr_sequence_length () == 0)
6966 /* If this is a short forward conditional branch which did not get
6967 its delay slot filled, the delay slot can still be nullified. */
6968 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6969 nullify = forward_branch_p (insn);
6971 /* A forward branch over a single nullified insn can be done with an
6972 extrs instruction. This avoids a single cycle penalty due to a
6973 mis-predicted branch if we fall through (branch not taken). */
6974 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6979 /* All short conditional branches except backwards with an unfilled delay slot. */
6983 strcpy (buf, "{vextrs,|extrw,s,}");
6985 strcpy (buf, "{bvb,|bb,}");
6986 if (useskip && GET_MODE (operands[0]) == DImode)
6987 strcpy (buf, "extrd,s,*");
6988 else if (GET_MODE (operands[0]) == DImode)
6989 strcpy (buf, "bb,*");
6990 if ((which == 0 && negated)
6991 || (which == 1 && ! negated))
6996 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6997 else if (nullify && negated)
6999 if (branch_needs_nop_p (insn))
7000 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
7002 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
7004 else if (nullify && ! negated)
7006 if (branch_needs_nop_p (insn))
7007 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
7009 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
7011 else if (! nullify && negated)
7012 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
7013 else if (! nullify && ! negated)
7014 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
7017 /* All long conditionals. Note a short backward branch with an
7018 unfilled delay slot is treated just like a long backward branch
7019 with an unfilled delay slot. */
7021 /* Handle weird backwards branch with a filled delay slot
7022 which is nullified. */
7023 if (dbr_sequence_length () != 0
7024 && ! forward_branch_p (insn)
7027 strcpy (buf, "{bvb,|bb,}");
7028 if (GET_MODE (operands[0]) == DImode)
7030 if ((which == 0 && negated)
7031 || (which == 1 && ! negated))
7036 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
7038 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
7040 /* Handle short backwards branch with an unfilled delay slot.
7041 Using a bb;nop rather than extrs;bl saves 1 cycle for both
7042 taken and untaken branches. */
7043 else if (dbr_sequence_length () == 0
7044 && ! forward_branch_p (insn)
7045 && INSN_ADDRESSES_SET_P ()
7046 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7047 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7049 strcpy (buf, "{bvb,|bb,}");
7050 if (GET_MODE (operands[0]) == DImode)
7052 if ((which == 0 && negated)
7053 || (which == 1 && ! negated))
7058 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
7060 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
7064 strcpy (buf, "{vextrs,|extrw,s,}");
7065 if (GET_MODE (operands[0]) == DImode)
7066 strcpy (buf, "extrd,s,*");
7067 if ((which == 0 && negated)
7068 || (which == 1 && ! negated))
7072 if (nullify && negated)
7073 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
7074 else if (nullify && ! negated)
7075 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
7077 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
7079 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
7084 /* The reversed conditional branch must branch over one additional
7085 instruction if the delay slot is filled and needs to be extracted
7086 by pa_output_lbranch. If the delay slot is empty or this is a
7087 nullified forward branch, the instruction after the reversed
7088 conditional branch must be nullified. */
7089 if (dbr_sequence_length () == 0
7090 || (nullify && forward_branch_p (insn)))
7094 operands[4] = GEN_INT (length);
7099 operands[4] = GEN_INT (length + 4);
7102 if (GET_MODE (operands[0]) == DImode)
7103 strcpy (buf, "bb,*");
7105 strcpy (buf, "{bvb,|bb,}");
7106 if ((which == 0 && negated)
7107 || (which == 1 && !negated))
7112 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
7114 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
7115 output_asm_insn (buf, operands);
7116 return pa_output_lbranch (negated ? operands[3] : operands[2],
7122 /* Return the output template for emitting a dbra type insn.
7124 Note it may perform some output operations on its own before
7125 returning the final output string. */
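/* A typical result is (sketch): "addib,<> -1,%r3,L$0001", which adds
   -1 to %r3 and branches back to L$0001 while the result is nonzero,
   i.e. a one-instruction counted-loop update.  */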
7127 pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
7129 int length = get_attr_length (insn);
7131 /* A conditional branch to the following instruction (e.g. the delay slot) is
7132 asking for a disaster. Be prepared! */
7134 if (branch_to_delay_slot_p (insn))
7136 if (which_alternative == 0)
7137 return "ldo %1(%0),%0";
7138 else if (which_alternative == 1)
7140 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7141 output_asm_insn ("ldw -16(%%r30),%4", operands);
7142 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7143 return "{fldws|fldw} -16(%%r30),%0";
7147 output_asm_insn ("ldw %0,%4", operands);
7148 return "ldo %1(%4),%4\n\tstw %4,%0";
7152 if (which_alternative == 0)
7154 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7157 /* If this is a long branch with its delay slot unfilled, set `nullify'
7158 as it can nullify the delay slot and save a nop. */
7159 if (length == 8 && dbr_sequence_length () == 0)
7162 /* If this is a short forward conditional branch which did not get
7163 its delay slot filled, the delay slot can still be nullified. */
7164 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7165 nullify = forward_branch_p (insn);
7172 if (branch_needs_nop_p (insn))
7173 return "addib,%C2,n %1,%0,%3%#";
7175 return "addib,%C2,n %1,%0,%3";
7178 return "addib,%C2 %1,%0,%3";
7181 /* Handle weird backwards branch with a filled delay slot
7182 which is nullified. */
7183 if (dbr_sequence_length () != 0
7184 && ! forward_branch_p (insn)
7186 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7187 /* Handle short backwards branch with an unfilled delay slot.
7188 Using an addb;nop rather than addi;bl saves 1 cycle for both
7189 taken and untaken branches. */
7190 else if (dbr_sequence_length () == 0
7191 && ! forward_branch_p (insn)
7192 && INSN_ADDRESSES_SET_P ()
7193 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7194 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7195 return "addib,%C2 %1,%0,%3%#";
7197 /* Handle normal cases. */
7199 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7201 return "addi,%N2 %1,%0,%0\n\tb %3";
7204 /* The reversed conditional branch must branch over one additional
7205 instruction if the delay slot is filled and needs to be extracted
7206 by pa_output_lbranch. If the delay slot is empty or this is a
7207 nullified forward branch, the instruction after the reversed
7208 conditional branch must be nullified. */
7209 if (dbr_sequence_length () == 0
7210 || (nullify && forward_branch_p (insn)))
7214 operands[4] = GEN_INT (length);
7219 operands[4] = GEN_INT (length + 4);
7223 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7225 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7227 return pa_output_lbranch (operands[3], insn, xdelay);
7231 /* Deal with gross reload from FP register case. */
7232 else if (which_alternative == 1)
7234 /* Move loop counter from FP register to MEM then into a GR,
7235 increment the GR, store the GR into MEM, and finally reload
7236 the FP register from MEM from within the branch's delay slot. */
7237 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7239 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7241 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7242 else if (length == 28)
7243 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7246 operands[5] = GEN_INT (length - 16);
7247 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7248 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7249 return pa_output_lbranch (operands[3], insn, 0);
7252 /* Deal with gross reload from memory case. */
7255 /* Reload loop counter from memory, the store back to memory
7256 happens in the branch's delay slot. */
7257 output_asm_insn ("ldw %0,%4", operands);
7259 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7260 else if (length == 16)
7261 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7264 operands[5] = GEN_INT (length - 4);
7265 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7266 return pa_output_lbranch (operands[3], insn, 0);
7271 /* Return the output template for emitting a movb type insn.
7273 Note it may perform some output operations on its own before
7274 returning the final output string. */
7276 pa_output_movb (rtx *operands, rtx insn, int which_alternative,
7277 int reverse_comparison)
7279 int length = get_attr_length (insn);
7281 /* A conditional branch to the following instruction (e.g. the delay slot) is
7282 asking for a disaster. Be prepared! */
7284 if (branch_to_delay_slot_p (insn))
7286 if (which_alternative == 0)
7287 return "copy %1,%0";
7288 else if (which_alternative == 1)
7290 output_asm_insn ("stw %1,-16(%%r30)", operands);
7291 return "{fldws|fldw} -16(%%r30),%0";
7293 else if (which_alternative == 2)
7299 /* Support the second variant. */
7300 if (reverse_comparison)
7301 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7303 if (which_alternative == 0)
7305 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7308 /* If this is a long branch with its delay slot unfilled, set `nullify'
7309 as it can nullify the delay slot and save a nop. */
7310 if (length == 8 && dbr_sequence_length () == 0)
7313 /* If this is a short forward conditional branch which did not get
7314 its delay slot filled, the delay slot can still be nullified. */
7315 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7316 nullify = forward_branch_p (insn);
7323 if (branch_needs_nop_p (insn))
7324 return "movb,%C2,n %1,%0,%3%#";
7326 return "movb,%C2,n %1,%0,%3";
7329 return "movb,%C2 %1,%0,%3";
7332 /* Handle weird backwards branch with a filled delay slot
7333 which is nullified. */
7334 if (dbr_sequence_length () != 0
7335 && ! forward_branch_p (insn)
7337 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7339 /* Handle short backwards branch with an unfilled delay slot.
7340 Using a movb;nop rather than or;bl saves 1 cycle for both
7341 taken and untaken branches. */
7342 else if (dbr_sequence_length () == 0
7343 && ! forward_branch_p (insn)
7344 && INSN_ADDRESSES_SET_P ()
7345 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7346 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7347 return "movb,%C2 %1,%0,%3%#";
7348 /* Handle normal cases. */
7350 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7352 return "or,%N2 %1,%%r0,%0\n\tb %3";
7355 /* The reversed conditional branch must branch over one additional
7356 instruction if the delay slot is filled and needs to be extracted
7357 by pa_output_lbranch. If the delay slot is empty or this is a
7358 nullified forward branch, the instruction after the reversed
7359 conditional branch must be nullified. */
7360 if (dbr_sequence_length () == 0
7361 || (nullify && forward_branch_p (insn)))
7365 operands[4] = GEN_INT (length);
7370 operands[4] = GEN_INT (length + 4);
7374 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7376 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7378 return pa_output_lbranch (operands[3], insn, xdelay);
7381 /* Deal with gross reload for FP destination register case. */
7382 else if (which_alternative == 1)
7384 /* Move source register to MEM, perform the branch test, then
7385 finally load the FP register from MEM from within the branch's delay slot. */
7387 output_asm_insn ("stw %1,-16(%%r30)", operands);
7389 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7390 else if (length == 16)
7391 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7394 operands[4] = GEN_INT (length - 4);
7395 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7396 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7397 return pa_output_lbranch (operands[3], insn, 0);
7400 /* Deal with gross reload from memory case. */
7401 else if (which_alternative == 2)
7403 /* Reload loop counter from memory, the store back to memory
7404 happens in the branch's delay slot. */
7406 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7407 else if (length == 12)
7408 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7411 operands[4] = GEN_INT (length);
7412 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7414 return pa_output_lbranch (operands[3], insn, 0);
7417 /* Handle SAR as a destination. */
7421 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7422 else if (length == 12)
7423 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7426 operands[4] = GEN_INT (length);
7427 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7429 return pa_output_lbranch (operands[3], insn, 0);
7434 /* Copy any FP arguments in INSN into integer registers. */
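/* The PA has no direct move between the floating-point and general
   registers, so each value is bounced through the frame marker.  For
   an SFmode argument the pair emitted below looks like this (the
   register pairing shown is just one possibility):

	fstw %fr4,-16(%sr0,%r30)	; spill the FP argument register
	ldw -16(%sr0,%r30),%r26		; reload it into the GR argument

   A DFmode argument uses fstd plus two ldw's, one per word.  */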
7436 copy_fp_args (rtx insn)
7441 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7443 int arg_mode, regno;
7444 rtx use = XEXP (link, 0);
7446 if (! (GET_CODE (use) == USE
7447 && GET_CODE (XEXP (use, 0)) == REG
7448 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7451 arg_mode = GET_MODE (XEXP (use, 0));
7452 regno = REGNO (XEXP (use, 0));
7454 /* Is it a floating point register? */
7455 if (regno >= 32 && regno <= 39)
7457 /* Copy the FP register into an integer register via memory. */
7458 if (arg_mode == SFmode)
7460 xoperands[0] = XEXP (use, 0);
7461 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7462 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7463 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7467 xoperands[0] = XEXP (use, 0);
7468 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7469 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7470 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7471 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7477 /* Compute length of the FP argument copy sequence for INSN. */
7479 length_fp_args (rtx insn)
7484 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7486 int arg_mode, regno;
7487 rtx use = XEXP (link, 0);
7489 if (! (GET_CODE (use) == USE
7490 && GET_CODE (XEXP (use, 0)) == REG
7491 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7494 arg_mode = GET_MODE (XEXP (use, 0));
7495 regno = REGNO (XEXP (use, 0));
7497 /* Is it a floating point register? */
7498 if (regno >= 32 && regno <= 39)
7500 if (arg_mode == SFmode)
7510 /* Return the attribute length for the millicode call instruction INSN.
7511 The length must match the code generated by pa_output_millicode_call.
7512 We include the delay slot in the returned length as it is better to
7513 overestimate the length than to underestimate it. */
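/* For illustration, the common in-reach case measured here is the
   8-byte pair emitted by pa_output_millicode_call, e.g. for the
   $$mulI millicode routine on a 32-bit target:

	bl $$mulI,%r31		; 4 bytes
	nop			; 4 bytes, the (possibly filled) delay slot  */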
7516 pa_attr_length_millicode_call (rtx insn)
7518 unsigned long distance = -1;
7519 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7521 if (INSN_ADDRESSES_SET_P ())
7523 distance = (total + insn_current_reference_address (insn));
7524 if (distance < total)
7530 if (!TARGET_LONG_CALLS && distance < 7600000)
7535 else if (TARGET_PORTABLE_RUNTIME)
7539 if (!TARGET_LONG_CALLS && distance < MAX_PCREL17F_OFFSET)
7542 if (TARGET_LONG_ABS_CALL && !flag_pic)
7549 /* INSN is a function call. It may have an unconditional jump in its delay slot.
7552 CALL_DEST is the routine we are calling. */
7555 pa_output_millicode_call (rtx insn, rtx call_dest)
7557 int attr_length = get_attr_length (insn);
7558 int seq_length = dbr_sequence_length ();
7563 xoperands[0] = call_dest;
7564 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7566 /* Handle the common case where we are sure that the branch will
7567 reach the beginning of the $CODE$ subspace. The within reach
7568 form of the $$sh_func_adrs call has a length of 28. Because it
7569 has an attribute type of sh_func_adrs, it never has a nonzero
7570 sequence length (i.e., the delay slot is never filled). */
7571 if (!TARGET_LONG_CALLS
7572 && (attr_length == 8
7573 || (attr_length == 28
7574 && get_attr_type (insn) == TYPE_SH_FUNC_ADRS)))
7576 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7582 /* It might seem that one insn could be saved by accessing
7583 the millicode function using the linkage table. However,
7584 this doesn't work in shared libraries and other dynamically
7585 loaded objects. Using a pc-relative sequence also avoids
7586 problems related to the implicit use of the gp register. */
7587 output_asm_insn ("b,l .+8,%%r1", xoperands);
7591 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7592 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7596 xoperands[1] = gen_label_rtx ();
7597 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7598 targetm.asm_out.internal_label (asm_out_file, "L",
7599 CODE_LABEL_NUMBER (xoperands[1]));
7600 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7603 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7605 else if (TARGET_PORTABLE_RUNTIME)
7607 /* Pure portable runtime doesn't allow be/ble; we also don't
7608 have PIC support in the assembler/linker, so this sequence is needed. */
7611 /* Get the address of our target into %r1. */
7612 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7613 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7615 /* Get our return address into %r31. */
7616 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7617 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7619 /* Jump to our target address in %r1. */
7620 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7624 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7626 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7628 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7632 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7633 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7635 if (TARGET_SOM || !TARGET_GAS)
7637 /* The HP assembler can generate relocations for the
7638 difference of two symbols. GAS can do this for a
7639 millicode symbol but not an arbitrary external
7640 symbol when generating SOM output. */
7641 xoperands[1] = gen_label_rtx ();
7642 targetm.asm_out.internal_label (asm_out_file, "L",
7643 CODE_LABEL_NUMBER (xoperands[1]));
7644 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7645 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7649 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7650 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7654 /* Jump to our target address in %r1. */
7655 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7659 if (seq_length == 0)
7660 output_asm_insn ("nop", xoperands);
7662 /* We are done if there isn't a jump in the delay slot. */
7663 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7666 /* This call has an unconditional jump in its delay slot. */
7667 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7669 /* See if the return address can be adjusted. Use the containing
7670 sequence insn's address. */
7671 if (INSN_ADDRESSES_SET_P ())
7673 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7674 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7675 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7677 if (VAL_14_BITS_P (distance))
7679 xoperands[1] = gen_label_rtx ();
7680 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7681 targetm.asm_out.internal_label (asm_out_file, "L",
7682 CODE_LABEL_NUMBER (xoperands[1]));
7685 /* ??? This branch may not reach its target. */
7686 output_asm_insn ("nop\n\tb,n %0", xoperands);
7689 /* ??? This branch may not reach its target. */
7690 output_asm_insn ("nop\n\tb,n %0", xoperands);
7692 /* Delete the jump. */
7693 SET_INSN_DELETED (NEXT_INSN (insn));
7698 /* Return the attribute length of the call instruction INSN. The SIBCALL
7699 flag indicates whether INSN is a regular call or a sibling call. The
7700 length returned must be longer than the code actually generated by
7701 pa_output_call. Since branch shortening is done before delay branch
7702 sequencing, there is no way to determine whether or not the delay
7703 slot will be filled during branch shortening. Even when the delay
7704 slot is filled, we may have to add a nop if the delay slot contains
7705 a branch that can't reach its target. Thus, we always have to include
7706 the delay slot in the length estimate. This used to be done in
7707 pa_adjust_insn_length but we do it here now as some sequences always
7708 fill the delay slot and we can save four bytes in the estimate for these sequences. */
7712 pa_attr_length_call (rtx insn, int sibcall)
7715 rtx call, call_dest;
7718 rtx pat = PATTERN (insn);
7719 unsigned long distance = -1;
7721 gcc_assert (GET_CODE (insn) == CALL_INSN);
7723 if (INSN_ADDRESSES_SET_P ())
7725 unsigned long total;
7727 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7728 distance = (total + insn_current_reference_address (insn));
7729 if (distance < total)
7733 gcc_assert (GET_CODE (pat) == PARALLEL);
7735 /* Get the call rtx. */
7736 call = XVECEXP (pat, 0, 0);
7737 if (GET_CODE (call) == SET)
7738 call = SET_SRC (call);
7740 gcc_assert (GET_CODE (call) == CALL);
7742 /* Determine if this is a local call. */
7743 call_dest = XEXP (XEXP (call, 0), 0);
7744 call_decl = SYMBOL_REF_DECL (call_dest);
7745 local_call = call_decl && targetm.binds_local_p (call_decl);
7747 /* pc-relative branch. */
7748 if (!TARGET_LONG_CALLS
7749 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7750 || distance < MAX_PCREL17F_OFFSET))
7753 /* 64-bit plabel sequence. */
7754 else if (TARGET_64BIT && !local_call)
7755 length += sibcall ? 28 : 24;
7757 /* non-pic long absolute branch sequence. */
7758 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7761 /* long pc-relative branch sequence. */
7762 else if (TARGET_LONG_PIC_SDIFF_CALL
7763 || (TARGET_GAS && !TARGET_SOM
7764 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7768 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7772 /* 32-bit plabel sequence. */
7778 length += length_fp_args (insn);
7788 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7796 /* INSN is a function call. It may have an unconditional jump in its delay slot.
7799 CALL_DEST is the routine we are calling. */
7802 pa_output_call (rtx insn, rtx call_dest, int sibcall)
7804 int delay_insn_deleted = 0;
7805 int delay_slot_filled = 0;
7806 int seq_length = dbr_sequence_length ();
7807 tree call_decl = SYMBOL_REF_DECL (call_dest);
7808 int local_call = call_decl && targetm.binds_local_p (call_decl);
7811 xoperands[0] = call_dest;
7813 /* Handle the common case where we're sure that the branch will reach
7814 the beginning of the "$CODE$" subspace. This is the beginning of
7815 the current function if we are in a named section. */
7816 if (!TARGET_LONG_CALLS && pa_attr_length_call (insn, sibcall) == 8)
7818 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7819 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7823 if (TARGET_64BIT && !local_call)
7825 /* ??? As far as I can tell, the HP linker doesn't support the
7826 long pc-relative sequence described in the 64-bit runtime
7827 architecture. So, we use a slightly longer indirect call. */
7828 xoperands[0] = pa_get_deferred_plabel (call_dest);
7829 xoperands[1] = gen_label_rtx ();
7831 /* If this isn't a sibcall, we put the load of %r27 into the
7832 delay slot. We can't do this in a sibcall as we don't
7833 have a second call-clobbered scratch register available. */
7835 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7838 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7841 /* Now delete the delay insn. */
7842 SET_INSN_DELETED (NEXT_INSN (insn));
7843 delay_insn_deleted = 1;
7846 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7847 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7848 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7852 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7853 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7854 output_asm_insn ("bve (%%r1)", xoperands);
7858 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7859 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7860 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7861 delay_slot_filled = 1;
7866 int indirect_call = 0;
7868 /* Emit a long call. There are several different sequences
7869 of increasing length and complexity. In most cases,
7870 they don't allow an instruction in the delay slot. */
7871 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7872 && !TARGET_LONG_PIC_SDIFF_CALL
7873 && !(TARGET_GAS && !TARGET_SOM
7874 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7879 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7883 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7885 /* A non-jump insn in the delay slot. By definition we can
7886 emit this insn before the call (and in fact before argument relocation). */
7888 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7891 /* Now delete the delay insn. */
7892 SET_INSN_DELETED (NEXT_INSN (insn));
7893 delay_insn_deleted = 1;
7896 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7898 /* This is the best sequence for making long calls in
7899 non-pic code. Unfortunately, GNU ld doesn't provide
7900 the stub needed for external calls, and GAS's support
7901 for this with the SOM linker is buggy. It is safe
7902 to use this for local calls. */
7903 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7905 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7909 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7912 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7914 output_asm_insn ("copy %%r31,%%r2", xoperands);
7915 delay_slot_filled = 1;
7920 if (TARGET_LONG_PIC_SDIFF_CALL)
7922 /* The HP assembler and linker can handle relocations
7923 for the difference of two symbols. The HP assembler
7924 recognizes the sequence as a pc-relative call and
7925 the linker provides stubs when needed. */
7926 xoperands[1] = gen_label_rtx ();
7927 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7928 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7929 targetm.asm_out.internal_label (asm_out_file, "L",
7930 CODE_LABEL_NUMBER (xoperands[1]));
7931 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7933 else if (TARGET_GAS && !TARGET_SOM
7934 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7936 /* GAS currently can't generate the relocations that
7937 are needed for the SOM linker under HP-UX using this
7938 sequence. The GNU linker doesn't generate the stubs
7939 that are needed for external calls on TARGET_ELF32
7940 with this sequence. For now, we have to use a
7941 longer plabel sequence when using GAS. */
7942 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7943 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7945 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7950 /* Emit a long plabel-based call sequence. This is
7951 essentially an inline implementation of $$dyncall.
7952 We don't actually try to call $$dyncall as this is
7953 as difficult as calling the function itself. */
7954 xoperands[0] = pa_get_deferred_plabel (call_dest);
7955 xoperands[1] = gen_label_rtx ();
7957 /* Since the call is indirect, FP arguments in registers
7958 need to be copied to the general registers. Then, the
7959 argument relocation stub will copy them back. */
7961 copy_fp_args (insn);
7965 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7966 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7967 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7971 output_asm_insn ("addil LR'%0-$global$,%%r27",
7973 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7977 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7978 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7979 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7980 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7982 if (!sibcall && !TARGET_PA_20)
7984 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7985 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7986 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7988 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7995 output_asm_insn ("bve (%%r1)", xoperands);
8000 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8001 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
8002 delay_slot_filled = 1;
8005 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
8010 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
8011 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
8016 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8017 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
8019 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
8023 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
8024 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
8026 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
8029 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
8031 output_asm_insn ("copy %%r31,%%r2", xoperands);
8032 delay_slot_filled = 1;
8039 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
8040 output_asm_insn ("nop", xoperands);
8042 /* We are done if there isn't a jump in the delay slot. */
8044 || delay_insn_deleted
8045 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
8048 /* A sibcall should never have a branch in the delay slot. */
8049 gcc_assert (!sibcall);
8051 /* This call has an unconditional jump in its delay slot. */
8052 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
8054 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
8056 /* See if the return address can be adjusted. Use the containing
8057 sequence insn's address. This would break the regular call/return
8058 relationship assumed by the table-based EH unwinder, so only do that
8059 if the call is not possibly throwing. */
8060 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
8061 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
8062 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
8064 if (VAL_14_BITS_P (distance)
8065 && !(can_throw_internal (insn) || can_throw_external (insn)))
8067 xoperands[1] = gen_label_rtx ();
8068 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
8069 targetm.asm_out.internal_label (asm_out_file, "L",
8070 CODE_LABEL_NUMBER (xoperands[1]));
8073 output_asm_insn ("nop\n\tb,n %0", xoperands);
8076 output_asm_insn ("b,n %0", xoperands);
8078 /* Delete the jump. */
8079 SET_INSN_DELETED (NEXT_INSN (insn));
8084 /* Return the attribute length of the indirect call instruction INSN.
8085 The length must match the code generated by pa_output_indirect_call.
8086 The returned length includes the delay slot. Currently, the delay
8087 slot of an indirect call sequence is not exposed and it is used by
8088 the sequence itself. */
8091 pa_attr_length_indirect_call (rtx insn)
8093 unsigned long distance = -1;
8094 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
8096 if (INSN_ADDRESSES_SET_P ())
8098 distance = (total + insn_current_reference_address (insn));
8099 if (distance < total)
8106 if (TARGET_FAST_INDIRECT_CALLS
8107 || (!TARGET_PORTABLE_RUNTIME
8108 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
8109 || distance < MAX_PCREL17F_OFFSET)))
8115 if (TARGET_PORTABLE_RUNTIME)
8118 /* Out of reach, can use ble. */
8123 pa_output_indirect_call (rtx insn, rtx call_dest)
8129 xoperands[0] = call_dest;
8130 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8131 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8135 /* First the special case for kernels, level 0 systems, etc. */
8136 if (TARGET_FAST_INDIRECT_CALLS)
8137 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8139 /* Now the normal case -- we can reach $$dyncall directly or
8140 we're sure that we can get there via a long-branch stub.
8142 No need to check target flags as the length uniquely identifies
8143 the remaining cases. */
8144 if (pa_attr_length_indirect_call (insn) == 8)
8146 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8147 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8148 variant of the B,L instruction can't be used on the SOM target. */
8149 if (TARGET_PA_20 && !TARGET_SOM)
8150 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8152 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8155 /* Long millicode call, but we are not generating PIC or portable runtime code. */
8157 if (pa_attr_length_indirect_call (insn) == 12)
8158 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8160 /* Long millicode call for portable runtime. */
8161 if (pa_attr_length_indirect_call (insn) == 20)
8162 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
8164 /* We need a long PIC call to $$dyncall. */
8165 xoperands[0] = NULL_RTX;
8166 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8167 if (TARGET_SOM || !TARGET_GAS)
8169 xoperands[0] = gen_label_rtx ();
8170 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
8171 targetm.asm_out.internal_label (asm_out_file, "L",
8172 CODE_LABEL_NUMBER (xoperands[0]));
8173 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8177 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
8178 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8181 output_asm_insn ("blr %%r0,%%r2", xoperands);
8182 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
8186 /* In HPUX 8.0's shared library scheme, special relocations are needed
8187 for function labels if they might be passed to a function
8188 in a shared library (because shared libraries don't live in code
8189 space), and special magic is needed to construct their address. */
8192 pa_encode_label (rtx sym)
8194 const char *str = XSTR (sym, 0);
8195 int len = strlen (str) + 1;
8198 p = newstr = XALLOCAVEC (char, len + 1);
8202 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
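/* A minimal usage sketch (hypothetical; not part of the compiler).
   Judging from pa_strip_name_encoding below, the encoding prepends
   '@' to the symbol name:  */
#if 0
rtx sym = gen_rtx_SYMBOL_REF (Pmode, "foo");
pa_encode_label (sym);
/* XSTR (sym, 0) now reads "@foo".  */
#endif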
8206 pa_encode_section_info (tree decl, rtx rtl, int first)
8208 int old_referenced = 0;
8210 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8212 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8214 default_encode_section_info (decl, rtl, first);
8216 if (first && TEXT_SPACE_P (decl))
8218 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8219 if (TREE_CODE (decl) == FUNCTION_DECL)
8220 pa_encode_label (XEXP (rtl, 0));
8222 else if (old_referenced)
8223 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8226 /* This is sort of the inverse of pa_encode_section_info. */
8229 pa_strip_name_encoding (const char *str)
8231 str += (*str == '@');
8232 str += (*str == '*');
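/* E.g., "@foo" (a function label encoded above) and "*foo" (GCC's
   verbatim assembler-name marker) both come back as plain "foo".  */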
8236 /* Returns 1 if OP is a function label involved in a simple addition
8237 with a constant. Used to keep certain patterns from matching
8238 during instruction combination. */
8240 pa_is_function_label_plus_const (rtx op)
8242 /* Strip off any CONST. */
8243 if (GET_CODE (op) == CONST)
8246 return (GET_CODE (op) == PLUS
8247 && function_label_operand (XEXP (op, 0), VOIDmode)
8248 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8251 /* Output assembly code for a thunk to FUNCTION. */
8254 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8255 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8258 static unsigned int current_thunk_number;
8259 int val_14 = VAL_14_BITS_P (delta);
8260 unsigned int old_last_address = last_address, nbytes = 0;
8264 xoperands[0] = XEXP (DECL_RTL (function), 0);
8265 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8266 xoperands[2] = GEN_INT (delta);
8268 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
8269 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
8271 /* Output the thunk. We know that the function is in the same
8272 translation unit (i.e., the same space) as the thunk, and that
8273 thunks are output after their method. Thus, we don't need an
8274 external branch to reach the function. With SOM and GAS,
8275 functions and thunks are effectively in different sections.
8276 Thus, we can always use an IA-relative branch and the linker
8277 will add a long branch stub if necessary.
8279 However, we have to be careful when generating PIC code on the
8280 SOM port to ensure that the sequence does not transfer to an
8281 import stub for the target function as this could clobber the
8282 return value saved at SP-24. This would also apply to the
8283 32-bit linux port if the multi-space model is implemented. */
8284 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8285 && !(flag_pic && TREE_PUBLIC (function))
8286 && (TARGET_GAS || last_address < 262132))
8287 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8288 && ((targetm_common.have_named_sections
8289 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8290 /* The GNU 64-bit linker has rather poor stub management.
8291 So, we use a long branch from thunks that aren't in
8292 the same section as the target function. */
8294 && (DECL_SECTION_NAME (thunk_fndecl)
8295 != DECL_SECTION_NAME (function)))
8296 || ((DECL_SECTION_NAME (thunk_fndecl)
8297 == DECL_SECTION_NAME (function))
8298 && last_address < 262132)))
8299 || (targetm_common.have_named_sections
8300 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8301 && DECL_SECTION_NAME (function) == NULL
8302 && last_address < 262132)
8303 || (!targetm_common.have_named_sections
8304 && last_address < 262132))))
8307 output_asm_insn ("addil L'%2,%%r26", xoperands);
8309 output_asm_insn ("b %0", xoperands);
8313 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8318 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8322 else if (TARGET_64BIT)
8324 /* We only have one call-clobbered scratch register, so we can't
8325 make use of the delay slot if delta doesn't fit in 14 bits. */
8328 output_asm_insn ("addil L'%2,%%r26", xoperands);
8329 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8332 output_asm_insn ("b,l .+8,%%r1", xoperands);
8336 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8337 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8341 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8342 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8347 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8348 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8353 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8357 else if (TARGET_PORTABLE_RUNTIME)
8359 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8360 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8363 output_asm_insn ("addil L'%2,%%r26", xoperands);
8365 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8369 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8374 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8378 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8380 /* The function is accessible from outside this module. The only
8381 way to avoid an import stub between the thunk and function is to
8382 call the function directly with an indirect sequence similar to
8383 that used by $$dyncall. This is possible because $$dyncall acts
8384 as the import stub in an indirect call. */
8385 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8386 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8387 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8388 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8389 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8390 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8391 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8392 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8393 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8397 output_asm_insn ("addil L'%2,%%r26", xoperands);
8403 output_asm_insn ("bve (%%r22)", xoperands);
8406 else if (TARGET_NO_SPACE_REGS)
8408 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8413 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8414 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8415 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8420 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8422 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8426 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8428 if (TARGET_SOM || !TARGET_GAS)
8430 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8431 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8435 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8436 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8440 output_asm_insn ("addil L'%2,%%r26", xoperands);
8442 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8446 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8451 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8458 output_asm_insn ("addil L'%2,%%r26", xoperands);
8460 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8461 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8465 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8470 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8475 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8477 if (TARGET_SOM && TARGET_GAS)
8479 /* We're done with this subspace except possibly for some additional
8480 debug information. Forget that we are in this subspace to ensure
8481 that the next function is output in its own subspace. */
8483 cfun->machine->in_nsubspa = 2;
8486 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8488 switch_to_section (data_section);
8489 output_asm_insn (".align 4", xoperands);
8490 ASM_OUTPUT_LABEL (file, label);
8491 output_asm_insn (".word P'%0", xoperands);
8494 current_thunk_number++;
8495 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8496 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8497 last_address += nbytes;
8498 if (old_last_address > last_address)
8499 last_address = UINT_MAX;
8500 update_total_code_bytes (nbytes);
8503 /* Only direct calls to static functions are allowed to be sibling (tail) call optimizations.
8506 This restriction is necessary because some linker generated stubs will
8507 store return pointers into rp' in some cases which might clobber a
8508 live value already in rp'.
8510 In a sibcall the current function and the target function share stack
8511 space. Thus if the path to the current function and the path to the
8512 target function save a value in rp', they save the value into the
8513 same stack slot, which has undesirable consequences.
8515 Because of the deferred binding nature of shared libraries any function
8516 with external scope could be in a different load module and thus require
8517 rp' to be saved when calling that function. So sibcall optimizations
8518 can only be safe for static functions.
8520 Note that GCC never needs return value relocations, so we don't have to
8521 worry about static calls with return value relocations (which require saving rp').
8524 It is safe to perform a sibcall optimization when the target function
8525 will never return. */
8527 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8529 if (TARGET_PORTABLE_RUNTIME)
8532 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8533 single subspace mode and the call is not indirect. As far as I know,
8534 there is no operating system support for the multiple subspace mode.
8535 It might be possible to support indirect calls if we didn't use
8536 $$dyncall (see the indirect sequence generated in pa_output_call). */
8538 return (decl != NULL_TREE);
8540 /* Sibcalls are not ok because the arg pointer register is not a fixed
8541 register. This prevents the sibcall optimization from occurring. In
8542 addition, there are problems with stub placement using GNU ld. This
8543 is because a normal sibcall branch uses a 17-bit relocation while
8544 a regular call branch uses a 22-bit relocation. As a result, more
8545 care needs to be taken in the placement of long-branch stubs. */
8546 if (TARGET_64BIT)
8547 return false;
8549 /* Sibcalls are only ok within a translation unit. */
8550 return (decl && !TREE_PUBLIC (decl));
8553 /* ??? Addition is not commutative on the PA due to the weird implicit
8554 space register selection rules for memory addresses. Therefore, we
8555 don't consider a + b == b + a, as this might be inside a MEM. */
8557 pa_commutative_p (const_rtx x, int outer_code)
8559 return (COMMUTATIVE_P (x)
8560 && (TARGET_NO_SPACE_REGS
8561 || (outer_code != UNKNOWN && outer_code != MEM)
8562 || GET_CODE (x) != PLUS));
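/* Editor's illustration (not from the original source): on targets
   with live space registers, the base operand of a PLUS inside a MEM
   selects the space register, so the operands cannot be swapped.
   Under that assumption,

	(mem:SI (plus:SI (reg %r19) (reg %r26)))

   and

	(mem:SI (plus:SI (reg %r26) (reg %r19)))

   may reference different spaces, which is why the hook above refuses
   to treat PLUS as commutative unless space registers are disabled or
   the expression is known not to be part of a memory address.  */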
8565 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8566 use in fmpyadd instructions. */
8568 pa_fmpyaddoperands (rtx *operands)
8570 enum machine_mode mode = GET_MODE (operands[0]);
8572 /* Must be a floating point mode. */
8573 if (mode != SFmode && mode != DFmode)
8576 /* All modes must be the same. */
8577 if (! (mode == GET_MODE (operands[1])
8578 && mode == GET_MODE (operands[2])
8579 && mode == GET_MODE (operands[3])
8580 && mode == GET_MODE (operands[4])
8581 && mode == GET_MODE (operands[5])))
8584 /* All operands must be registers. */
8585 if (! (GET_CODE (operands[1]) == REG
8586 && GET_CODE (operands[2]) == REG
8587 && GET_CODE (operands[3]) == REG
8588 && GET_CODE (operands[4]) == REG
8589 && GET_CODE (operands[5]) == REG))
8592 /* Only 2 real operands to the addition. One of the input operands must
8593 be the same as the output operand. */
8594 if (! rtx_equal_p (operands[3], operands[4])
8595 && ! rtx_equal_p (operands[3], operands[5]))
8598 /* Inout operand of add cannot conflict with any operands from multiply. */
8599 if (rtx_equal_p (operands[3], operands[0])
8600 || rtx_equal_p (operands[3], operands[1])
8601 || rtx_equal_p (operands[3], operands[2]))
8604 /* The multiply result cannot feed into the addition operands. */
8605 if (rtx_equal_p (operands[4], operands[0])
8606 || rtx_equal_p (operands[5], operands[0]))
8609 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8610 if (mode == SFmode
8611 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8612 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8613 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8614 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8615 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8616 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8619 /* Passed. Operands are suitable for fmpyadd. */
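/* Editor's sketch of the operand layout validated above (the register
   choices are illustrative assumptions, not from the source):

	operands[0] = fmpy destination
	operands[1], operands[2] = fmpy sources
	operands[3] = in/out operand of the add
	operands[4], operands[5] = add inputs, one equal to operands[3]

   For example, fr22 = fr4 * fr5 paired with fr6 = fr6 + fr7 passes
   every check: the multiply and add share no registers, and the add
   reuses its own output as an input.  */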
8623 #if !defined(USE_COLLECT2)
8625 pa_asm_out_constructor (rtx symbol, int priority)
8627 if (!function_label_operand (symbol, VOIDmode))
8628 pa_encode_label (symbol);
8630 #ifdef CTORS_SECTION_ASM_OP
8631 default_ctor_section_asm_out_constructor (symbol, priority);
8633 # ifdef TARGET_ASM_NAMED_SECTION
8634 default_named_section_asm_out_constructor (symbol, priority);
8636 default_stabs_asm_out_constructor (symbol, priority);
8642 pa_asm_out_destructor (rtx symbol, int priority)
8644 if (!function_label_operand (symbol, VOIDmode))
8645 pa_encode_label (symbol);
8647 #ifdef DTORS_SECTION_ASM_OP
8648 default_dtor_section_asm_out_destructor (symbol, priority);
8650 # ifdef TARGET_ASM_NAMED_SECTION
8651 default_named_section_asm_out_destructor (symbol, priority);
8653 default_stabs_asm_out_destructor (symbol, priority);
8659 /* This function places uninitialized global data in the bss section.
8660 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8661 function on the SOM port to prevent uninitialized global data from
8662 being placed in the data section. */
8665 pa_asm_output_aligned_bss (FILE *stream,
8667 unsigned HOST_WIDE_INT size,
8670 switch_to_section (bss_section);
8671 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8673 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8674 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8677 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8678 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8681 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8682 ASM_OUTPUT_LABEL (stream, name);
8683 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8686 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8687 that doesn't allow the alignment of global common storage to be directly
8688 specified. The SOM linker aligns common storage based on the rounded
8689 value of the NUM_BYTES parameter in the .comm directive. It's not
8690 possible to use the .align directive as it doesn't affect the alignment
8691 of the label associated with a .comm directive. */
8694 pa_asm_output_aligned_common (FILE *stream,
8696 unsigned HOST_WIDE_INT size,
8699 unsigned int max_common_align;
8701 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8702 if (align > max_common_align)
8704 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8705 "for global common data. Using %u",
8706 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8707 align = max_common_align;
8710 switch_to_section (bss_section);
8712 assemble_name (stream, name);
8713 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8714 MAX (size, align / BITS_PER_UNIT));
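/* Worked example (editor's illustration): on a 32-bit target, a
   common symbol "foo" with SIZE 10 and a 64-bit ALIGN request emits

	foo	.comm 10

   since MAX (10, 64 / BITS_PER_UNIT) = 10; the SOM linker then
   derives the storage alignment from that rounded size, as described
   above.  */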
8717 /* We can't use .comm for local common storage as the SOM linker effectively
8718 treats the symbol as universal and uses the same storage for local symbols
8719 with the same name in different object files. The .block directive
8720 reserves an uninitialized block of storage. However, it's not common
8721 storage. Fortunately, GCC never requests common storage with the same
8722 name in any given translation unit. */
8725 pa_asm_output_aligned_local (FILE *stream,
8727 unsigned HOST_WIDE_INT size,
8730 switch_to_section (bss_section);
8731 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8734 fprintf (stream, "%s", LOCAL_ASM_OP);
8735 assemble_name (stream, name);
8736 fprintf (stream, "\n");
8739 ASM_OUTPUT_LABEL (stream, name);
8740 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8743 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8744 use in fmpysub instructions. */
8746 pa_fmpysuboperands (rtx *operands)
8748 enum machine_mode mode = GET_MODE (operands[0]);
8750 /* Must be a floating point mode. */
8751 if (mode != SFmode && mode != DFmode)
8754 /* All modes must be the same. */
8755 if (! (mode == GET_MODE (operands[1])
8756 && mode == GET_MODE (operands[2])
8757 && mode == GET_MODE (operands[3])
8758 && mode == GET_MODE (operands[4])
8759 && mode == GET_MODE (operands[5])))
8762 /* All operands must be registers. */
8763 if (! (GET_CODE (operands[1]) == REG
8764 && GET_CODE (operands[2]) == REG
8765 && GET_CODE (operands[3]) == REG
8766 && GET_CODE (operands[4]) == REG
8767 && GET_CODE (operands[5]) == REG))
8770 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8771 operation, so operands[4] must be the same as operands[3]. */
8772 if (! rtx_equal_p (operands[3], operands[4]))
8775 /* The multiply result cannot feed into the subtraction. */
8776 if (rtx_equal_p (operands[5], operands[0]))
8779 /* Inout operand of sub cannot conflict with any operands from multiply. */
8780 if (rtx_equal_p (operands[3], operands[0])
8781 || rtx_equal_p (operands[3], operands[1])
8782 || rtx_equal_p (operands[3], operands[2]))
8785 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8786 if (mode == SFmode
8787 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8788 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8789 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8790 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8791 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8792 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8795 /* Passed. Operands are suitable for fmpysub. */
8799 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8800 constants for shadd instructions. */
8802 pa_shadd_constant_p (int val)
8804 if (val == 2 || val == 4 || val == 8)
8805 return 1;
8806 else
8807 return 0;
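/* Editor's note (illustrative): these constants correspond to the PA
   shift-and-add instructions sh1add, sh2add and sh3add, which shift
   the index register left by 1, 2 or 3 bits before adding.  A scaled
   address such as x + 4*y can thus be formed in one insn:

	sh2add %ry,%rx,%rt	; %rt = 4 * %ry + %rx

   No other scale factors are encodable, hence the test above.  */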
8810 /* Return TRUE if INSN branches forward. */
8813 forward_branch_p (rtx insn)
8815 rtx lab = JUMP_LABEL (insn);
8817 /* The INSN must have a jump label. */
8818 gcc_assert (lab != NULL_RTX);
8820 if (INSN_ADDRESSES_SET_P ())
8821 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8823 while (insn)
8825 if (insn == lab)
8826 return true;
8827 else
8828 insn = NEXT_INSN (insn);
8831 return false;
8834 /* Return 1 if INSN is in the delay slot of a call instruction. */
8836 pa_jump_in_call_delay (rtx insn)
8839 if (GET_CODE (insn) != JUMP_INSN)
8842 if (PREV_INSN (insn)
8843 && PREV_INSN (PREV_INSN (insn))
8844 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8846 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8848 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8849 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8856 /* Output an unconditional move and branch insn. */
8859 pa_output_parallel_movb (rtx *operands, rtx insn)
8861 int length = get_attr_length (insn);
8863 /* These are the cases in which we win. */
8865 return "mov%I1b,tr %1,%0,%2";
8867 /* None of the following cases win, but they don't lose either. */
8870 if (dbr_sequence_length () == 0)
8872 /* Nothing in the delay slot, fake it by putting the combined
8873 insn (the copy or add) in the delay slot of a bl. */
8874 if (GET_CODE (operands[1]) == CONST_INT)
8875 return "b %2\n\tldi %1,%0";
8877 return "b %2\n\tcopy %1,%0";
8881 /* Something in the delay slot, but we've got a long branch. */
8882 if (GET_CODE (operands[1]) == CONST_INT)
8883 return "ldi %1,%0\n\tb %2";
8885 return "copy %1,%0\n\tb %2";
8889 if (GET_CODE (operands[1]) == CONST_INT)
8890 output_asm_insn ("ldi %1,%0", operands);
8892 output_asm_insn ("copy %1,%0", operands);
8893 return pa_output_lbranch (operands[2], insn, 1);
8896 /* Output an unconditional add and branch insn. */
8899 pa_output_parallel_addb (rtx *operands, rtx insn)
8901 int length = get_attr_length (insn);
8903 /* To make life easy we want operand0 to be the shared input/output
8904 operand and operand1 to be the readonly operand. */
8905 if (operands[0] == operands[1])
8906 operands[1] = operands[2];
8908 /* These are the cases in which we win. */
8910 return "add%I1b,tr %1,%0,%3";
8912 /* None of the following cases win, but they don't lose either. */
8915 if (dbr_sequence_length () == 0)
8916 /* Nothing in the delay slot, fake it by putting the combined
8917 insn (the copy or add) in the delay slot of a bl. */
8918 return "b %3\n\tadd%I1 %1,%0,%0";
8920 /* Something in the delay slot, but we've got a long branch. */
8921 return "add%I1 %1,%0,%0\n\tb %3";
8924 output_asm_insn ("add%I1 %1,%0,%0", operands);
8925 return pa_output_lbranch (operands[3], insn, 1);
8928 /* Return nonzero if INSN (a jump insn) immediately follows a call
8929 to a named function. This is used to avoid filling the delay slot
8930 of the jump since it can usually be eliminated by modifying RP in
8931 the delay slot of the call. */
8934 pa_following_call (rtx insn)
8936 if (! TARGET_JUMP_IN_DELAY)
8939 /* Find the previous real insn, skipping NOTEs. */
8940 insn = PREV_INSN (insn);
8941 while (insn && GET_CODE (insn) == NOTE)
8942 insn = PREV_INSN (insn);
8944 /* Check for CALL_INSNs and millicode calls. */
8945 if (insn
8946 && ((GET_CODE (insn) == CALL_INSN
8947 && get_attr_type (insn) != TYPE_DYNCALL)
8948 || (GET_CODE (insn) == INSN
8949 && GET_CODE (PATTERN (insn)) != SEQUENCE
8950 && GET_CODE (PATTERN (insn)) != USE
8951 && GET_CODE (PATTERN (insn)) != CLOBBER
8952 && get_attr_type (insn) == TYPE_MILLI)))
8958 /* We use this hook to perform a PA specific optimization which is difficult
8959 to do in earlier passes.
8961 We want the delay slots of branches within jump tables to be filled.
8962 None of the compiler passes at the moment even has the notion that a
8963 PA jump table doesn't contain addresses, but instead contains actual
8964 instructions!
8966 Because we actually jump into the table, the addresses of each entry
8967 must stay constant in relation to the beginning of the table (which
8968 itself must stay constant relative to the instruction to jump into
8969 it). I don't believe we can guarantee earlier passes of the compiler
8970 will adhere to those rules.
8972 So, late in the compilation process we find all the jump tables, and
8973 expand them into real code -- e.g. each entry in the jump table vector
8974 will get an appropriate label followed by a jump to the final target.
8976 Reorg and the final jump pass can then optimize these branches and
8977 fill their delay slots. We end up with smaller, more efficient code.
8979 The jump instructions within the table are special; we must be able
8980 to identify them during assembly output (if the jumps don't get filled
8981 we need to emit a nop rather than nullifying the delay slot). We
8982 identify jumps in switch tables by using insns with the attribute
8983 type TYPE_BTABLE_BRANCH.
8985 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8986 insns. This serves two purposes: first, it prevents jump.c from
8987 noticing that the last N entries in the table jump to the instruction
8988 immediately after the table and deleting the jumps. Second, those
8989 insns mark where we should emit .begin_brtab and .end_brtab directives
8990 when using GAS (allows for better link time optimizations). */
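/* Editor's sketch of the expansion described above (labels are
   hypothetical): a branch table such as

	.word L$3
	.word L$4

   becomes a sequence of real jumps, each preceded by its own label,

   L$100:	b L$3
		nop
   L$101:	b L$4
		nop

   bracketed by begin_brtab/end_brtab markers, so that reorg and the
   final jump pass can replace each nop with useful work.  */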
8997 remove_useless_addtr_insns (1);
8999 if (pa_cpu < PROCESSOR_8000)
9000 pa_combine_instructions ();
9003 /* This is fairly cheap, so always run it if optimizing. */
9004 if (optimize > 0 && !TARGET_BIG_SWITCH)
9006 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
9007 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9009 rtx pattern, tmp, location, label;
9010 unsigned int length, i;
9012 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
9013 if (GET_CODE (insn) != JUMP_INSN
9014 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
9015 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
9018 /* Emit marker for the beginning of the branch table. */
9019 emit_insn_before (gen_begin_brtab (), insn);
9021 pattern = PATTERN (insn);
9022 location = PREV_INSN (insn);
9023 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
9025 for (i = 0; i < length; i++)
9027 /* Emit a label before each jump to keep jump.c from
9028 removing this code. */
9029 tmp = gen_label_rtx ();
9030 LABEL_NUSES (tmp) = 1;
9031 emit_label_after (tmp, location);
9032 location = NEXT_INSN (location);
9034 if (GET_CODE (pattern) == ADDR_VEC)
9035 label = XEXP (XVECEXP (pattern, 0, i), 0);
9037 label = XEXP (XVECEXP (pattern, 1, i), 0);
9039 tmp = gen_short_jump (label);
9041 /* Emit the jump itself. */
9042 tmp = emit_jump_insn_after (tmp, location);
9043 JUMP_LABEL (tmp) = label;
9044 LABEL_NUSES (label)++;
9045 location = NEXT_INSN (location);
9047 /* Emit a BARRIER after the jump. */
9048 emit_barrier_after (location);
9049 location = NEXT_INSN (location);
9052 /* Emit marker for the end of the branch table. */
9053 emit_insn_before (gen_end_brtab (), location);
9054 location = NEXT_INSN (location);
9055 emit_barrier_after (location);
9057 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
9063 /* Still need brtab marker insns. FIXME: the presence of these
9064 markers disables output of the branch table to readonly memory,
9065 and any alignment directives that might be needed. Possibly,
9066 the begin_brtab insn should be output before the label for the
9067 table. This doesn't matter at the moment since the tables are
9068 always output in the text section. */
9069 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9071 /* Find an ADDR_VEC insn. */
9072 if (GET_CODE (insn) != JUMP_INSN
9073 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
9074 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
9077 /* Now generate markers for the beginning and end of the
9079 emit_insn_before (gen_begin_brtab (), insn);
9080 emit_insn_after (gen_end_brtab (), insn);
9085 /* The PA has a number of odd instructions which can perform multiple
9086 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
9087 it may be profitable to combine two instructions into one instruction
9088 with two outputs. It's not profitable on PA2.0 machines because the
9089 two outputs would take two slots in the reorder buffers.
9091 This routine finds instructions which can be combined and combines
9092 them. We only support some of the potential combinations, and we
9093 only try common ways to find suitable instructions.
9095 * addb can add two registers or a register and a small integer
9096 and jump to a nearby (+-8k) location. Normally the jump to the
9097 nearby location is conditional on the result of the add, but by
9098 using the "true" condition we can make the jump unconditional.
9099 Thus addb can perform two independent operations in one insn.
9101 * movb is similar to addb in that it can perform a reg->reg
9102 or small immediate->reg copy and jump to a nearby (+-8k) location.
9104 * fmpyadd and fmpysub can perform a FP multiply and either an
9105 FP add or FP sub if the operands of the multiply and add/sub are
9106 independent (there are other minor restrictions). Note both
9107 the fmpy and fadd/fsub can in theory move to better spots according
9108 to data dependencies, but for now we require the fmpy stay at a
9109 fixed location.
9111 * Many of the memory operations can perform pre & post updates
9112 of index registers. GCC's pre/post increment/decrement addressing
9113 is far too simple to take advantage of all the possibilities. This
9114 pass may not be suitable since those insns may not be independent.
9116 * comclr can compare two ints or an int and a register, nullify
9117 the following instruction and zero some other register. This
9118 is more difficult to use as it's harder to find an insn which
9119 will generate a comclr than finding something like an unconditional
9120 branch. (conditional moves & long branches create comclr insns).
9122 * Most arithmetic operations can conditionally skip the next
9123 instruction. They can be viewed as "perform this operation
9124 and conditionally jump to this nearby location" (where nearby
9125 is an insn away). These are difficult to use due to the
9126 branch length restrictions. */
9129 pa_combine_instructions (void)
9131 rtx anchor, new_rtx;
9133 /* This can get expensive since the basic algorithm is on the
9134 order of O(n^2) (or worse). Only do it for -O2 or higher
9135 levels of optimization. */
9139 /* Walk down the list of insns looking for "anchor" insns which
9140 may be combined with "floating" insns. As the name implies,
9141 "anchor" instructions don't move, while "floating" insns may
9143 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9144 new_rtx = make_insn_raw (new_rtx);
9146 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9148 enum attr_pa_combine_type anchor_attr;
9149 enum attr_pa_combine_type floater_attr;
9151 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9152 Also ignore any special USE insns. */
9153 if ((GET_CODE (anchor) != INSN
9154 && GET_CODE (anchor) != JUMP_INSN
9155 && GET_CODE (anchor) != CALL_INSN)
9156 || GET_CODE (PATTERN (anchor)) == USE
9157 || GET_CODE (PATTERN (anchor)) == CLOBBER
9158 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
9159 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
9162 anchor_attr = get_attr_pa_combine_type (anchor);
9163 /* See if anchor is an insn suitable for combination. */
9164 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9165 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9166 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9167 && ! forward_branch_p (anchor)))
9171 for (floater = PREV_INSN (anchor);
9173 floater = PREV_INSN (floater))
9175 if (GET_CODE (floater) == NOTE
9176 || (GET_CODE (floater) == INSN
9177 && (GET_CODE (PATTERN (floater)) == USE
9178 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9181 /* Anything except a regular INSN will stop our search. */
9182 if (GET_CODE (floater) != INSN
9183 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9184 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9190 /* See if FLOATER is suitable for combination with the
9191 anchor insn. */
9192 floater_attr = get_attr_pa_combine_type (floater);
9193 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9194 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9195 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9196 && floater_attr == PA_COMBINE_TYPE_FMPY))
9198 /* If ANCHOR and FLOATER can be combined, then we're
9199 done with this pass. */
9200 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9201 SET_DEST (PATTERN (floater)),
9202 XEXP (SET_SRC (PATTERN (floater)), 0),
9203 XEXP (SET_SRC (PATTERN (floater)), 1)))
9207 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9208 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9210 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9212 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9213 SET_DEST (PATTERN (floater)),
9214 XEXP (SET_SRC (PATTERN (floater)), 0),
9215 XEXP (SET_SRC (PATTERN (floater)), 1)))
9220 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9221 SET_DEST (PATTERN (floater)),
9222 SET_SRC (PATTERN (floater)),
9223 SET_SRC (PATTERN (floater))))
9229 /* If we didn't find anything on the backwards scan try forwards. */
9231 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9232 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9234 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9236 if (GET_CODE (floater) == NOTE
9237 || (GET_CODE (floater) == INSN
9238 && (GET_CODE (PATTERN (floater)) == USE
9239 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9243 /* Anything except a regular INSN will stop our search. */
9244 if (GET_CODE (floater) != INSN
9245 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9246 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9252 /* See if FLOATER is suitable for combination with the
9253 anchor insn. */
9254 floater_attr = get_attr_pa_combine_type (floater);
9255 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9256 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9257 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9258 && floater_attr == PA_COMBINE_TYPE_FMPY))
9260 /* If ANCHOR and FLOATER can be combined, then we're
9261 done with this pass. */
9262 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9263 SET_DEST (PATTERN (floater)),
9264 XEXP (SET_SRC (PATTERN (floater)),
9266 XEXP (SET_SRC (PATTERN (floater)),
9273 /* FLOATER will be nonzero if we found a suitable floating
9274 insn for combination with ANCHOR. */
9276 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9277 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9279 /* Emit the new instruction and delete the old anchor. */
9280 emit_insn_before (gen_rtx_PARALLEL
9282 gen_rtvec (2, PATTERN (anchor),
9283 PATTERN (floater))),
9286 SET_INSN_DELETED (anchor);
9288 /* Emit a special USE insn for FLOATER, then delete
9289 the floating insn. */
9290 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9291 delete_insn (floater);
9296 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9299 /* Emit the new_jump instruction and delete the old anchor. */
9301 = emit_jump_insn_before (gen_rtx_PARALLEL
9303 gen_rtvec (2, PATTERN (anchor),
9304 PATTERN (floater))),
9307 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9308 SET_INSN_DELETED (anchor);
9310 /* Emit a special USE insn for FLOATER, then delete
9311 the floating insn. */
9312 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9313 delete_insn (floater);
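/* Editor's illustration of one combination performed above (register
   numbers and label assumed): a floating copy followed by an
   unconditional backward branch anchor,

	copy %r4,%r26
	...
	b L$2

   can be merged into a PARALLEL that matches the parallel movb
   pattern and is eventually output as

	movb,tr %r4,%r26,L$2

   doing the move and the branch in a single insn.  */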
9321 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9324 int insn_code_number;
9327 /* Create a PARALLEL with the patterns of ANCHOR and
9328 FLOATER, try to recognize it, then test constraints
9329 for the resulting pattern.
9331 If the pattern doesn't match or the constraints
9332 aren't met, keep searching for a suitable floater
9333 insn. */
9334 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9335 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9336 INSN_CODE (new_rtx) = -1;
9337 insn_code_number = recog_memoized (new_rtx);
9338 if (insn_code_number < 0
9339 || (extract_insn (new_rtx), ! constrain_operands (1)))
9353 /* There are up to three operands to consider: one
9354 output and two inputs.
9356 The output must not be used between FLOATER & ANCHOR
9357 exclusive. The inputs must not be set between
9358 FLOATER and ANCHOR exclusive. */
9360 if (reg_used_between_p (dest, start, end))
9363 if (reg_set_between_p (src1, start, end))
9366 if (reg_set_between_p (src2, start, end))
9369 /* If we get here, then everything is good. */
9373 /* Return nonzero if references for INSN are delayed.
9375 Millicode insns are actually function calls with some special
9376 constraints on arguments and register usage.
9378 Millicode calls always expect their arguments in the integer argument
9379 registers, and always return their result in %r29 (ret1). They
9380 are expected to clobber their arguments, %r1, %r29, and the return
9381 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9383 This function tells reorg that the references to arguments and
9384 millicode calls do not appear to happen until after the millicode call.
9385 This allows reorg to put insns which set the argument registers into the
9386 delay slot of the millicode call -- thus they act more like traditional
9387 CALL_INSNs.
9389 Note we cannot consider side effects of the insn to be delayed because
9390 the branch and link insn will clobber the return pointer. If we happened
9391 to use the return pointer in the delay slot of the call, then we lose.
9393 get_attr_type will try to recognize the given insn, so make sure to
9394 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9395 in this case. */
9397 pa_insn_refs_are_delayed (rtx insn)
9399 return ((GET_CODE (insn) == INSN
9400 && GET_CODE (PATTERN (insn)) != SEQUENCE
9401 && GET_CODE (PATTERN (insn)) != USE
9402 && GET_CODE (PATTERN (insn)) != CLOBBER
9403 && get_attr_type (insn) == TYPE_MILLI));
9406 /* Promote the return value, but not the arguments. */
9408 static enum machine_mode
9409 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9410 enum machine_mode mode,
9411 int *punsignedp ATTRIBUTE_UNUSED,
9412 const_tree fntype ATTRIBUTE_UNUSED,
9415 if (for_return == 0)
9417 return promote_mode (type, mode, punsignedp);
9420 /* On the HP-PA the value is found in register(s) 28(-29), unless
9421 the mode is SF or DF. Then the value is returned in fr4 (32).
9423 This must perform the same promotions as PROMOTE_MODE, else promoting
9424 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9426 Small structures must be returned in a PARALLEL on PA64 in order
9427 to match the HP Compiler ABI. */
9430 pa_function_value (const_tree valtype,
9431 const_tree func ATTRIBUTE_UNUSED,
9432 bool outgoing ATTRIBUTE_UNUSED)
9434 enum machine_mode valmode;
9436 if (AGGREGATE_TYPE_P (valtype)
9437 || TREE_CODE (valtype) == COMPLEX_TYPE
9438 || TREE_CODE (valtype) == VECTOR_TYPE)
9442 /* Aggregates with a size less than or equal to 128 bits are
9443 returned in GR 28(-29). They are left justified. The pad
9444 bits are undefined. Larger aggregates are returned in
9448 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9450 for (i = 0; i < ub; i++)
9452 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9453 gen_rtx_REG (DImode, 28 + i),
9458 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9460 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9462 /* Aggregates 5 to 8 bytes in size are returned in general
9463 registers r28-r29 in the same manner as other non
9464 floating-point objects. The data is right-justified and
9465 zero-extended to 64 bits. This is opposite to the normal
9466 justification used on big endian targets and requires
9467 special treatment. */
9468 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9469 gen_rtx_REG (DImode, 28), const0_rtx);
9470 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9474 if ((INTEGRAL_TYPE_P (valtype)
9475 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9476 || POINTER_TYPE_P (valtype))
9477 valmode = word_mode;
9479 valmode = TYPE_MODE (valtype);
9481 if (TREE_CODE (valtype) == REAL_TYPE
9482 && !AGGREGATE_TYPE_P (valtype)
9483 && TYPE_MODE (valtype) != TFmode
9484 && !TARGET_SOFT_FLOAT)
9485 return gen_rtx_REG (valmode, 32);
9487 return gen_rtx_REG (valmode, 28);
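/* Editor's examples of the rules above (types assumed): an "int" or
   pointer result is promoted to word_mode and returned in %r28; a
   "double" result is returned in the floating-point register fr4
   (register 32) unless TARGET_SOFT_FLOAT; on TARGET_64BIT a 16-byte
   struct comes back in the PARALLEL built from registers 28 and 29.  */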
9490 /* Implement the TARGET_LIBCALL_VALUE hook. */
9493 pa_libcall_value (enum machine_mode mode,
9494 const_rtx fun ATTRIBUTE_UNUSED)
9496 if (! TARGET_SOFT_FLOAT
9497 && (mode == SFmode || mode == DFmode))
9498 return gen_rtx_REG (mode, 32);
9500 return gen_rtx_REG (mode, 28);
9503 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9506 pa_function_value_regno_p (const unsigned int regno)
9508 if (regno == 28
9509 || (! TARGET_SOFT_FLOAT && regno == 32))
9510 return true;
9512 return false;
9515 /* Update the data in CUM to advance over an argument
9516 of mode MODE and data type TYPE.
9517 (TYPE is null for libcalls where that information may not be available.) */
9520 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9521 const_tree type, bool named ATTRIBUTE_UNUSED)
9523 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9524 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9526 cum->nargs_prototype--;
9527 cum->words += (arg_size
9528 + ((cum->words & 01)
9529 && type != NULL_TREE
9533 /* Return the location of a parameter that is passed in a register or NULL
9534 if the parameter has any component that is passed in memory.
9536 This is new code and will be pushed into the net sources after
9537 further testing.
9539 ??? We might want to restructure this so that it looks more like other
9540 ports. */
9542 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9543 const_tree type, bool named ATTRIBUTE_UNUSED)
9545 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9546 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9553 if (mode == VOIDmode)
9556 arg_size = FUNCTION_ARG_SIZE (mode, type);
9558 /* If this arg would be passed partially or totally on the stack, then
9559 this routine should return zero. pa_arg_partial_bytes will
9560 handle arguments which are split between regs and stack slots if
9561 the ABI mandates split arguments. */
9564 /* The 32-bit ABI does not split arguments. */
9565 if (cum->words + arg_size > max_arg_words)
9571 alignment = cum->words & 1;
9572 if (cum->words + alignment >= max_arg_words)
9576 /* The 32bit ABIs and the 64bit ABIs are rather different,
9577 particularly in their handling of FP registers. We might
9578 be able to cleverly share code between them, but I'm not
9579 going to bother in the hope that splitting them up results
9580 in code that is more easily understood. */
9584 /* Advance the base registers to their current locations.
9586 Remember, gprs grow towards smaller register numbers while
9587 fprs grow to higher register numbers. Also remember that
9588 although FP regs are 32-bit addressable, we pretend that
9589 the registers are 64-bits wide. */
9590 gpr_reg_base = 26 - cum->words;
9591 fpr_reg_base = 32 + cum->words;
9593 /* Arguments wider than one word and small aggregates need special
9594 treatment. */
9595 if (arg_size > 1
9596 || mode == BLKmode
9597 || (type && (AGGREGATE_TYPE_P (type)
9598 || TREE_CODE (type) == COMPLEX_TYPE
9599 || TREE_CODE (type) == VECTOR_TYPE)))
9601 /* Double-extended precision (80-bit), quad-precision (128-bit)
9602 and aggregates including complex numbers are aligned on
9603 128-bit boundaries. The first eight 64-bit argument slots
9604 are associated one-to-one with general registers r26
9605 through r19, and also with floating-point registers fr4
9606 through fr11. Arguments larger than one word are always
9607 passed in general registers.
9609 Using a PARALLEL with a word mode register results in left
9610 justified data on a big-endian target. */
9613 int i, offset = 0, ub = arg_size;
9615 /* Align the base register. */
9616 gpr_reg_base -= alignment;
9618 ub = MIN (ub, max_arg_words - cum->words - alignment);
9619 for (i = 0; i < ub; i++)
9621 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9622 gen_rtx_REG (DImode, gpr_reg_base),
9628 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9633 /* If the argument is larger than a word, then we know precisely
9634 which registers we must use. */
9648 /* Structures 5 to 8 bytes in size are passed in the general
9649 registers in the same manner as other non floating-point
9650 objects. The data is right-justified and zero-extended
9651 to 64 bits. This is opposite to the normal justification
9652 used on big endian targets and requires special treatment.
9653 We now define BLOCK_REG_PADDING to pad these objects.
9654 Aggregates, complex and vector types are passed in the same
9655 manner as structures. */
9656 if (mode == BLKmode
9657 || (type && (AGGREGATE_TYPE_P (type)
9658 || TREE_CODE (type) == COMPLEX_TYPE
9659 || TREE_CODE (type) == VECTOR_TYPE)))
9661 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9662 gen_rtx_REG (DImode, gpr_reg_base),
9664 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9669 /* We have a single word (32 bits). A simple computation
9670 will get us the register #s we need. */
9671 gpr_reg_base = 26 - cum->words;
9672 fpr_reg_base = 32 + 2 * cum->words;
9676 /* Determine if the argument needs to be passed in both general and
9677 floating point registers. */
9678 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9679 /* If we are doing soft-float with portable runtime, then there
9680 is no need to worry about FP regs. */
9681 && !TARGET_SOFT_FLOAT
9682 /* The parameter must be some kind of scalar float, else we just
9683 pass it in integer registers. */
9684 && GET_MODE_CLASS (mode) == MODE_FLOAT
9685 /* The target function must not have a prototype. */
9686 && cum->nargs_prototype <= 0
9687 /* libcalls do not need to pass items in both FP and general
9689 && type != NULL_TREE
9690 /* All this hair applies to "outgoing" args only. This includes
9691 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9693 /* Also pass outgoing floating arguments in both registers in indirect
9694 calls with the 32 bit ABI and the HP assembler since there is no
9695 way to specify argument locations in static functions.
9700 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9706 gen_rtx_EXPR_LIST (VOIDmode,
9707 gen_rtx_REG (mode, fpr_reg_base),
9709 gen_rtx_EXPR_LIST (VOIDmode,
9710 gen_rtx_REG (mode, gpr_reg_base),
9715 /* See if we should pass this parameter in a general register. */
9716 if (TARGET_SOFT_FLOAT
9717 /* Indirect calls in the normal 32bit ABI require all arguments
9718 to be passed in general registers. */
9719 || (!TARGET_PORTABLE_RUNTIME
9723 /* If the parameter is not a scalar floating-point parameter,
9724 then it belongs in GPRs. */
9725 || GET_MODE_CLASS (mode) != MODE_FLOAT
9726 /* Structure with single SFmode field belongs in GPR. */
9727 || (type && AGGREGATE_TYPE_P (type)))
9728 retval = gen_rtx_REG (mode, gpr_reg_base);
9730 retval = gen_rtx_REG (mode, fpr_reg_base);
9735 /* Arguments larger than one word are double word aligned. */
9738 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9740 bool singleword = (type
9741 ? (integer_zerop (TYPE_SIZE (type))
9742 || !TREE_CONSTANT (TYPE_SIZE (type))
9743 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9744 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9746 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
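/* Worked example (editor's illustration): on the 64-bit target a
   "double" argument occupies a single word and gets PARM_BOUNDARY,
   while a 16-byte struct is aligned to MAX_PARM_BOUNDARY.  Types with
   a variable or zero size also get PARM_BOUNDARY, as the checks above
   treat them as single words.  */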
9749 /* If this arg would be passed totally in registers or totally on the stack,
9750 then this routine should return zero. */
9753 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9754 tree type, bool named ATTRIBUTE_UNUSED)
9756 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9757 unsigned int max_arg_words = 8;
9758 unsigned int offset = 0;
9763 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9764 offset = 1;
9766 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9767 /* Arg fits fully into registers. */
9768 return 0;
9769 else if (cum->words + offset >= max_arg_words)
9770 /* Arg fully on the stack. */
9771 return 0;
9772 else
9773 /* Arg is split. */
9774 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
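/* Worked example (editor's illustration): with cum->words == 5 and a
   four-word argument, the odd starting offset forces a pad word, so
   the argument starts in word 6 and would end in word 10.  Since it
   starts below max_arg_words (8) but ends above it, the result is
   (8 - 5 - 1) * UNITS_PER_WORD = 16 bytes passed in registers, with
   the rest on the stack.  */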
9778 /* A get_unnamed_section callback for switching to the text section.
9780 This function is only used with SOM. Because we don't support
9781 named subspaces, we can only create a new subspace or switch back
9782 to the default text subspace. */
9785 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9787 gcc_assert (TARGET_SOM);
9790 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9792 /* We only want to emit a .nsubspa directive once at the
9793 start of the function. */
9794 cfun->machine->in_nsubspa = 1;
9796 /* Create a new subspace for the text. This provides
9797 better stub placement and one-only functions. */
9799 && DECL_ONE_ONLY (cfun->decl)
9800 && !DECL_WEAK (cfun->decl))
9802 output_section_asm_op ("\t.SPACE $TEXT$\n"
9803 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9804 "ACCESS=44,SORT=24,COMDAT");
9810 /* There isn't a current function or the body of the current
9811 function has been completed. So, we are changing to the
9812 text section to output debugging information. Thus, we
9813 need to forget that we are in the text section so that
9814 varasm.c will call us when text_section is selected again. */
9815 gcc_assert (!cfun || !cfun->machine
9816 || cfun->machine->in_nsubspa == 2);
9819 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9822 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9825 /* A get_unnamed_section callback for switching to comdat data
9826 sections. This function is only used with SOM. */
9829 som_output_comdat_data_section_asm_op (const void *data)
9832 output_section_asm_op (data);
9835 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9838 pa_som_asm_init_sections (void)
9841 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9843 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9844 is not being generated. */
9845 som_readonly_data_section
9846 = get_unnamed_section (0, output_section_asm_op,
9847 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9849 /* When secondary definitions are not supported, SOM makes readonly
9850 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9851 the comdat flag.
9852 som_one_only_readonly_data_section
9853 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9855 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9856 "ACCESS=0x2c,SORT=16,COMDAT");
9859 /* When secondary definitions are not supported, SOM makes data one-only
9860 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9861 som_one_only_data_section
9862 = get_unnamed_section (SECTION_WRITE,
9863 som_output_comdat_data_section_asm_op,
9864 "\t.SPACE $PRIVATE$\n"
9865 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9866 "ACCESS=31,SORT=24,COMDAT");
9869 som_tm_clone_table_section
9870 = get_unnamed_section (0, output_section_asm_op,
9871 "\t.SPACE $PRIVATE$\n\t.SUBSPA $TM_CLONE_TABLE$");
9873 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9874 which reference data within the $TEXT$ space (for example constant
9875 strings in the $LIT$ subspace).
9877 The assemblers (GAS and HP as) both have problems with handling
9878 the difference of two symbols which is the other correct way to
9879 reference constant data during PIC code generation.
9881 So, there's no way to reference constant data which is in the
9882 $TEXT$ space during PIC generation. Instead place all constant
9883 data into the $PRIVATE$ subspace (this reduces sharing, but it
9884 works correctly). */
9885 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9887 /* We must not have a reference to an external symbol defined in a
9888 shared library in a readonly section, else the SOM linker will
9889 complain.
9891 So, we force exception information into the data section. */
9892 exception_section = data_section;
9895 /* Implement TARGET_ASM_TM_CLONE_TABLE_SECTION. */
9898 pa_som_tm_clone_table_section (void)
9900 return som_tm_clone_table_section;
9903 /* On hpux10, the linker will give an error if we have a reference
9904 in the read-only data section to a symbol defined in a shared
9905 library. Therefore, expressions that might require a reloc can
9906 not be placed in the read-only data section. */
9909 pa_select_section (tree exp, int reloc,
9910 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9912 if (TREE_CODE (exp) == VAR_DECL
9913 && TREE_READONLY (exp)
9914 && !TREE_THIS_VOLATILE (exp)
9915 && DECL_INITIAL (exp)
9916 && (DECL_INITIAL (exp) == error_mark_node
9917 || TREE_CONSTANT (DECL_INITIAL (exp)))
9918 && !reloc)
9920 if (TARGET_SOM
9921 && DECL_ONE_ONLY (exp)
9922 && !DECL_WEAK (exp))
9923 return som_one_only_readonly_data_section;
9925 return readonly_data_section;
9927 else if (CONSTANT_CLASS_P (exp) && !reloc)
9928 return readonly_data_section;
9929 else if (TARGET_SOM
9930 && TREE_CODE (exp) == VAR_DECL
9931 && DECL_ONE_ONLY (exp)
9932 && !DECL_WEAK (exp))
9933 return som_one_only_data_section;
9935 return data_section;
9939 pa_globalize_label (FILE *stream, const char *name)
9941 /* We only handle DATA objects here; functions are globalized in
9942 ASM_DECLARE_FUNCTION_NAME. */
9943 if (! FUNCTION_NAME_P (name))
9945 fputs ("\t.EXPORT ", stream);
9946 assemble_name (stream, name);
9947 fputs (",DATA\n", stream);
9951 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9954 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9955 int incoming ATTRIBUTE_UNUSED)
9957 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9960 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9963 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9965 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9966 PA64 ABI says that objects larger than 128 bits are returned in memory.
9967 Note, int_size_in_bytes can return -1 if the size of the object is
9968 variable or larger than the maximum value that can be expressed as
9969 a HOST_WIDE_INT. It can also return zero for an empty type. The
9970 simplest way to handle variable and empty types is to pass them in
9971 memory. This avoids problems in defining the boundaries of argument
9972 slots, allocating registers, etc. */
9973 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9974 || int_size_in_bytes (type) <= 0);
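/* Editor's examples (illustrative): a 12-byte struct is returned in
   memory on the 32-bit target (12 > 8) but in registers on PA64
   (12 <= 16).  A variable-sized or empty type makes int_size_in_bytes
   return a value <= 0 and is therefore returned in memory as well.  */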
9977 /* Structure to hold declaration and name of external symbols that are
9978 emitted by GCC. We generate a vector of these symbols and output them
9979 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9980 This avoids putting out names that are never really used. */
9982 typedef struct GTY(()) extern_symbol
9984 tree decl;
9985 const char *name;
9986 } extern_symbol;
9988 /* Define gc'd vector type for extern_symbol. */
9990 /* Vector of extern_symbol pointers. */
9991 static GTY(()) vec<extern_symbol, va_gc> *extern_symbols;
9993 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9994 /* Mark DECL (name NAME) as an external reference (assembler output
9995 file FILE). This saves the names to output at the end of the file
9996 if actually referenced. */
9999 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
10001 gcc_assert (file == asm_out_file);
10002 extern_symbol p = {decl, name};
10003 vec_safe_push (extern_symbols, p);
10006 /* Output text required at the end of an assembler file.
10007 This includes deferred plabels and .import directives for
10008 all external symbols that were actually referenced. */
10011 pa_hpux_file_end (void)
10016 if (!NO_DEFERRED_PROFILE_COUNTERS)
10017 output_deferred_profile_counters ();
10019 output_deferred_plabels ();
10021 for (i = 0; vec_safe_iterate (extern_symbols, i, &p); i++)
10023 tree decl = p->decl;
10025 if (!TREE_ASM_WRITTEN (decl)
10026 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
10027 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
10030 vec_free (extern_symbols);
10034 /* Return true if a change from mode FROM to mode TO for a register
10035 in register class RCLASS is invalid. */
10038 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
10039 enum reg_class rclass)
10044 /* Reject changes to/from complex and vector modes. */
10045 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
10046 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
10049 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
10052 /* There is no way to load QImode or HImode values directly from
10053 memory. SImode loads to the FP registers are not zero extended.
10054 On the 64-bit target, this conflicts with the definition of
10055 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
10056 with different sizes in the floating-point registers. */
10057 if (MAYBE_FP_REG_CLASS_P (rclass))
10060 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
10061 in specific sets of registers. Thus, we cannot allow changing
10062 to a larger mode when it's larger than a word. */
10063 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
10064 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
10070 /* Returns TRUE if it is a good idea to tie two pseudo registers
10071 when one has mode MODE1 and one has mode MODE2.
10072 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
10073 for any hard reg, then this must be FALSE for correct output.
10075 We should return FALSE for QImode and HImode because these modes
10076 are not ok in the floating-point registers. However, this prevents
10077 tieing these modes to SImode and DImode in the general registers.
10078 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
10079 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
10080 in the floating-point registers. */
10083 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
10085 /* Don't tie modes in different classes. */
10086 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
10093 /* Length in units of the trampoline instruction code. */
10095 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
10098 /* Output assembler code for a block containing the constant parts
10099 of a trampoline, leaving space for the variable parts.
10101 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
10102 and then branches to the specified routine.
10104 This code template is copied from text segment to stack location
10105 and then patched with pa_trampoline_init to contain valid values,
10106 and then entered as a subroutine.
10108 It is best to keep this as small as possible to avoid having to
10109 flush multiple lines in the cache. */
10112 pa_asm_trampoline_template (FILE *f)
10116 fputs ("\tldw 36(%r22),%r21\n", f);
10117 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10118 if (ASSEMBLER_DIALECT == 0)
10119 fputs ("\tdepi 0,31,2,%r21\n", f);
10121 fputs ("\tdepwi 0,31,2,%r21\n", f);
10122 fputs ("\tldw 4(%r21),%r19\n", f);
10123 fputs ("\tldw 0(%r21),%r21\n", f);
10126 fputs ("\tbve (%r21)\n", f);
10127 fputs ("\tldw 40(%r22),%r29\n", f);
10128 fputs ("\t.word 0\n", f);
10129 fputs ("\t.word 0\n", f);
10133 fputs ("\tldsid (%r21),%r1\n", f);
10134 fputs ("\tmtsp %r1,%sr0\n", f);
10135 fputs ("\tbe 0(%sr0,%r21)\n", f);
10136 fputs ("\tldw 40(%r22),%r29\n", f);
10138 fputs ("\t.word 0\n", f);
10139 fputs ("\t.word 0\n", f);
10140 fputs ("\t.word 0\n", f);
10141 fputs ("\t.word 0\n", f);
10145 fputs ("\t.dword 0\n", f);
10146 fputs ("\t.dword 0\n", f);
10147 fputs ("\t.dword 0\n", f);
10148 fputs ("\t.dword 0\n", f);
10149 fputs ("\tmfia %r31\n", f);
10150 fputs ("\tldd 24(%r31),%r1\n", f);
10151 fputs ("\tldd 24(%r1),%r27\n", f);
10152 fputs ("\tldd 16(%r1),%r1\n", f);
10153 fputs ("\tbve (%r1)\n", f);
10154 fputs ("\tldd 32(%r31),%r31\n", f);
10155 fputs ("\t.dword 0 ; fptr\n", f);
10156 fputs ("\t.dword 0 ; static link\n", f);
10160 /* Emit RTL insns to initialize the variable parts of a trampoline.
10161 FNADDR is an RTX for the address of the function's pure code.
10162 CXT is an RTX for the static chain value for the function.
10164 Move the function address to the trampoline template at offset 36.
10165 Move the static chain value to trampoline template at offset 40.
10166 Move the trampoline address to trampoline template at offset 44.
10167 Move r19 to trampoline template at offset 48. The latter two
10168 words create a plabel for the indirect call to the trampoline.
10170 A similar sequence is used for the 64-bit port but the plabel is
10171 at the beginning of the trampoline.
10173 Finally, the cache entries for the trampoline code are flushed.
10174 This is necessary to ensure that the trampoline instruction sequence
10175 is written to memory prior to any attempts at prefetching the code
10179 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10181 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10182 rtx start_addr = gen_reg_rtx (Pmode);
10183 rtx end_addr = gen_reg_rtx (Pmode);
10184 rtx line_length = gen_reg_rtx (Pmode);
10187 emit_block_move (m_tramp, assemble_trampoline_template (),
10188 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10189 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10193 tmp = adjust_address (m_tramp, Pmode, 36);
10194 emit_move_insn (tmp, fnaddr);
10195 tmp = adjust_address (m_tramp, Pmode, 40);
10196 emit_move_insn (tmp, chain_value);
10198 /* Create a fat pointer for the trampoline. */
10199 tmp = adjust_address (m_tramp, Pmode, 44);
10200 emit_move_insn (tmp, r_tramp);
10201 tmp = adjust_address (m_tramp, Pmode, 48);
10202 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10204 /* fdc and fic only use registers for the address to flush,
10205 they do not accept integer displacements. We align the
10206 start and end addresses to the beginning of their respective
10207 cache lines to minimize the number of lines flushed. */
10208 emit_insn (gen_andsi3 (start_addr, r_tramp,
10209 GEN_INT (-MIN_CACHELINE_SIZE)));
10210 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp,
10211 TRAMPOLINE_CODE_SIZE-1));
10212 emit_insn (gen_andsi3 (end_addr, tmp,
10213 GEN_INT (-MIN_CACHELINE_SIZE)));
10214 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10215 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10216 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10217 gen_reg_rtx (Pmode),
10218 gen_reg_rtx (Pmode)));
10222 tmp = adjust_address (m_tramp, Pmode, 56);
10223 emit_move_insn (tmp, fnaddr);
10224 tmp = adjust_address (m_tramp, Pmode, 64);
10225 emit_move_insn (tmp, chain_value);
10227 /* Create a fat pointer for the trampoline. */
10228 tmp = adjust_address (m_tramp, Pmode, 16);
10229 emit_move_insn (tmp, force_reg (Pmode, plus_constant (Pmode,
10231 tmp = adjust_address (m_tramp, Pmode, 24);
10232 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10234 /* fdc and fic only use registers for the address to flush,
10235 they do not accept integer displacements. We align the
10236 start and end addresses to the beginning of their respective
10237 cache lines to minimize the number of lines flushed. */
10238 tmp = force_reg (Pmode, plus_constant (Pmode, r_tramp, 32));
10239 emit_insn (gen_anddi3 (start_addr, tmp,
10240 GEN_INT (-MIN_CACHELINE_SIZE)));
10241 tmp = force_reg (Pmode, plus_constant (Pmode, tmp,
10242 TRAMPOLINE_CODE_SIZE - 1));
10243 emit_insn (gen_anddi3 (end_addr, tmp,
10244 GEN_INT (-MIN_CACHELINE_SIZE)));
10245 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10246 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10247 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10248 gen_reg_rtx (Pmode),
10249 gen_reg_rtx (Pmode)));
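/* Editor's sketch of the flush arithmetic above, assuming a cache
   line of 32 bytes: for trampoline code starting at 0x1005 with
   TRAMPOLINE_CODE_SIZE 40, start_addr = 0x1005 & -32 = 0x1000 and
   end_addr = (0x1005 + 39) & -32 = 0x1020, so the fdc/fic loops touch
   lines 0x1000 and 0x1020, every line that holds trampoline code.  */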
10253 /* Perform any machine-specific adjustment in the address of the trampoline.
10254 ADDR contains the address that was passed to pa_trampoline_init.
10255 Adjust the trampoline address to point to the plabel at offset 44. */
10258 pa_trampoline_adjust_address (rtx addr)
10261 addr = memory_address (Pmode, plus_constant (Pmode, addr, 46));
10266 pa_delegitimize_address (rtx orig_x)
10268 rtx x = delegitimize_mem_from_attrs (orig_x);
10270 if (GET_CODE (x) == LO_SUM
10271 && GET_CODE (XEXP (x, 1)) == UNSPEC
10272 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10273 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10278 pa_internal_arg_pointer (void)
10280 /* The argument pointer and the hard frame pointer are the same in
10281 the 32-bit runtime, so we don't need a copy. */
10282 if (TARGET_64BIT)
10283 return copy_to_reg (virtual_incoming_args_rtx);
10285 return virtual_incoming_args_rtx;
10288 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10289 Frame pointer elimination is automatically handled. */
10292 pa_can_eliminate (const int from, const int to)
10294 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10295 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10298 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10299 ? ! frame_pointer_needed
10303 /* Define the offset between two registers, FROM to be eliminated and its
10304 replacement TO, at the start of a routine. */
10306 pa_initial_elimination_offset (int from, int to)
10308 HOST_WIDE_INT offset;
10310 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10311 && to == STACK_POINTER_REGNUM)
10312 offset = -pa_compute_frame_size (get_frame_size (), 0);
10313 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10316 gcc_unreachable ();
10322 pa_conditional_register_usage (void)
10326 if (!TARGET_64BIT && !TARGET_PA_11)
10328 for (i = 56; i <= FP_REG_LAST; i++)
10329 fixed_regs[i] = call_used_regs[i] = 1;
10330 for (i = 33; i < 56; i += 2)
10331 fixed_regs[i] = call_used_regs[i] = 1;
10333 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10335 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10336 fixed_regs[i] = call_used_regs[i] = 1;
10339 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10342 /* Target hook for c_mode_for_suffix. */
10344 static enum machine_mode
10345 pa_c_mode_for_suffix (char suffix)
10347 if (HPUX_LONG_DOUBLE_LIBRARY)
10356 /* Target hook for function_section. */
10359 pa_function_section (tree decl, enum node_frequency freq,
10360 bool startup, bool exit)
10362 /* Put functions in text section if target doesn't have named sections. */
10363 if (!targetm_common.have_named_sections)
10364 return text_section;
10366 /* Force nested functions into the same section as the containing
10369 && DECL_SECTION_NAME (decl) == NULL_TREE
10370 && DECL_CONTEXT (decl) != NULL_TREE
10371 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10372 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL_TREE)
10373 return function_section (DECL_CONTEXT (decl));
10375 /* Otherwise, use the default function section. */
10376 return default_function_section (decl, freq, startup, exit);
10379 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10381 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10382 that need more than three instructions to load prior to reload. This
10383 limit is somewhat arbitrary. It takes three instructions to load a
10384 CONST_INT from memory but two are memory accesses. It may be better
10385 to increase the allowed range for CONST_INTS. We may also be able
10386 to handle CONST_DOUBLES. */
10389 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10391 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10394 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10397 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10398 legitimate constants. The other variants can't be handled by
10399 the move patterns after reload starts. */
10400 if (PA_SYMBOL_REF_TLS_P (x))
10403 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10407 && HOST_BITS_PER_WIDE_INT > 32
10408 && GET_CODE (x) == CONST_INT
10409 && !reload_in_progress
10410 && !reload_completed
10411 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10412 && !pa_cint_ok_for_move (INTVAL (x)))
10415 if (function_label_operand (x, mode))
10421 /* Implement TARGET_SECTION_TYPE_FLAGS. */
10423 static unsigned int
10424 pa_section_type_flags (tree decl, const char *name, int reloc)
10426 unsigned int flags;
10428 flags = default_section_type_flags (decl, name, reloc);
10430 /* Function labels are placed in the constant pool. This can
10431 cause a section conflict if decls are put in ".data.rel.ro"
10432 or ".data.rel.ro.local" using the __attribute__ construct. */
10433 if (strcmp (name, ".data.rel.ro") == 0
10434 || strcmp (name, ".data.rel.ro.local") == 0)
10435 flags |= SECTION_WRITE | SECTION_RELRO;
/* pa_legitimate_address_p recognizes an RTL expression that is a
   valid memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   On HP PA-RISC, the legitimate address forms are REG+SMALLINT,
   REG+REG, and REG+(REG*SCALE).  The indexed address forms are only
   available with floating point loads and stores, and integer loads.
   We get better code by allowing indexed addresses in the initial
   RTL generation.

   The acceptance of indexed addresses as legitimate implies that we
   must provide patterns for doing indexed integer stores, or the move
   expanders must force the address of an indexed store to a register.
   We have adopted the latter approach.

   Another function of pa_legitimate_address_p is to ensure that
   the base register is a valid pointer for indexed instructions.
   On targets that have non-equivalent space registers, we have to
   know at the time of assembler output which register in a REG+REG
   pair is the base register.  The REG_POINTER flag is sometimes lost
   in reload and the following passes, so it can't be relied on during
   code generation.  Thus, we either have to canonicalize the order
   of the registers in REG+REG indexed addresses, or treat REG+REG
   addresses separately and provide patterns for both permutations.

   The latter approach requires several hundred additional lines of
   code in pa.md.  The downside to canonicalizing is that a PLUS
   in the wrong order can't combine to form a scaled indexed
   memory operand.  As we won't need to canonicalize the operands if
   the REG_POINTER lossage can be fixed, it seems better to canonicalize.

   We initially break out scaled indexed addresses in canonical order
   in pa_emit_move_sequence.  LEGITIMIZE_ADDRESS also canonicalizes
   scaled indexed addresses during RTL generation.  However, fold_rtx
   has its own opinion on how the operands of a PLUS should be ordered.
   If one of the operands is equivalent to a constant, it will make
   that operand the second operand.  As the base register is likely to
   be equivalent to a SYMBOL_REF, we have made it the second operand.

   pa_legitimate_address_p accepts REG+REG as legitimate when the
   operands are in the order INDEX+BASE on targets with non-equivalent
   space registers, and in any order on targets with equivalent space
   registers.  It accepts both MULT+BASE and BASE+MULT for scaled indexing.

   We treat a SYMBOL_REF as legitimate if it is part of the current
   function's constant-pool, because such addresses can actually be
   output as REG+SMALLINT.  */
static bool
pa_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  if ((REG_P (x)
       && (strict ? STRICT_REG_OK_FOR_BASE_P (x)
		  : REG_OK_FOR_BASE_P (x)))
      || ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC
	   || GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC)
	  && REG_P (XEXP (x, 0))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
		     : REG_OK_FOR_BASE_P (XEXP (x, 0)))))
    return true;

  if (GET_CODE (x) == PLUS)
    {
      rtx base = 0, index = 0;

      /* For REG+REG, the base register should be in XEXP (x, 1),
	 so check it first.  */
      if (REG_P (XEXP (x, 1))
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 1))
		     : REG_OK_FOR_BASE_P (XEXP (x, 1))))
	base = XEXP (x, 1), index = XEXP (x, 0);
      else if (REG_P (XEXP (x, 0))
	       && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0))
			  : REG_OK_FOR_BASE_P (XEXP (x, 0))))
	base = XEXP (x, 0), index = XEXP (x, 1);
      else
	return false;
      if (GET_CODE (index) == CONST_INT)
	{
	  if (INT_5_BITS (index))
	    return true;

	  /* When INT14_OK_STRICT is false, a secondary reload is needed
	     to adjust the displacement of SImode and DImode floating point
	     instructions.  So, we return false when STRICT is true.  We
	     also reject long displacements for float mode addresses since
	     the majority of accesses will use floating point instructions
	     that don't support 14-bit offsets.  */
	  if (!INT14_OK_STRICT
	      && reload_in_progress
	      && strict
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  return base14_operand (index, mode);
	}
      if (!TARGET_DISABLE_INDEXING
	  /* Only accept the "canonical" INDEX+BASE operand order
	     on targets with non-equivalent space registers.  */
	  && (TARGET_NO_SPACE_REGS
	      ? REG_P (index)
	      : (base == XEXP (x, 1) && REG_P (index)
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (base))
		     || REG_POINTER (base))
		 && (reload_completed
		     || (reload_in_progress && HARD_REGISTER_P (index))
		     || !REG_POINTER (index))))
	  && MODE_OK_FOR_UNSCALED_INDEXING_P (mode)
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (index)
		     : REG_OK_FOR_INDEX_P (index))
	  && borx_reg_operand (base, Pmode)
	  && borx_reg_operand (index, Pmode))
	return true;

      if (!TARGET_DISABLE_INDEXING
	  && GET_CODE (index) == MULT
	  && MODE_OK_FOR_SCALED_INDEXING_P (mode)
	  && REG_P (XEXP (index, 0))
	  && GET_MODE (XEXP (index, 0)) == Pmode
	  && (strict ? STRICT_REG_OK_FOR_INDEX_P (XEXP (index, 0))
		     : REG_OK_FOR_INDEX_P (XEXP (index, 0)))
	  && GET_CODE (XEXP (index, 1)) == CONST_INT
	  && INTVAL (XEXP (index, 1))
	     == (HOST_WIDE_INT) GET_MODE_SIZE (mode)
	  && borx_reg_operand (base, Pmode))
	return true;

      return false;
    }
  if (GET_CODE (x) == LO_SUM)
    {
      rtx y = XEXP (x, 0);

      if (GET_CODE (y) == SUBREG)
	y = SUBREG_REG (y);

      if (REG_P (y)
	  && (strict ? STRICT_REG_OK_FOR_BASE_P (y)
		     : REG_OK_FOR_BASE_P (y)))
	{
	  /* Needed for -fPIC */
	  if (mode == Pmode
	      && GET_CODE (XEXP (x, 1)) == UNSPEC)
	    return true;
	  if (!INT14_OK_STRICT
	      && reload_in_progress
	      && strict
	      && mode != QImode
	      && mode != HImode)
	    return false;

	  if (CONSTANT_P (XEXP (x, 1)))
	    return true;
	}
      return false;
    }

  if (GET_CODE (x) == CONST_INT && INT_5_BITS (x))
    return true;

  return false;
}
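/* Illustrative examples (not from the original sources) of SImode
   addresses accepted above, where BASE is a register with the
   REG_POINTER flag set and INDEX is another register:

       (plus BASE (const_int 12))			REG+SMALLINT
       (plus INDEX BASE)				unscaled indexing
       (plus (mult INDEX (const_int 4)) BASE)		scaled indexing

   On targets with non-equivalent space registers, the unscaled form
   is only accepted in the INDEX+BASE operand order shown.  */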
/* Look for machine dependent ways to make the invalid address AD a
   valid one.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= (mask + 1) / 2
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This makes reload inheritance and reload_cse work better since Z
   can be reused.

   There may be more opportunities to improve code with this hook.  */
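/* Worked example (not from the original sources): for an SFmode access
   with !INT14_OK_STRICT, mask is 0x1f.  For X + 0x1234, the low bits
   0x1234 & 0x1f are 0x14, which is >= 0x10, so we round up:

	Y = (0x1234 & ~0x1f) + 0x20 = 0x1240
	Z = X + 0x1240			   fits a 14-bit ldo displacement
	memory (Z + (0x1234 - 0x1240))	   residual -12 fits the 5-bit
					   FP load/store displacement  */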
bool
pa_legitimize_reload_address (rtx ad, enum machine_mode mode,
			      int opnum, int type,
			      int ind_levels ATTRIBUTE_UNUSED)
{
  long offset, newoffset, mask;
  rtx new_rtx, temp = NULL_RTX;

  mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	  && !INT14_OK_STRICT ? 0x1f : 0x3fff);

  if (optimize && GET_CODE (ad) == PLUS)
    temp = simplify_binary_operation (PLUS, Pmode,
				      XEXP (ad, 0), XEXP (ad, 1));

  new_rtx = temp ? temp : ad;

  if (optimize
      && GET_CODE (new_rtx) == PLUS
      && GET_CODE (XEXP (new_rtx, 0)) == REG
      && GET_CODE (XEXP (new_rtx, 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP ((new_rtx), 1));

      /* Choose rounding direction.  Round up if we are >= halfway.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~mask) + mask + 1;
      else
	newoffset = offset & ~mask;

      /* Ensure that long displacements are aligned.  */
      if (mask == 0x3fff
	  && (GET_MODE_CLASS (mode) == MODE_FLOAT
	      || (TARGET_64BIT && (mode) == DImode)))
	newoffset &= ~(GET_MODE_SIZE (mode) - 1);

      if (newoffset != 0 && VAL_14_BITS_P (newoffset))
	{
	  temp = gen_rtx_PLUS (Pmode, XEXP (new_rtx, 0),
			       GEN_INT (newoffset));
	  ad = gen_rtx_PLUS (Pmode, temp, GEN_INT (offset - newoffset));
	  push_reload (XEXP (ad, 0), 0, &XEXP (ad, 0), 0,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);