/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "tree.h"
#include "expr.h"
#include "reload.h"
#include "integrate.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "target.h"
#include "common/common-target.h"
#include "target-def.h"
#include "langhooks.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
          && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void pa_option_override (void);
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static int hppa_register_move_cost (enum machine_mode mode, reg_class_t,
                                    reg_class_t);
static int hppa_address_cost (rtx, bool);
static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static bool forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static rtx pa_function_value (const_tree, const_tree, bool);
static rtx pa_libcall_value (enum machine_mode, const_rtx);
static bool pa_function_value_regno_p (const unsigned int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx pa_expand_builtin (tree, rtx, rtx, enum machine_mode mode, int);
static rtx hppa_builtin_saveregs (void);
static void hppa_va_start (tree, rtx);
static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (const_rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#if HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (cumulative_args_t, enum machine_mode,
                                  const_tree, bool);
static int pa_arg_partial_bytes (cumulative_args_t, enum machine_mode,
                                 tree, bool);
static void pa_function_arg_advance (cumulative_args_t, enum machine_mode,
                                     const_tree, bool);
static rtx pa_function_arg (cumulative_args_t, enum machine_mode,
                            const_tree, bool);
static unsigned int pa_function_arg_boundary (enum machine_mode, const_tree);
static struct machine_function * pa_init_machine_status (void);
static reg_class_t pa_secondary_reload (bool, rtx, reg_class_t,
                                        enum machine_mode,
                                        secondary_reload_info *);
static void pa_extra_live_on_entry (bitmap);
static enum machine_mode pa_promote_function_mode (const_tree,
                                                   enum machine_mode, int *,
                                                   const_tree, int);

static void pa_asm_trampoline_template (FILE *);
static void pa_trampoline_init (rtx, tree, rtx);
static rtx pa_trampoline_adjust_address (rtx);
static rtx pa_delegitimize_address (rtx);
static bool pa_print_operand_punct_valid_p (unsigned char);
static rtx pa_internal_arg_pointer (void);
static bool pa_can_eliminate (const int, const int);
static void pa_conditional_register_usage (void);
static enum machine_mode pa_c_mode_for_suffix (char);
static section *pa_function_section (tree, enum node_frequency, bool, bool);
static bool pa_cannot_force_const_mem (enum machine_mode, rtx);
static bool pa_legitimate_constant_p (enum machine_mode, rtx);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

/* Boolean indicating whether the return pointer was saved by the
   current function's prologue.  */
static bool rp_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static unsigned int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct GTY(()) deferred_plabel
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels = NULL;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE pa_option_override

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE pa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE pa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P pa_function_value_regno_p

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P pa_print_operand_punct_valid_p

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN pa_expand_builtin

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST hppa_register_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#if HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE pa_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG pa_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE pa_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY pa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_cannot_force_const_mem

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE pa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT pa_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS pa_trampoline_adjust_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS pa_delegitimize_address
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER pa_internal_arg_pointer
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE pa_can_eliminate
#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE pa_conditional_register_usage
#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX pa_c_mode_for_suffix
#undef TARGET_ASM_FUNCTION_SECTION
#define TARGET_ASM_FUNCTION_SECTION pa_function_section

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P pa_legitimate_constant_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
        {
          warning (0, "value of -mfixed-range must have form REG1-REG2");
          return;
        }
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
        *comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
        {
          warning (0, "unknown register name: %s", str);
          return;
        }

      last = decode_reg_name (dash + 1);
      if (last < 0)
        {
          warning (0, "unknown register name: %s", dash + 1);
          return;
        }

      *dash = '-';

      if (first > last)
        {
          warning (0, "%s-%s is an empty range", str, dash + 1);
          return;
        }

      for (i = first; i <= last; ++i)
        fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
        break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
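
/* Illustrative note (not part of the original source): for example,
   -mfixed-range=fr4-fr31 marks fr4 through fr31 as fixed so generated
   code never uses them (useful for kernel builds), and several ranges
   may be given separated by commas, e.g. -mfixed-range=fr4-fr31,r20-r23.
   When every FP register ends up fixed, the check above also turns on
   MASK_DISABLE_FPREGS.  */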
/* Implement the TARGET_OPTION_OVERRIDE hook.  */

static void
pa_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  VEC(cl_deferred_option,heap) *vec
    = (VEC(cl_deferred_option,heap) *) pa_deferred_options;

  FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
    {
      switch (opt->opt_index)
        {
        case OPT_mfixed_range_:
          fix_range (opt->arg);
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (targetm_common.except_unwind_info (&global_options) == UI_DWARF2
          && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* Disable -freorder-blocks-and-partition as we don't support hot and
     cold partitioning.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition does not work "
              "on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
enum pa_builtins
{
  PA_BUILTIN_COPYSIGNQ,
  PA_BUILTIN_FABSQ,
  PA_BUILTIN_INFQ,
  PA_BUILTIN_HUGE_VALQ,
  PA_BUILTIN_max
};

static GTY(()) tree pa_builtins[(int) PA_BUILTIN_max];
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  {
    tree decl = builtin_decl_explicit (BUILT_IN_PUTC_UNLOCKED);
    set_builtin_decl (BUILT_IN_FPUTC_UNLOCKED, decl,
                      builtin_decl_implicit_p (BUILT_IN_PUTC_UNLOCKED));
  }
#endif
#if TARGET_HPUX_11
  {
    tree decl;

    if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinite");
    if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
      set_user_assembler_name (decl, "_Isfinitef");
  }
#endif

  if (HPUX_LONG_DOUBLE_LIBRARY)
    {
      tree decl, ftype;

      /* Under HPUX, the __float128 type is a synonym for "long double".  */
      (*lang_hooks.types.register_builtin_type) (long_double_type_node,
                                                 "__float128");

      /* TFmode support builtins.  */
      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_fabsq", ftype,
                                   PA_BUILTIN_FABSQ, BUILT_IN_MD,
                                   "_U_Qfabs", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_FABSQ] = decl;

      ftype = build_function_type_list (long_double_type_node,
                                        long_double_type_node,
                                        long_double_type_node,
                                        NULL_TREE);
      decl = add_builtin_function ("__builtin_copysignq", ftype,
                                   PA_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
                                   "_U_Qfcopysign", NULL_TREE);
      TREE_READONLY (decl) = 1;
      pa_builtins[PA_BUILTIN_COPYSIGNQ] = decl;

      ftype = build_function_type_list (long_double_type_node, NULL_TREE);
      decl = add_builtin_function ("__builtin_infq", ftype,
                                   PA_BUILTIN_INFQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_INFQ] = decl;

      decl = add_builtin_function ("__builtin_huge_valq", ftype,
                                   PA_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
                                   NULL, NULL_TREE);
      pa_builtins[PA_BUILTIN_HUGE_VALQ] = decl;
    }
}
static rtx
pa_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED,
                   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    case PA_BUILTIN_FABSQ:
    case PA_BUILTIN_COPYSIGNQ:
      return expand_call (exp, target, ignore);

    case PA_BUILTIN_INFQ:
    case PA_BUILTIN_HUGE_VALQ:
      {
        enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;

        real_inf (&inf);
        tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);

        tmp = validize_mem (force_const_mem (target_mode, tmp));

        if (target == 0)
          target = gen_reg_rtx (target_mode);

        emit_move_insn (target, tmp);
        return target;
      }

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{

  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (VAL_14_BITS_P (ival)
          || ldil_cint_p (ival)
          || zdepi_cint_p (ival));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}
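
/* Illustrative examples (not part of the original source): ldil sets
   the left 21 bits of a word, so the low 11 bits of the constant must
   be zero.  0x12345800 is accepted (low 11 bits zero); 0x12345678 is
   rejected (nonzero low 11 bits); and on a 64-bit host 0x80000000 is
   rejected because zero-extending it would change its sign, while the
   sign-extended value (HOST_WIDE_INT) -1 << 31 is accepted.  */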
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
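
/* Illustrative examples (not part of the original source): for
   x = 0x1e0 (0b111100000), lsb_mask = 0x20 and
   t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1) = 0x20, a power of two,
   so zdepi can build the value (a field of ones deposited at bit 5).
   For x = 0x41 (0b1000001), t = 5, not a power of two, so zdepi
   cannot: bit 6 cannot come from the sign extension of a 5-bit field
   whose sign bit is clear.  */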
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
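
/* Illustrative examples (not part of the original source): the
   "mask += mask & -mask" idiom adds the lowest set bit to the value,
   so when the set bits form one contiguous run the addition carries
   into a single bit and the power-of-two test succeeds.  E.g.
   ior_mask_p (0x00ff0000) holds: 0x00ff0000 + 0x00010000
   = 0x01000000.  ior_mask_p (0x00ff00ff) fails: only the low run
   carries, leaving 0x00ff0100.  and_mask_p complements first, so it
   accepts masks such as ~0xff00 whose zero bits form one run.  */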
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

static rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      rtx insn;

      /* We do not want to go through the movXX expanders here since that
         would create recursion.

         Nor do we really want to call a generator for a named pattern
         since that requires multiple patterns if we want to support
         multiple word sizes.

         So instead we just emit the raw set, which avoids the movXX
         expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      add_reg_note (insn, REG_EQUAL, orig);

      /* During and after reload, we need to generate a REG_LABEL_OPERAND note
         and update LABEL_NUSES because this is not done automatically.  */
      if (reload_in_progress || reload_completed)
        {
          /* Extract LABEL_REF.  */
          if (GET_CODE (orig) == CONST)
            orig = XEXP (XEXP (orig, 0), 0);
          /* Extract CODE_LABEL.  */
          orig = XEXP (orig, 0);
          add_reg_note (insn, REG_LABEL_OPERAND, orig);
          LABEL_NUSES (orig)++;
        }
      crtl->uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
         result.  This allows the sequence to be deleted when the final
         result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
                 ? reg : gen_reg_rtx (Pmode));

      if (function_label_operand (orig, VOIDmode))
        {
          /* Force function label into memory in word mode.  */
          orig = XEXP (force_const_mem (word_mode, orig), 0);
          /* Load plabel address from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
          emit_move_insn (reg, pic_ref);
          /* Now load address of function descriptor.  */
          pic_ref = gen_rtx_MEM (Pmode, reg);
        }
      else
        {
          /* Load symbol reference from DLT.  */
          emit_move_insn (tmp_reg,
                          gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
                                        gen_rtx_HIGH (word_mode, orig)));
          pic_ref
            = gen_const_mem (Pmode,
                             gen_rtx_LO_SUM (Pmode, tmp_reg,
                                             gen_rtx_UNSPEC (Pmode,
                                                             gen_rtvec (1, orig),
                                                             UNSPEC_DLTIND14R)));
        }

      crtl->uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
        {
          if (INT_14_BITS (orig))
            return plus_constant (base, INTVAL (orig));
          orig = force_reg (Pmode, orig);
        }
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
                           LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
      case TLS_MODEL_GLOBAL_DYNAMIC:
        tmp = gen_reg_rtx (Pmode);
        if (flag_pic)
          emit_insn (gen_tgd_load_pic (tmp, addr));
        else
          emit_insn (gen_tgd_load (tmp, addr));
        ret = hppa_tls_call (tmp);
        break;

      case TLS_MODEL_LOCAL_DYNAMIC:
        ret = gen_reg_rtx (Pmode);
        tmp = gen_reg_rtx (Pmode);
        start_sequence ();
        if (flag_pic)
          emit_insn (gen_tld_load_pic (tmp, addr));
        else
          emit_insn (gen_tld_load (tmp, addr));
        t1 = hppa_tls_call (tmp);
        insn = get_insns ();
        end_sequence ();
        t2 = gen_reg_rtx (Pmode);
        emit_libcall_block (insn, t2, t1,
                            gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                            UNSPEC_TLSLDBASE));
        emit_insn (gen_tld_offset_load (ret, addr, t2));
        break;

      case TLS_MODEL_INITIAL_EXEC:
        tp = gen_reg_rtx (Pmode);
        tmp = gen_reg_rtx (Pmode);
        ret = gen_reg_rtx (Pmode);
        emit_insn (gen_tp_load (tp));
        if (flag_pic)
          emit_insn (gen_tie_load_pic (tmp, addr));
        else
          emit_insn (gen_tie_load (tmp, addr));
        emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
        break;

      case TLS_MODEL_LOCAL_EXEC:
        tp = gen_reg_rtx (Pmode);
        ret = gen_reg_rtx (Pmode);
        emit_insn (gen_tp_load (tp));
        emit_insn (gen_tle_load (ret, addr, tp));
        break;

      default:
        gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1  Round up.
        else
          Y = (<large int> & ~mask)             Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
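
/* Worked example (not part of the original source): for a MODE_INT
   reference the mask is 0x3fff.  Given memory (X + 70000), the offset
   70000 = 0x11170 has (offset & mask) = 0x1170, below the halfway
   point 0x2000, so we round down to Y = 0x10000.  We emit Z = X + 65536
   and rewrite the reference as memory (Z + 4464).  A nearby reference
   such as X + 70100 becomes memory (Z + 4564) with the same Z, so CSE
   can share the Z computation and both residual displacements fit in
   14 bits.  */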
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
           && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
          || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
              ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
         are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
        newoffset = (offset & ~ mask) + mask + 1;
      else
        newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
         handling this would take 4 or 5 instructions (2 to load
         the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
         add the new offset and the SYMBOL_REF.)  Combine can
         not handle 4->2 or 5->2 combinations, so do not create
         them.  */
      if (! VAL_14_BITS_P (newoffset)
          && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
        {
          rtx const_part = plus_constant (XEXP (x, 0), newoffset);
          rtx tmp_reg
            = force_reg (Pmode,
                         gen_rtx_HIGH (Pmode, const_part));
          ptr_reg
            = force_reg (Pmode,
                         gen_rtx_LO_SUM (Pmode,
                                         tmp_reg, const_part));
        }
      else
        {
          if (! VAL_14_BITS_P (newoffset))
            int_part = force_reg (Pmode, GEN_INT (newoffset));
          else
            int_part = GEN_INT (newoffset);

          ptr_reg = force_reg (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             force_reg (Pmode, XEXP (x, 0)),
                                             int_part));
        }
      return plus_constant (ptr_reg, offset - newoffset);
    }
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
          || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                             gen_rtx_MULT (Pmode,
                                                           reg2,
                                                           GEN_INT (val)),
                                             reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
         then emit_move_sequence will turn on REG_POINTER so we'll know
         it's a base register below.  */
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
          && REG_POINTER (reg1))
        {
          base = reg1;
          idx = gen_rtx_PLUS (Pmode,
                              gen_rtx_MULT (Pmode,
                                            XEXP (XEXP (XEXP (x, 0), 0), 0),
                                            XEXP (XEXP (XEXP (x, 0), 0), 1)),
                              XEXP (x, 1));
        }
      else if (GET_CODE (reg2) == REG
               && REG_POINTER (reg2))
        {
          base = reg2;
          idx = XEXP (x, 0);
        }

      if (base == 0)
        return orig;

      /* If the index adds a large constant, try to scale the
         constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
                            / INTVAL (XEXP (XEXP (idx, 0), 1)))
          && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
        {
          /* Divide the CONST_INT by the scale factor, then add it to A.  */
          int val = INTVAL (XEXP (idx, 1));

          val /= INTVAL (XEXP (XEXP (idx, 0), 1));
          reg1 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg1) != REG)
            reg1 = force_reg (Pmode, force_operand (reg1, 0));

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

          /* We can now generate a simple scaled indexed address.  */
          return
            force_reg
              (Pmode, gen_rtx_PLUS (Pmode,
                                    gen_rtx_MULT (Pmode, reg1,
                                                  XEXP (XEXP (idx, 0), 1)),
                                    base));
        }

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
          && INTVAL (XEXP (idx, 1)) <= 4096
          && INTVAL (XEXP (idx, 1)) >= -4096)
        {
          int val = INTVAL (XEXP (XEXP (idx, 0), 1));
          rtx reg1, reg2;

          reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

          reg2 = XEXP (XEXP (idx, 0), 0);
          if (GET_CODE (reg2) != CONST_INT)
            reg2 = force_reg (Pmode, force_operand (reg2, 0));

          return force_reg (Pmode, gen_rtx_PLUS (Pmode,
                                                 gen_rtx_MULT (Pmode,
                                                               reg2,
                                                               GEN_INT (val)),
                                                 reg1));
        }

      /* Get the index into a register, then add the base + index and
         return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
        reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
                        gen_rtx_PLUS (Pmode,
                                      gen_rtx_MULT (Pmode, reg1,
                                                    XEXP (XEXP (idx, 0), 1)),
                                      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          /* See if this looks like
                (plus (mult (reg) (shadd_const))
                      (const (plus (symbol_ref) (const_int))))

             Where const_int is small.  In that case the const
             expression is a valid pointer for indexing.

             If const_int is big, but can be divided evenly by shadd_const
             and added to (reg).  This allows more scaled indexed addresses.  */
          if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
              && GET_CODE (XEXP (x, 0)) == MULT
              && GET_CODE (XEXP (y, 1)) == CONST_INT
              && INTVAL (XEXP (y, 1)) >= -4096
              && INTVAL (XEXP (y, 1)) <= 4095
              && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
              && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              int val = INTVAL (XEXP (XEXP (x, 0), 1));
              rtx reg1, reg2;

              reg1 = XEXP (x, 1);
              if (GET_CODE (reg1) != REG)
                reg1 = force_reg (Pmode, force_operand (reg1, 0));

              reg2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (reg2) != REG)
                reg2 = force_reg (Pmode, force_operand (reg2, 0));

              return force_reg (Pmode,
                                gen_rtx_PLUS (Pmode,
                                              gen_rtx_MULT (Pmode,
                                                            reg2,
                                                            GEN_INT (val)),
                                              reg1));
            }
          else if ((mode == DFmode || mode == SFmode)
                   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
                   && GET_CODE (XEXP (x, 0)) == MULT
                   && GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
                   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
            {
              regx1
                = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
                                             / INTVAL (XEXP (XEXP (x, 0), 1))));
              regx2 = XEXP (XEXP (x, 0), 0);
              if (GET_CODE (regx2) != REG)
                regx2 = force_reg (Pmode, force_operand (regx2, 0));
              regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                        regx2, regx1));
              return
                force_reg (Pmode,
                           gen_rtx_PLUS (Pmode,
                                         gen_rtx_MULT (Pmode, regx2,
                                                       XEXP (XEXP (x, 0), 1)),
                                         force_reg (Pmode, XEXP (y, 0))));
            }
          else if (GET_CODE (XEXP (y, 1)) == CONST_INT
                   && INTVAL (XEXP (y, 1)) >= -4096
                   && INTVAL (XEXP (y, 1)) <= 4095)
            {
              /* This is safe because of the guard page at the
                 beginning and end of the data space.  Just
                 return the original address.  */
              return orig;
            }
          else
            {
              /* Doesn't look like one we can optimize.  */
              regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
              regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
              regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
              regx1 = force_reg (Pmode,
                                 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
                                                 regx1, regy2));
              return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
            }
        }
    }

  return orig;
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Compute extra cost of moving data between one register class
   and another.

   Make moves from SAR so expensive they should never happen.  We used to
   have 0xffff here, but that generates overflow in rare cases.

   Copies involving a FP register and a non-FP register are relatively
   expensive because they must go through memory.

   Other copies are reasonably cheap.  */

static int
hppa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  if (from == SHIFT_REGS)
    return 0x100;
  else if (to == SHIFT_REGS && FP_REG_CLASS_P (from))
    return 18;
  else if ((FP_REG_CLASS_P (from) && ! FP_REG_CLASS_P (to))
           || (FP_REG_CLASS_P (to) && ! FP_REG_CLASS_P (from)))
    return 16;
  else
    return 2;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X,
                   bool speed ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case SYMBOL_REF:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
        *total = 0;
      else if (INT_14_BITS (x))
        *total = 1;
      else
        *total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
          && outer_code != SET)
        *total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
        *total = COSTS_N_INSNS (8);
      else
        *total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        {
          *total = COSTS_N_INSNS (14);
          return true;
        }
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
pa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return pa_tls_referenced_p (x);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      gcc_assert (can_create_pseudo_p ());

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
                               copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand0) == SUBREG
           && GET_CODE (SUBREG_REG (operand0)) == REG
           && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
                                 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
           && reload_in_progress && GET_CODE (operand1) == SUBREG
           && GET_CODE (SUBREG_REG (operand1)) == REG
           && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
         the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
                                 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
                                 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
          != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
          != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
           && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
                                 XEXP (operand1, 0)))
          || ((GET_CODE (operand1) == SUBREG
               && GET_CODE (XEXP (operand1, 0)) == MEM
               && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                      ? SFmode : DFmode),
                                     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
        operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
                              replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
           && fp_reg_operand (operand1, mode)
           && ((GET_CODE (operand0) == MEM
                && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                       ? SFmode : DFmode),
                                      XEXP (operand0, 0)))
               || ((GET_CODE (operand0) == SUBREG)
                   && GET_CODE (XEXP (operand0, 0)) == MEM
                   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
                                          ? SFmode : DFmode),
                                         XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
        operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
        {
          emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand0, 0), 0),
                                          scratch_reg));
        }
      else
        emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
                              replace_equiv_address (operand0, scratch_reg),
                              operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change TARGET_PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.  Fix this for 2.5.  */
  else if (scratch_reg
           && CONSTANT_P (operand1)
           && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
         it in WORD_MODE regardless of what mode it was originally given
         to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
         memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
                              replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory or a constant.  */
  else if (scratch_reg
           && GET_CODE (operand0) == REG
           && REGNO (operand0) < FIRST_PSEUDO_REGISTER
           && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
           && (GET_CODE (operand1) == MEM || GET_CODE (operand1) == CONST_INT))
    {
      /* D might not fit in 14 bits either; for such cases load D into
         scratch reg.  */
      if (GET_CODE (operand1) == MEM
          && !memory_address_p (GET_MODE (operand0), XEXP (operand1, 0)))
        {
          /* We are reloading the address into the scratch register, so we
             want to make sure the scratch register is a full register.  */
          scratch_reg = force_mode (word_mode, scratch_reg);

          emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
          emit_move_insn (scratch_reg,
                          gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
                                          Pmode,
                                          XEXP (XEXP (operand1, 0), 0),
                                          scratch_reg));

          /* Now we are going to load the scratch register from memory,
             we want to load it in the same width as the original MEM,
             which must be the same as the width of the ultimate destination,
             OPERAND0.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg,
                          replace_equiv_address (operand1, scratch_reg));
        }
      else
        {
          /* We want to load the scratch register using the same mode as
             the ultimate destination.  */
          scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

          emit_move_insn (scratch_reg, operand1);
        }

      /* And emit the insn to set the ultimate destination.  We know that
         the scratch register has the same mode as the destination at this
         point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      /* Legitimize TLS symbol references.  This happens for references
         that aren't a legitimate constant.  */
      if (PA_SYMBOL_REF_TLS_P (operand1))
        operand1 = legitimize_tls_address (operand1);

      if (register_operand (operand1, mode)
          || (GET_CODE (operand1) == CONST_INT
              && cint_ok_for_move (INTVAL (operand1)))
          || (operand1 == CONST0_RTX (mode))
          || (GET_CODE (operand1) == HIGH
              && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
          /* Only `general_operands' can come here, so MEM is ok.  */
          || GET_CODE (operand1) == MEM)
        {
          /* Various sets are created during RTL generation which don't
             have the REG_POINTER flag correctly set.  After the CSE pass,
             instruction recognition can fail if we don't consistently
             set this flag when performing register copies.  This should
             also improve the opportunities for creating insns that use
             unscaled indexing.  */
          if (REG_P (operand0) && REG_P (operand1))
            {
              if (REG_POINTER (operand1)
                  && !REG_POINTER (operand0)
                  && !HARD_REGISTER_P (operand0))
                copy_reg_pointer (operand0, operand1);
            }

          /* When MEMs are broken out, the REG_POINTER flag doesn't
             get set.  In some cases, we can set the REG_POINTER flag
             from the declaration for the MEM.  */
          if (REG_P (operand0)
              && GET_CODE (operand1) == MEM
              && !REG_POINTER (operand0))
            {
              tree decl = MEM_EXPR (operand1);

              /* Set the register pointer flag and register alignment
                 if the declaration for this memory reference is a
                 pointer type.  */
              if (decl)
                {
                  tree type;

                  /* If this is a COMPONENT_REF, use the FIELD_DECL from
                     tree operand 1.  */
                  if (TREE_CODE (decl) == COMPONENT_REF)
                    decl = TREE_OPERAND (decl, 1);

                  type = TREE_TYPE (decl);
                  type = strip_array_types (type);

                  if (POINTER_TYPE_P (type))
                    {
                      int align;

                      type = TREE_TYPE (type);
                      /* Using TYPE_ALIGN_OK is rather conservative as
                         only the ada frontend actually sets it.  */
                      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
                               : BITS_PER_UNIT);
                      mark_reg_pointer (operand0, align);
                    }
                }
            }

          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
          && !(reload_in_progress || reload_completed))
        {
          rtx temp = gen_reg_rtx (DFmode);

          emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
          emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
          return 1;
        }
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
        {
          /* Run this case quickly.  */
          emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
          return 1;
        }
      if (! (reload_in_progress || reload_completed))
        {
          operands[0] = validize_mem (operand0);
          operands[1] = operand1 = force_reg (mode, operand1);
        }
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, VOIDmode)
      || (GET_CODE (operand1) == HIGH
          && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
        {
          ishighonly = 1;
          operand1 = XEXP (operand1, 0);
        }
      if (symbolic_operand (operand1, mode))
        {
          /* Argh.  The assembler and linker can't handle arithmetic
             involving plabels.

             So we force the plabel into memory, load operand0 from
             the memory location, then add in the constant part.  */
          if ((GET_CODE (operand1) == CONST
               && GET_CODE (XEXP (operand1, 0)) == PLUS
               && function_label_operand (XEXP (XEXP (operand1, 0), 0),
                                          VOIDmode))
              || function_label_operand (operand1, VOIDmode))
            {
              rtx temp, const_part;

              /* Figure out what (if any) scratch register to use.  */
              if (reload_in_progress || reload_completed)
                {
                  scratch_reg = scratch_reg ? scratch_reg : operand0;
                  /* SCRATCH_REG will hold an address and maybe the actual
                     data.  We want it in WORD_MODE regardless of what mode it
                     was originally given to us.  */
                  scratch_reg = force_mode (word_mode, scratch_reg);
                }
              else if (flag_pic)
                scratch_reg = gen_reg_rtx (Pmode);

              if (GET_CODE (operand1) == CONST)
                {
                  /* Save away the constant part of the expression.  */
                  const_part = XEXP (XEXP (operand1, 0), 1);
                  gcc_assert (GET_CODE (const_part) == CONST_INT);

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
                }
              else
                {
                  /* No constant part.  */
                  const_part = NULL_RTX;

                  /* Force the function label into memory.  */
                  temp = force_const_mem (mode, operand1);
                }

              /* Get the address of the memory location.  PIC-ify it if
                 necessary.  */
              temp = XEXP (temp, 0);
              if (flag_pic)
                temp = legitimize_pic_address (temp, mode, scratch_reg);

              /* Put the address of the memory location into our destination
                 register.  */
              operands[1] = temp;
              emit_move_sequence (operands, mode, scratch_reg);

              /* Now load from the memory location into our destination
                 register.  */
              operands[1] = gen_rtx_MEM (Pmode, operands[0]);
              emit_move_sequence (operands, mode, scratch_reg);

              /* And add back in the constant part.  */
              if (const_part != NULL_RTX)
                expand_inc (operand0, const_part);

              return 1;
            }

          if (flag_pic)
            {
              rtx temp;

1967 if (reload_in_progress || reload_completed)
1969 temp = scratch_reg ? scratch_reg : operand0;
1970 /* TEMP will hold an address and maybe the actual
1971 data. We want it in WORD_MODE regardless of what mode it
1972 was originally given to us. */
1973 temp = force_mode (word_mode, temp);
1976 temp = gen_reg_rtx (Pmode);
1978 /* (const (plus (symbol) (const_int))) must be forced to
1979 memory during/after reload if the const_int will not fit
1981 if (GET_CODE (operand1) == CONST
1982 && GET_CODE (XEXP (operand1, 0)) == PLUS
1983 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
1984 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
1985 && (reload_completed || reload_in_progress)
1988 rtx const_mem = force_const_mem (mode, operand1);
1989 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
1991 operands[1] = replace_equiv_address (const_mem, operands[1]);
1992 emit_move_sequence (operands, mode, temp);
1996 operands[1] = legitimize_pic_address (operand1, mode, temp);
1997 if (REG_P (operand0) && REG_P (operands[1]))
1998 copy_reg_pointer (operand0, operands[1]);
1999 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
2002 /* On the HPPA, references to data space are supposed to use dp,
2003 register 27, but showing it in the RTL inhibits various cse
2004 and loop optimizations. */
2009 if (reload_in_progress || reload_completed)
2011 temp = scratch_reg ? scratch_reg : operand0;
2012 /* TEMP will hold an address and maybe the actual
2013 data. We want it in WORD_MODE regardless of what mode it
2014 was originally given to us. */
2015 temp = force_mode (word_mode, temp);
2017 else
2018 temp = gen_reg_rtx (mode);
2020 /* Loading a SYMBOL_REF into a register makes that register
2021 safe to be used as the base in an indexed address.
2023 Don't mark hard registers though. That loses. */
2024 if (GET_CODE (operand0) == REG
2025 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
2026 mark_reg_pointer (operand0, BITS_PER_UNIT);
2027 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
2028 mark_reg_pointer (temp, BITS_PER_UNIT);
2030 if (ishighonly)
2031 set = gen_rtx_SET (mode, operand0, temp);
2032 else
2033 set = gen_rtx_SET (VOIDmode,
2034 operand0,
2035 gen_rtx_LO_SUM (mode, temp, operand1));
2037 emit_insn (gen_rtx_SET (VOIDmode,
2038 temp,
2039 gen_rtx_HIGH (mode, operand1)));
2040 emit_insn (set);
2045 else if (pa_tls_referenced_p (operand1))
2047 rtx tmp = operand1;
2048 rtx addend = NULL;
2050 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
2052 addend = XEXP (XEXP (tmp, 0), 1);
2053 tmp = XEXP (XEXP (tmp, 0), 0);
2056 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
2057 tmp = legitimize_tls_address (tmp);
2058 if (addend)
2060 tmp = gen_rtx_PLUS (mode, tmp, addend);
2061 tmp = force_operand (tmp, operands[0]);
2063 operands[1] = tmp;
2065 else if (GET_CODE (operand1) != CONST_INT
2066 || !cint_ok_for_move (INTVAL (operand1)))
2070 HOST_WIDE_INT value = 0;
2071 HOST_WIDE_INT insv = 0;
2074 if (GET_CODE (operand1) == CONST_INT)
2075 value = INTVAL (operand1);
2078 && GET_CODE (operand1) == CONST_INT
2079 && HOST_BITS_PER_WIDE_INT > 32
2080 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
2084 /* Extract the low order 32 bits of the value and sign extend.
2085 If the new value is the same as the original value, we can
2086 use the original value as-is. If the new value is
2087 different, we use it and insert the most-significant 32-bits
2088 of the original value into the final result. */
2089 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
2090 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
2093 #if HOST_BITS_PER_WIDE_INT > 32
2094 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
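/* Editorial note (not in the original source): the XOR/subtract pair
   above is a branch-free sign extension of bit 31.  For example,
   value = 0x123456789 keeps nval = 0x23456789 with insv = 1, while
   value = 0x1ffffffff yields nval = -1, again with insv = 1 carrying
   the upper 32 bits for the later insertion.  */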
2098 operand1 = GEN_INT (nval);
2102 if (reload_in_progress || reload_completed)
2103 temp = scratch_reg ? scratch_reg : operand0;
2104 else
2105 temp = gen_reg_rtx (mode);
2107 /* We don't directly split DImode constants on 32-bit targets
2108 because PLUS uses an 11-bit immediate and the insn sequence
2109 generated is not as efficient as the one using HIGH/LO_SUM. */
2110 if (GET_CODE (operand1) == CONST_INT
2111 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
2112 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2115 /* Directly break constant into high and low parts. This
2116 provides better optimization opportunities because various
2117 passes recognize constants split with PLUS but not LO_SUM.
2118 We use a 14-bit signed low part except when the addition
2119 of 0x4000 to the high part might change the sign of the
2120 result. */
2121 HOST_WIDE_INT low = value & 0x3fff;
2122 HOST_WIDE_INT high = value & ~ 0x3fff;
2124 if (low >= 0x2000)
2126 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2127 high += 0x2000;
2128 else
2129 high += 0x4000;
2131 low = value - high;
2134 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2135 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
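/* Editorial example (not in the original source): value = 0x12347678
   has low = 0x3678 >= 0x2000, so high is bumped to 0x12348000 (its low
   11 bits stay clear for ldil) and low becomes value - high = -0x988,
   which fits the 14-bit signed displacement.  The 0x7fffc000 special
   case bumps by 0x2000 instead so the adjustment cannot flip the sign
   of the 32-bit high part.  */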
2137 else
2139 emit_insn (gen_rtx_SET (VOIDmode, temp,
2140 gen_rtx_HIGH (mode, operand1)));
2141 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2144 insn = emit_move_insn (operands[0], operands[1]);
2146 /* Now insert the most significant 32 bits of the value
2147 into the register. When we don't have a second register
2148 available, it could take up to nine instructions to load
2149 a 64-bit integer constant. Prior to reload, we force
2150 constants that would take more than three instructions
2151 to load to the constant pool. During and after reload,
2152 we have to handle all possible values. */
2155 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2156 register and the value to be inserted is outside the
2157 range that can be loaded with three depdi instructions. */
2158 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2160 operand1 = GEN_INT (insv);
2162 emit_insn (gen_rtx_SET (VOIDmode, temp,
2163 gen_rtx_HIGH (mode, operand1)));
2164 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2165 emit_insn (gen_insv (operand0, GEN_INT (32),
2166 GEN_INT (32), temp));
2170 int len = 5, pos = 27;
2172 /* Insert the bits using the depdi instruction. */
2173 while (pos >= 0)
2175 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2176 HOST_WIDE_INT sign = v5 < 0;
2178 /* Left extend the insertion. */
2179 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2180 while (pos > 0 && (insv & 1) == sign)
2182 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2183 len += 1;
2184 pos -= 1;
2187 emit_insn (gen_insv (operand0, GEN_INT (len),
2188 GEN_INT (pos), GEN_INT (v5)));
2190 len = pos > 0 && pos < 5 ? pos : 5;
2191 pos -= len;
2196 set_unique_reg_note (insn, REG_EQUAL, op1);
2201 /* Now have insn-emit do whatever it normally does. */
2205 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2206 it will need a link/runtime reloc). */
2209 reloc_needed (tree exp)
2211 int reloc = 0;
2213 switch (TREE_CODE (exp))
2215 case ADDR_EXPR:
2216 return 1;
2218 case POINTER_PLUS_EXPR:
2219 case PLUS_EXPR:
2220 case MINUS_EXPR:
2221 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2222 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2223 break;
2226 case NON_LVALUE_EXPR:
2227 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2228 break;
2230 case CONSTRUCTOR:
2232 tree value;
2233 unsigned HOST_WIDE_INT ix;
2235 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2236 if (value)
2237 reloc |= reloc_needed (value);
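/* Editorial example (not in the original source): a static initializer
   such as "&x + 4" is a POINTER_PLUS_EXPR over an ADDR_EXPR and so
   needs a reloc, while a plain INTEGER_CST, or a CONSTRUCTOR built
   only from such constants, does not.  */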
2251 /* Return the best assembler insn template
2252 for moving operands[1] into operands[0] as a fullword. */
2254 singlemove_string (rtx *operands)
2256 HOST_WIDE_INT intval;
2258 if (GET_CODE (operands[0]) == MEM)
2259 return "stw %r1,%0";
2260 if (GET_CODE (operands[1]) == MEM)
2261 return "ldw %1,%0";
2262 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2264 long i;
2265 REAL_VALUE_TYPE d;
2267 gcc_assert (GET_MODE (operands[1]) == SFmode);
2269 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2270 bit pattern. */
2271 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2272 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2274 operands[1] = GEN_INT (i);
2275 /* Fall through to CONST_INT case. */
2277 if (GET_CODE (operands[1]) == CONST_INT)
2279 intval = INTVAL (operands[1]);
2281 if (VAL_14_BITS_P (intval))
2282 return "ldi %1,%0";
2283 else if ((intval & 0x7ff) == 0)
2284 return "ldil L'%1,%0";
2285 else if (zdepi_cint_p (intval))
2286 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2288 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2290 return "copy %1,%0";
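/* Editorial sketch, not part of the original file: a self-contained
   model of the constant classification above, using plain C stand-ins
   for the VAL_14_BITS_P and low-part tests (the zdepi case is left
   out).  Illustration only; nothing in this file calls it.  */
static const char *
singlemove_constant_class_example (long intval)
{
  if (intval >= -8192 && intval < 8192)
    return "ldi";		/* fits the 14-bit signed immediate */
  if ((intval & 0x7ff) == 0)
    return "ldil";		/* low 11 bits clear: left part alone */
  return "ldil+ldo";		/* general case: two instructions */
}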
2294 /* Compute position (in OP[1]) and width (in OP[2])
2295 useful for copying IMM to a register using the zdepi
2296 instructions. Store the immediate value to insert in OP[0]. */
2298 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2302 /* Find the least significant set bit in IMM. */
2303 for (lsb = 0; lsb < 32; lsb++)
2304 {
2305 if ((imm & 1) != 0)
2306 break;
2307 imm >>= 1;
2308 }
2310 /* Choose variants based on *sign* of the 5-bit field. */
2311 if ((imm & 0x10) == 0)
2312 len = (lsb <= 28) ? 4 : 32 - lsb;
2313 else
2315 /* Find the width of the bitstring in IMM. */
2316 for (len = 5; len < 32 - lsb; len++)
2318 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2322 /* Sign extend IMM as a 5-bit value. */
2323 imm = (imm & 0xf) - 0x10;
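/* Editorial example (not in the original source): imm = 0x00fe0000 has
   its least significant set bit at 17; the shifted value 0x7f has bit 4
   set, so the width loop stops at 7 and the 5-bit insert value sign
   extends to -1.  Depositing the low 7 bits of -1 at bit 17 recreates
   the constant.  */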
2331 /* Compute position (in OP[1]) and width (in OP[2])
2332 useful for copying IMM to a register using the depdi,z
2333 instructions. Store the immediate value to insert in OP[0]. */
2335 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2337 int lsb, len, maxlen;
2339 maxlen = MIN (HOST_BITS_PER_WIDE_INT, 64);
2341 /* Find the least significant set bit in IMM. */
2342 for (lsb = 0; lsb < maxlen; lsb++)
2343 {
2344 if ((imm & 1) != 0)
2345 break;
2346 imm >>= 1;
2347 }
2349 /* Choose variants based on *sign* of the 5-bit field. */
2350 if ((imm & 0x10) == 0)
2351 len = (lsb <= maxlen - 4) ? 4 : maxlen - lsb;
2352 else
2354 /* Find the width of the bitstring in IMM. */
2355 for (len = 5; len < maxlen - lsb; len++)
2357 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2361 /* Extend length if host is narrow and IMM is negative. */
2362 if (HOST_BITS_PER_WIDE_INT == 32 && len == maxlen - lsb)
2363 len += 32;
2365 /* Sign extend IMM as a 5-bit value. */
2366 imm = (imm & 0xf) - 0x10;
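/* Editorial example (not in the original source): imm = 0x3f000 gives
   lsb = 12 and remaining bits 0x3f, so the width loop stops at 6 and
   the insert value sign extends to -1: one depdi of six 1-bits at
   bit 12 rebuilds the constant.  */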
2374 /* Output assembler code to perform a doubleword move insn
2375 with operands OPERANDS. */
2378 output_move_double (rtx *operands)
2380 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2381 rtx latehalf[2];
2382 rtx addreg0 = 0, addreg1 = 0;
2384 /* First classify both operands. */
2386 if (REG_P (operands[0]))
2387 optype0 = REGOP;
2388 else if (offsettable_memref_p (operands[0]))
2389 optype0 = OFFSOP;
2390 else if (GET_CODE (operands[0]) == MEM)
2391 optype0 = MEMOP;
2392 else
2393 optype0 = RNDOP;
2395 if (REG_P (operands[1]))
2396 optype1 = REGOP;
2397 else if (CONSTANT_P (operands[1]))
2398 optype1 = CNSTOP;
2399 else if (offsettable_memref_p (operands[1]))
2400 optype1 = OFFSOP;
2401 else if (GET_CODE (operands[1]) == MEM)
2402 optype1 = MEMOP;
2403 else
2404 optype1 = RNDOP;
2406 /* Check for the cases that the operand constraints are not
2407 supposed to allow to happen. */
2408 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2410 /* Handle copies between general and floating registers. */
2412 if (optype0 == REGOP && optype1 == REGOP
2413 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2415 if (FP_REG_P (operands[0]))
2417 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2418 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2419 return "{fldds|fldd} -16(%%sp),%0";
2421 else
2423 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2424 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2425 return "{ldws|ldw} -12(%%sp),%R0";
2429 /* Handle auto decrementing and incrementing loads and stores
2430 specifically, since the structure of the function doesn't work
2431 for them without major modification. Do this better when we teach
2432 this port about the PA's general inc/dec addressing.
2433 (This was written by tege. Chide him if it doesn't work.) */
2435 if (optype0 == MEMOP)
2437 /* We have to output the address syntax ourselves, since print_operand
2438 doesn't deal with the addresses we want to use. Fix this later. */
2440 rtx addr = XEXP (operands[0], 0);
2441 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2443 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2445 operands[0] = XEXP (addr, 0);
2446 gcc_assert (GET_CODE (operands[1]) == REG
2447 && GET_CODE (operands[0]) == REG);
2449 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2451 /* No overlap between high target register and address
2452 register. (We do this in a non-obvious way to
2453 save a register file writeback) */
2454 if (GET_CODE (addr) == POST_INC)
2455 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2456 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2458 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2460 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2462 operands[0] = XEXP (addr, 0);
2463 gcc_assert (GET_CODE (operands[1]) == REG
2464 && GET_CODE (operands[0]) == REG);
2466 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2467 /* No overlap between high target register and address
2468 register. (We do this in a non-obvious way to save a
2469 register file writeback) */
2470 if (GET_CODE (addr) == PRE_INC)
2471 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2472 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2475 if (optype1 == MEMOP)
2477 /* We have to output the address syntax ourselves, since print_operand
2478 doesn't deal with the addresses we want to use. Fix this later. */
2480 rtx addr = XEXP (operands[1], 0);
2481 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2483 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2485 operands[1] = XEXP (addr, 0);
2486 gcc_assert (GET_CODE (operands[0]) == REG
2487 && GET_CODE (operands[1]) == REG);
2489 if (!reg_overlap_mentioned_p (high_reg, addr))
2491 /* No overlap between high target register and address
2492 register. (We do this in a non-obvious way to
2493 save a register file writeback) */
2494 if (GET_CODE (addr) == POST_INC)
2495 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2496 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2500 /* This is an undefined situation. We should load into the
2501 address register *and* update that register. Probably
2502 we don't need to handle this at all. */
2503 if (GET_CODE (addr) == POST_INC)
2504 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2505 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2508 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2510 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2512 operands[1] = XEXP (addr, 0);
2513 gcc_assert (GET_CODE (operands[0]) == REG
2514 && GET_CODE (operands[1]) == REG);
2516 if (!reg_overlap_mentioned_p (high_reg, addr))
2518 /* No overlap between high target register and address
2519 register. (We do this in a non-obvious way to
2520 save a register file writeback) */
2521 if (GET_CODE (addr) == PRE_INC)
2522 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2523 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2527 /* This is an undefined situation. We should load into the
2528 address register *and* update that register. Probably
2529 we don't need to handle this at all. */
2530 if (GET_CODE (addr) == PRE_INC)
2531 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2532 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2535 else if (GET_CODE (addr) == PLUS
2536 && GET_CODE (XEXP (addr, 0)) == MULT)
2538 rtx xoperands[4];
2539 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2541 if (!reg_overlap_mentioned_p (high_reg, addr))
2543 xoperands[0] = high_reg;
2544 xoperands[1] = XEXP (addr, 1);
2545 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2546 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2547 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2548 xoperands);
2549 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2553 xoperands[0] = high_reg;
2554 xoperands[1] = XEXP (addr, 1);
2555 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2556 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2557 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2558 xoperands);
2559 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2564 /* If an operand is an unoffsettable memory ref, find a register
2565 we can increment temporarily to make it refer to the second word. */
2567 if (optype0 == MEMOP)
2568 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2570 if (optype1 == MEMOP)
2571 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2573 /* Ok, we can do one word at a time.
2574 Normally we do the low-numbered word first.
2576 In either case, set up in LATEHALF the operands to use
2577 for the high-numbered word and in some cases alter the
2578 operands in OPERANDS to be suitable for the low-numbered word. */
2580 if (optype0 == REGOP)
2581 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2582 else if (optype0 == OFFSOP)
2583 latehalf[0] = adjust_address (operands[0], SImode, 4);
2584 else
2585 latehalf[0] = operands[0];
2587 if (optype1 == REGOP)
2588 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2589 else if (optype1 == OFFSOP)
2590 latehalf[1] = adjust_address (operands[1], SImode, 4);
2591 else if (optype1 == CNSTOP)
2592 split_double (operands[1], &operands[1], &latehalf[1]);
2593 else
2594 latehalf[1] = operands[1];
2596 /* If the first move would clobber the source of the second one,
2597 do them in the other order.
2599 This can happen in two cases:
2601 mem -> register where the first half of the destination register
2602 is the same register used in the memory's address. Reload
2603 can create such insns.
2605 mem in this case will be either register indirect or register
2606 indirect plus a valid offset.
2608 register -> register move where REGNO(dst) == REGNO(src) + 1;
2609 someone (Tim/Tege?) claimed this can happen for parameter loads.
2611 Handle mem -> register case first. */
2612 if (optype0 == REGOP
2613 && (optype1 == MEMOP || optype1 == OFFSOP)
2614 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2617 /* Do the late half first. */
2618 if (addreg1)
2619 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2620 output_asm_insn (singlemove_string (latehalf), latehalf);
2622 /* Then clobber. */
2623 if (addreg1)
2624 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2625 return singlemove_string (operands);
2628 /* Now handle register -> register case. */
2629 if (optype0 == REGOP && optype1 == REGOP
2630 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2632 output_asm_insn (singlemove_string (latehalf), latehalf);
2633 return singlemove_string (operands);
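/* Editorial example (not in the original source): for a load such as
   "ldw 0(%r4),%r4; ldw 4(%r4),%r5" the first word would clobber %r4,
   the address register, before the second load, so the late half is
   emitted first.  The register case covers copies like %r5:%r6 into
   %r6:%r7, where REGNO (dst) == REGNO (src) + 1.  */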
2636 /* Normal case: do the two words, low-numbered first. */
2638 output_asm_insn (singlemove_string (operands), operands);
2640 /* Make any unoffsettable addresses point at high-numbered word. */
2641 if (addreg0)
2642 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2643 if (addreg1)
2644 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2647 output_asm_insn (singlemove_string (latehalf), latehalf);
2649 /* Undo the adds we just did. */
2650 if (addreg0)
2651 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2652 if (addreg1)
2653 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2655 return "";
2659 output_fp_move_double (rtx *operands)
2661 if (FP_REG_P (operands[0]))
2663 if (FP_REG_P (operands[1])
2664 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2665 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2666 else
2667 output_asm_insn ("fldd%F1 %1,%0", operands);
2669 else if (FP_REG_P (operands[1]))
2671 output_asm_insn ("fstd%F0 %1,%0", operands);
2673 else
2675 rtx xoperands[2];
2677 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2679 /* This is a pain. You have to be prepared to deal with an
2680 arbitrary address here including pre/post increment/decrement.
2682 So avoid this in the MD. */
2683 gcc_assert (GET_CODE (operands[0]) == REG);
2685 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2686 xoperands[0] = operands[0];
2687 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2692 /* Return a REG that occurs in ADDR with coefficient 1.
2693 ADDR can be effectively incremented by incrementing REG. */
2696 find_addr_reg (rtx addr)
2698 while (GET_CODE (addr) == PLUS)
2700 if (GET_CODE (XEXP (addr, 0)) == REG)
2701 addr = XEXP (addr, 0);
2702 else if (GET_CODE (XEXP (addr, 1)) == REG)
2703 addr = XEXP (addr, 1);
2704 else if (CONSTANT_P (XEXP (addr, 0)))
2705 addr = XEXP (addr, 1);
2706 else if (CONSTANT_P (XEXP (addr, 1)))
2707 addr = XEXP (addr, 0);
2708 else
2709 gcc_unreachable ();
2711 gcc_assert (GET_CODE (addr) == REG);
2712 return addr;
2715 /* Emit code to perform a block move.
2717 OPERANDS[0] is the destination pointer as a REG, clobbered.
2718 OPERANDS[1] is the source pointer as a REG, clobbered.
2719 OPERANDS[2] is a register for temporary storage.
2720 OPERANDS[3] is a register for temporary storage.
2721 OPERANDS[4] is the size as a CONST_INT
2722 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2723 OPERANDS[6] is another temporary register. */
2726 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2728 int align = INTVAL (operands[5]);
2729 unsigned long n_bytes = INTVAL (operands[4]);
2731 /* We can't move more than a word at a time because the PA
2732 has no integer move insns longer than a word. (Could use fp mem ops?) */
2733 if (align > (TARGET_64BIT ? 8 : 4))
2734 align = (TARGET_64BIT ? 8 : 4);
2736 /* Note that we know each loop below will execute at least twice
2737 (else we would have open-coded the copy). */
2741 /* Pre-adjust the loop counter. */
2742 operands[4] = GEN_INT (n_bytes - 16);
2743 output_asm_insn ("ldi %4,%2", operands);
2746 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2747 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2748 output_asm_insn ("std,ma %3,8(%0)", operands);
2749 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2750 output_asm_insn ("std,ma %6,8(%0)", operands);
2752 /* Handle the residual. There could be up to 15 bytes of
2753 residual to copy! */
2754 if (n_bytes % 16 != 0)
2756 operands[4] = GEN_INT (n_bytes % 8);
2757 if (n_bytes % 16 >= 8)
2758 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2759 if (n_bytes % 8 != 0)
2760 output_asm_insn ("ldd 0(%1),%6", operands);
2761 if (n_bytes % 16 >= 8)
2762 output_asm_insn ("std,ma %3,8(%0)", operands);
2763 if (n_bytes % 8 != 0)
2764 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2769 /* Pre-adjust the loop counter. */
2770 operands[4] = GEN_INT (n_bytes - 8);
2771 output_asm_insn ("ldi %4,%2", operands);
2774 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2775 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2776 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2777 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2778 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2780 /* Handle the residual. There could be up to 7 bytes of
2781 residual to copy! */
2782 if (n_bytes % 8 != 0)
2784 operands[4] = GEN_INT (n_bytes % 4);
2785 if (n_bytes % 8 >= 4)
2786 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2787 if (n_bytes % 4 != 0)
2788 output_asm_insn ("ldw 0(%1),%6", operands);
2789 if (n_bytes % 8 >= 4)
2790 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2791 if (n_bytes % 4 != 0)
2792 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2797 /* Pre-adjust the loop counter. */
2798 operands[4] = GEN_INT (n_bytes - 4);
2799 output_asm_insn ("ldi %4,%2", operands);
2802 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2803 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2804 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2805 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2806 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2808 /* Handle the residual. */
2809 if (n_bytes % 4 != 0)
2811 if (n_bytes % 4 >= 2)
2812 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2813 if (n_bytes % 2 != 0)
2814 output_asm_insn ("ldb 0(%1),%6", operands);
2815 if (n_bytes % 4 >= 2)
2816 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2817 if (n_bytes % 2 != 0)
2818 output_asm_insn ("stb %6,0(%0)", operands);
2823 /* Pre-adjust the loop counter. */
2824 operands[4] = GEN_INT (n_bytes - 2);
2825 output_asm_insn ("ldi %4,%2", operands);
2828 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2829 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2830 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2831 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2832 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2834 /* Handle the residual. */
2835 if (n_bytes % 2 != 0)
2837 output_asm_insn ("ldb 0(%1),%3", operands);
2838 output_asm_insn ("stb %3,0(%0)", operands);
2847 /* Count the number of insns necessary to handle this block move.
2849 Basic structure is the same as output_block_move, except that we
2850 count insns rather than emit them. */
2853 compute_movmem_length (rtx insn)
2855 rtx pat = PATTERN (insn);
2856 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2857 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2858 unsigned int n_insns = 0;
2860 /* We can't move more than a word at a time because the PA
2861 has no integer move insns longer than a word. (Could use fp mem ops?) */
2862 if (align > (TARGET_64BIT ? 8 : 4))
2863 align = (TARGET_64BIT ? 8 : 4);
2865 /* The basic copying loop. */
2866 n_insns = 6;
2868 /* Residuals. */
2869 if (n_bytes % (2 * align) != 0)
2871 if ((n_bytes % (2 * align)) >= align)
2872 n_insns += 2;
2874 if ((n_bytes % align) != 0)
2875 n_insns += 2;
2878 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2879 return n_insns * 4;
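/* Editorial example (not in the original source): n_bytes = 23 with
   align = 4 needs the 6-insn copying loop, 2 insns for the residual
   word (23 % 8 = 7 >= 4) and 2 more for the last 3 bytes, i.e.
   10 insns = 40 bytes of code.  */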
2882 /* Emit code to perform a block clear.
2884 OPERANDS[0] is the destination pointer as a REG, clobbered.
2885 OPERANDS[1] is a register for temporary storage.
2886 OPERANDS[2] is the size as a CONST_INT
2887 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2890 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2892 int align = INTVAL (operands[3]);
2893 unsigned long n_bytes = INTVAL (operands[2]);
2895 /* We can't clear more than a word at a time because the PA
2896 has no integer move insns longer than a word. */
2897 if (align > (TARGET_64BIT ? 8 : 4))
2898 align = (TARGET_64BIT ? 8 : 4);
2900 /* Note that we know each loop below will execute at least twice
2901 (else we would have open-coded the copy). */
2905 /* Pre-adjust the loop counter. */
2906 operands[2] = GEN_INT (n_bytes - 16);
2907 output_asm_insn ("ldi %2,%1", operands);
2910 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2911 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2912 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2914 /* Handle the residual. There could be up to 15 bytes of
2915 residual to clear! */
2916 if (n_bytes % 16 != 0)
2918 operands[2] = GEN_INT (n_bytes % 8);
2919 if (n_bytes % 16 >= 8)
2920 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2921 if (n_bytes % 8 != 0)
2922 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2927 /* Pre-adjust the loop counter. */
2928 operands[2] = GEN_INT (n_bytes - 8);
2929 output_asm_insn ("ldi %2,%1", operands);
2932 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2933 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2934 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2936 /* Handle the residual. There could be up to 7 bytes of
2937 residual to clear! */
2938 if (n_bytes % 8 != 0)
2940 operands[2] = GEN_INT (n_bytes % 4);
2941 if (n_bytes % 8 >= 4)
2942 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2943 if (n_bytes % 4 != 0)
2944 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2949 /* Pre-adjust the loop counter. */
2950 operands[2] = GEN_INT (n_bytes - 4);
2951 output_asm_insn ("ldi %2,%1", operands);
2954 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2955 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2956 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2958 /* Handle the residual. */
2959 if (n_bytes % 4 != 0)
2961 if (n_bytes % 4 >= 2)
2962 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2963 if (n_bytes % 2 != 0)
2964 output_asm_insn ("stb %%r0,0(%0)", operands);
2969 /* Pre-adjust the loop counter. */
2970 operands[2] = GEN_INT (n_bytes - 2);
2971 output_asm_insn ("ldi %2,%1", operands);
2974 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2975 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2976 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2978 /* Handle the residual. */
2979 if (n_bytes % 2 != 0)
2980 output_asm_insn ("stb %%r0,0(%0)", operands);
2989 /* Count the number of insns necessary to handle this block clear.
2991 Basic structure is the same as output_block_clear, except that we
2992 count insns rather than emit them. */
2995 compute_clrmem_length (rtx insn)
2997 rtx pat = PATTERN (insn);
2998 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2999 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
3000 unsigned int n_insns = 0;
3002 /* We can't clear more than a word at a time because the PA
3003 has no integer move insns longer than a word. */
3004 if (align > (TARGET_64BIT ? 8 : 4))
3005 align = (TARGET_64BIT ? 8 : 4);
3007 /* The basic loop. */
3008 n_insns = 4;
3010 /* Residuals. */
3011 if (n_bytes % (2 * align) != 0)
3013 if ((n_bytes % (2 * align)) >= align)
3014 n_insns++;
3016 if ((n_bytes % align) != 0)
3017 n_insns++;
3020 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
3021 return n_insns * 4;
3026 output_and (rtx *operands)
3028 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3030 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3031 int ls0, ls1, ms0, p, len;
3033 for (ls0 = 0; ls0 < 32; ls0++)
3034 if ((mask & (1 << ls0)) == 0)
3037 for (ls1 = ls0; ls1 < 32; ls1++)
3038 if ((mask & (1 << ls1)) != 0)
3041 for (ms0 = ls1; ms0 < 32; ms0++)
3042 if ((mask & (1 << ms0)) == 0)
3045 gcc_assert (ms0 == 32);
3047 if (ls1 == 32)
3049 len = ls0;
3051 gcc_assert (len);
3053 operands[2] = GEN_INT (len);
3054 return "{extru|extrw,u} %1,31,%2,%0";
3056 else
3058 /* We could use this `depi' for the case above as well, but `depi'
3059 requires one more register file access than an `extru'. */
3061 p = 31 - ls0;
3062 len = ls1 - ls0;
3064 operands[2] = GEN_INT (p);
3065 operands[3] = GEN_INT (len);
3066 return "{depi|depwi} 0,%2,%3,%0";
3069 else
3070 return "and %1,%2,%0";
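/* Editorial sketch, not part of the original file: a self-contained
   model of the mask analysis above in plain C.  It reports whether a
   32-bit AND mask is a single run of zeros and, if so, the depwi
   position and length; e.g. mask = 0xffff00ff gives p = 23, len = 8.
   Illustration only; nothing in this file calls it.  */
static int
and_mask_example (unsigned int mask, int *p, int *len)
{
  int ls0, ls1, ms0;

  for (ls0 = 0; ls0 < 32; ls0++)		/* lowest clear bit */
    if ((mask & (1u << ls0)) == 0)
      break;
  for (ls1 = ls0; ls1 < 32; ls1++)		/* next set bit */
    if ((mask & (1u << ls1)) != 0)
      break;
  for (ms0 = ls1; ms0 < 32; ms0++)		/* next clear bit */
    if ((mask & (1u << ms0)) == 0)
      break;
  if (ms0 != 32)				/* not a single run */
    return 0;
  *p = 31 - ls0;
  *len = ls1 - ls0;
  return 1;
}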
3073 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
3074 storing the result in operands[0]. */
3076 output_64bit_and (rtx *operands)
3078 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
3080 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3081 int ls0, ls1, ms0, p, len;
3083 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
3084 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
3087 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
3088 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
3091 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
3092 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
3095 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3097 if (ls1 == HOST_BITS_PER_WIDE_INT)
3099 len = ls0;
3101 gcc_assert (len);
3103 operands[2] = GEN_INT (len);
3104 return "extrd,u %1,63,%2,%0";
3108 /* We could use this `depi' for the case above as well, but `depi'
3109 requires one more register file access than an `extru'. */
3111 p = 63 - ls0;
3112 len = ls1 - ls0;
3114 operands[2] = GEN_INT (p);
3115 operands[3] = GEN_INT (len);
3116 return "depdi 0,%2,%3,%0";
3119 else
3120 return "and %1,%2,%0";
3124 output_ior (rtx *operands)
3126 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3127 int bs0, bs1, p, len;
3129 if (INTVAL (operands[2]) == 0)
3130 return "copy %1,%0";
3132 for (bs0 = 0; bs0 < 32; bs0++)
3133 if ((mask & (1 << bs0)) != 0)
3136 for (bs1 = bs0; bs1 < 32; bs1++)
3137 if ((mask & (1 << bs1)) == 0)
3140 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3142 p = 31 - bs0;
3143 len = bs1 - bs0;
3145 operands[2] = GEN_INT (p);
3146 operands[3] = GEN_INT (len);
3147 return "{depi|depwi} -1,%2,%3,%0";
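/* Editorial example (not in the original source): mask = 0x0000ff00 is
   a single run of ones with bs0 = 8 and bs1 = 16, so a single
   "depwi -1,23,8" sets exactly bits 8..15 of the destination.  */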
3150 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3151 storing the result in operands[0]. */
3153 output_64bit_ior (rtx *operands)
3155 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3156 int bs0, bs1, p, len;
3158 if (INTVAL (operands[2]) == 0)
3159 return "copy %1,%0";
3161 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3162 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3165 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3166 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3169 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3170 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3172 p = 63 - bs0;
3173 len = bs1 - bs0;
3175 operands[2] = GEN_INT (p);
3176 operands[3] = GEN_INT (len);
3177 return "depdi -1,%2,%3,%0";
3180 /* Target hook for assembling integer objects. This code handles
3181 aligned SI and DI integers specially since function references
3182 must be preceded by P%. */
3185 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3187 if (size == UNITS_PER_WORD
3188 && aligned_p
3189 && function_label_operand (x, VOIDmode))
3191 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3192 output_addr_const (asm_out_file, x);
3193 fputc ('\n', asm_out_file);
3194 return true;
3196 return default_assemble_integer (x, size, aligned_p);
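/* Editorial example (not in the original source): emitting the address
   of a function "foo" as initialized data produces "\t.word\tP%foo"
   (".dword" for 8-byte words), where the P% prefix makes the linker
   materialize a function pointer (plabel) rather than a raw code
   address.  */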
3199 /* Output an ascii string. */
3201 output_ascii (FILE *file, const char *p, int size)
3203 int i;
3204 int chars_output;
3205 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3207 /* The HP assembler can only take strings of 256 characters at one
3208 time. This is a limitation on input line length, *not* the
3209 length of the string. Sigh. Even worse, it seems that the
3210 restriction is in number of input characters (see \xnn &
3211 \whatever). So we have to do this very carefully. */
3213 fputs ("\t.STRING \"", file);
3214 chars_output = 0;
3216 for (i = 0; i < size; i += 4)
3220 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3222 register unsigned int c = (unsigned char) p[i + io];
3224 if (c == '\"' || c == '\\')
3225 partial_output[co++] = '\\';
3226 if (c >= ' ' && c < 0177)
3227 partial_output[co++] = c;
3228 else
3230 unsigned int hexd;
3231 partial_output[co++] = '\\';
3232 partial_output[co++] = 'x';
3233 hexd = c / 16 - 0 + '0';
3234 if (hexd > '9')
3235 hexd -= '9' - 'a' + 1;
3236 partial_output[co++] = hexd;
3237 hexd = c % 16 - 0 + '0';
3238 if (hexd > '9')
3239 hexd -= '9' - 'a' + 1;
3240 partial_output[co++] = hexd;
3243 if (chars_output + co > 243)
3245 fputs ("\"\n\t.STRING \"", file);
3246 chars_output = 0;
3248 fwrite (partial_output, 1, (size_t) co, file);
3249 chars_output += co;
3252 fputs ("\"\n", file);
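/* Editorial sketch, not part of the original file: the hex-escape
   arithmetic above in a self-contained helper.  A nibble of 10 gives
   '0' + 10 = ':', which is greater than '9', so subtracting
   '9' - 'a' + 1 lands on 'a'.  Illustration only.  */
static unsigned char
ascii_hex_digit_example (unsigned int nibble)
{
  unsigned int hexd = nibble + '0';

  if (hexd > '9')
    hexd -= '9' - 'a' + 1;
  return hexd;
}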
3255 /* Try to rewrite floating point comparisons & branches to avoid
3256 useless add,tr insns.
3258 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3259 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3260 first attempt to remove useless add,tr insns. It is zero
3261 for the second pass as reorg sometimes leaves bogus REG_DEAD
3262 notes lying around.
3264 When CHECK_NOTES is zero we can only eliminate add,tr insns
3265 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3266 insns. */
3268 remove_useless_addtr_insns (int check_notes)
3271 static int pass = 0;
3273 /* This is fairly cheap, so always run it when optimizing. */
3274 if (optimize > 0)
3276 int fcmp_count = 0;
3277 int fbranch_count = 0;
3279 /* Walk all the insns in this function looking for fcmp & fbranch
3280 instructions. Keep track of how many of each we find. */
3281 for (insn = get_insns (); insn; insn = next_insn (insn))
3285 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3286 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3289 tmp = PATTERN (insn);
3291 /* It must be a set. */
3292 if (GET_CODE (tmp) != SET)
3295 /* If the destination is CCFP, then we've found an fcmp insn. */
3296 tmp = SET_DEST (tmp);
3297 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3299 fcmp_count++;
3300 continue;
3303 tmp = PATTERN (insn);
3304 /* If this is an fbranch instruction, bump the fbranch counter. */
3305 if (GET_CODE (tmp) == SET
3306 && SET_DEST (tmp) == pc_rtx
3307 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3308 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3309 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3310 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3311 fbranch_count++;
3318 /* Find all floating point compare + branch insns. If possible,
3319 reverse the comparison & the branch to avoid add,tr insns. */
3320 for (insn = get_insns (); insn; insn = next_insn (insn))
3324 /* Ignore anything that isn't an INSN. */
3325 if (GET_CODE (insn) != INSN)
3328 tmp = PATTERN (insn);
3330 /* It must be a set. */
3331 if (GET_CODE (tmp) != SET)
3334 /* The destination must be CCFP, which is register zero. */
3335 tmp = SET_DEST (tmp);
3336 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3339 /* INSN should be a set of CCFP.
3341 See if the result of this insn is used in a reversed FP
3342 conditional branch. If so, reverse our condition and
3343 the branch. Doing so avoids useless add,tr insns. */
3344 next = next_insn (insn);
3345 while (next)
3347 /* Jumps, calls and labels stop our search. */
3348 if (GET_CODE (next) == JUMP_INSN
3349 || GET_CODE (next) == CALL_INSN
3350 || GET_CODE (next) == CODE_LABEL)
3353 /* As does another fcmp insn. */
3354 if (GET_CODE (next) == INSN
3355 && GET_CODE (PATTERN (next)) == SET
3356 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3357 && REGNO (SET_DEST (PATTERN (next))) == 0)
3360 next = next_insn (next);
3363 /* Is NEXT_INSN a branch? */
3364 if (next
3365 && GET_CODE (next) == JUMP_INSN)
3367 rtx pattern = PATTERN (next);
3369 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3370 and CCFP dies, then reverse our conditional and the branch
3371 to avoid the add,tr. */
3372 if (GET_CODE (pattern) == SET
3373 && SET_DEST (pattern) == pc_rtx
3374 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3375 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3376 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3377 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3378 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3379 && (fcmp_count == fbranch_count
3380 || (check_notes
3381 && find_regno_note (next, REG_DEAD, 0))))
3383 /* Reverse the branch. */
3384 tmp = XEXP (SET_SRC (pattern), 1);
3385 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3386 XEXP (SET_SRC (pattern), 2) = tmp;
3387 INSN_CODE (next) = -1;
3389 /* Reverse our condition. */
3390 tmp = PATTERN (insn);
3391 PUT_CODE (XEXP (tmp, 1),
3392 (reverse_condition_maybe_unordered
3393 (GET_CODE (XEXP (tmp, 1)))));
3403 /* You may have trouble believing this, but this is the 32 bit HP-PA
3404 stack layout. Wow.
3406 Offset Contents
3408 Variable arguments (optional; any number may be allocated)
3410 SP-(4*(N+9)) arg word N
3411 : :
3412 SP-56 arg word 5
3413 SP-52 arg word 4
3415 Fixed arguments (must be allocated; may remain unused)
3417 SP-48 arg word 3
3418 SP-44 arg word 2
3419 SP-40 arg word 1
3420 SP-36 arg word 0
3422 Frame Marker
3424 SP-32 External Data Pointer (DP)
3425 SP-28 External sr4
3426 SP-24 External/stub RP (RP')
3427 SP-20 Current RP
3428 SP-16 Static Link
3429 SP-12 Clean up
3430 SP-8 Calling Stub RP (RP'')
3431 SP-4 Previous SP
3433 Top of Frame
3435 SP-0 Stack Pointer (points to next available address) */
3439 /* This function saves registers as follows. Registers marked with ' are
3440 this function's registers (as opposed to the previous function's).
3441 If a frame_pointer isn't needed, r4 is saved as a general register;
3442 the space for the frame pointer is still allocated, though, to keep
3445 Offset Contents
3448 SP (FP') Previous FP
3449 SP + 4 Alignment filler (sigh)
3450 SP + 8 Space for locals reserved here.
3451 .
3452 .
3453 .
3454 SP + n All call saved registers used.
3455 .
3456 .
3457 .
3458 SP + o All call saved fp registers used.
3459 .
3460 .
3461 .
3462 SP + p (SP') points to next available address.
3466 /* Global variables set by output_function_prologue(). */
3467 /* Size of frame. Need to know this to emit return insns from
3468 leaf procedures. */
3469 static HOST_WIDE_INT actual_fsize, local_fsize;
3470 static int save_fregs;
3472 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3473 Handle case where DISP > 8k by using the add_high_const patterns.
3475 Note in DISP > 8k case, we will leave the high part of the address
3476 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
3479 store_reg (int reg, HOST_WIDE_INT disp, int base)
3481 rtx insn, dest, src, basereg;
3483 src = gen_rtx_REG (word_mode, reg);
3484 basereg = gen_rtx_REG (Pmode, base);
3485 if (VAL_14_BITS_P (disp))
3487 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3488 insn = emit_move_insn (dest, src);
3490 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3492 rtx delta = GEN_INT (disp);
3493 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3495 emit_move_insn (tmpreg, delta);
3496 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3499 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3500 gen_rtx_SET (VOIDmode, tmpreg,
3501 gen_rtx_PLUS (Pmode, basereg, delta)));
3502 RTX_FRAME_RELATED_P (insn) = 1;
3504 dest = gen_rtx_MEM (word_mode, tmpreg);
3505 insn = emit_move_insn (dest, src);
3509 rtx delta = GEN_INT (disp);
3510 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3511 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3513 emit_move_insn (tmpreg, high);
3514 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3515 insn = emit_move_insn (dest, src);
3516 if (DO_FRAME_NOTES)
3517 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3518 gen_rtx_SET (VOIDmode,
3519 gen_rtx_MEM (word_mode,
3520 gen_rtx_PLUS (word_mode,
3521 basereg,
3522 delta)),
3523 src));
3526 if (DO_FRAME_NOTES)
3527 RTX_FRAME_RELATED_P (insn) = 1;
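/* Editorial example (assumed assembly, not from the original source):
   for disp = -20 store_reg emits a single "stw %r2,-20(%r30)", while a
   displacement beyond 14 bits such as 0x6000 first builds the high
   part with "addil L'0x6000,%r30" and then stores via
   "stw %r4,R'0x6000(%r1)".  */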
3530 /* Emit RTL to store REG at the memory location specified by BASE and then
3531 add MOD to BASE. MOD must be <= 8k. */
3534 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3536 rtx insn, basereg, srcreg, delta;
3538 gcc_assert (VAL_14_BITS_P (mod));
3540 basereg = gen_rtx_REG (Pmode, base);
3541 srcreg = gen_rtx_REG (word_mode, reg);
3542 delta = GEN_INT (mod);
3544 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3545 if (DO_FRAME_NOTES)
3547 RTX_FRAME_RELATED_P (insn) = 1;
3549 /* RTX_FRAME_RELATED_P must be set on each frame related set
3550 in a parallel with more than one element. */
3551 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3552 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3556 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3557 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3558 whether to add a frame note or not.
3560 In the DISP > 8k case, we leave the high part of the address in %r1.
3561 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3564 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3568 if (VAL_14_BITS_P (disp))
3570 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3571 plus_constant (gen_rtx_REG (Pmode, base), disp));
3573 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3575 rtx basereg = gen_rtx_REG (Pmode, base);
3576 rtx delta = GEN_INT (disp);
3577 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3579 emit_move_insn (tmpreg, delta);
3580 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3581 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3583 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3584 gen_rtx_SET (VOIDmode, tmpreg,
3585 gen_rtx_PLUS (Pmode, basereg, delta)));
3589 rtx basereg = gen_rtx_REG (Pmode, base);
3590 rtx delta = GEN_INT (disp);
3591 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3593 emit_move_insn (tmpreg,
3594 gen_rtx_PLUS (Pmode, basereg,
3595 gen_rtx_HIGH (Pmode, delta)));
3596 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3597 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3600 if (DO_FRAME_NOTES && note)
3601 RTX_FRAME_RELATED_P (insn) = 1;
3605 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3607 int freg_saved = 0;
3608 int i, j;
3610 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3611 be consistent with the rounding and size calculation done here.
3612 Change them at the same time. */
3614 /* We do our own stack alignment. First, round the size of the
3615 stack locals up to a word boundary. */
3616 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3618 /* Space for previous frame pointer + filler. If any frame is
3619 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3620 waste some space here for the sake of HP compatibility. The
3621 first slot is only used when the frame pointer is needed. */
3622 if (size || frame_pointer_needed)
3623 size += STARTING_FRAME_OFFSET;
3625 /* If the current function calls __builtin_eh_return, then we need
3626 to allocate stack space for registers that will hold data for
3627 the exception handler. */
3628 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3632 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3634 size += i * UNITS_PER_WORD;
3637 /* Account for space used by the callee general register saves. */
3638 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3639 if (df_regs_ever_live_p (i))
3640 size += UNITS_PER_WORD;
3642 /* Account for space used by the callee floating point register saves. */
3643 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3644 if (df_regs_ever_live_p (i)
3645 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3647 freg_saved = 1;
3649 /* We always save both halves of the FP register, so always
3650 increment the frame size by 8 bytes. */
3652 size += 8;
3654 /* If any of the floating registers are saved, account for the
3655 alignment needed for the floating point register save block. */
3656 if (freg_saved)
3658 size = (size + 7) & ~7;
3660 if (fregs_live)
3661 *fregs_live = 1;
3663 /* The various ABIs include space for the outgoing parameters in the
3664 size of the current function's stack frame. We don't need to align
3665 for the outgoing arguments as their alignment is set by the final
3666 rounding for the frame as a whole. */
3667 size += crtl->outgoing_args_size;
3669 /* Allocate space for the fixed frame marker. This space must be
3670 allocated for any function that makes calls or allocates
3671 stack space. */
3672 if (!current_function_is_leaf || size)
3673 size += TARGET_64BIT ? 48 : 32;
3675 /* Finally, round to the preferred stack boundary. */
3676 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3677 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
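/* Editorial example (not in the original source, assuming the 32-bit
   ABI's 8-byte STARTING_FRAME_OFFSET and 64-byte preferred stack
   boundary): 100 bytes of locals plus the frame pointer slot give 108;
   three general register saves make 120, 16 bytes of outgoing
   arguments 136, the 32-byte frame marker 168, and the final rounding
   returns 192.  */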
3680 /* Generate the assembly code for function entry. FILE is a stdio
3681 stream to output the code to. SIZE is an int: how many units of
3682 temporary storage to allocate.
3684 Refer to the array `regs_ever_live' to determine which registers to
3685 save; `regs_ever_live[I]' is nonzero if register number I is ever
3686 used in the function. This function is responsible for knowing
3687 which registers should not be saved even if used. */
3689 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3690 of memory. If any fpu reg is used in the function, we allocate
3691 such a block here, at the bottom of the frame, just in case it's needed.
3693 If this function is a leaf procedure, then we may choose not
3694 to do a "save" insn. The decision about whether or not
3695 to do this is made in regclass.c. */
3698 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3700 /* The function's label and associated .PROC must never be
3701 separated and must be output *after* any profiling declarations
3702 to avoid changing spaces/subspaces within a procedure. */
3703 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3704 fputs ("\t.PROC\n", file);
3706 /* hppa_expand_prologue does the dirty work now. We just need
3707 to output the assembler directives which denote the start
3708 of the function. */
3709 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3710 if (current_function_is_leaf)
3711 fputs (",NO_CALLS", file);
3712 else
3713 fputs (",CALLS", file);
3714 if (rp_saved)
3715 fputs (",SAVE_RP", file);
3717 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3718 at the beginning of the frame and that it is used as the frame
3719 pointer for the frame. We do this because our current frame
3720 layout doesn't conform to that specified in the HP runtime
3721 documentation and we need a way to indicate to programs such as
3722 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3723 isn't used by HP compilers but is supported by the assembler.
3724 However, SAVE_SP is supposed to indicate that the previous stack
3725 pointer has been saved in the frame marker. */
3726 if (frame_pointer_needed)
3727 fputs (",SAVE_SP", file);
3729 /* Pass on information about the number of callee register saves
3730 performed in the prologue.
3732 The compiler is supposed to pass the highest register number
3733 saved, the assembler then has to adjust that number before
3734 entering it into the unwind descriptor (to account for any
3735 caller saved registers with lower register numbers than the
3736 first callee saved register). */
3737 if (gr_saved)
3738 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3740 if (fr_saved)
3741 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3743 fputs ("\n\t.ENTRY\n", file);
3745 remove_useless_addtr_insns (0);
3749 hppa_expand_prologue (void)
3751 int merge_sp_adjust_with_store = 0;
3752 HOST_WIDE_INT size = get_frame_size ();
3753 HOST_WIDE_INT offset;
3754 int i;
3755 rtx insn, tmpreg;
3757 gr_saved = 0;
3758 fr_saved = 0;
3759 save_fregs = 0;
3761 /* Compute total size for frame pointer, filler, locals and rounding to
3762 the next word boundary. Similar code appears in compute_frame_size
3763 and must be changed in tandem with this code. */
3764 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3765 if (local_fsize || frame_pointer_needed)
3766 local_fsize += STARTING_FRAME_OFFSET;
3768 actual_fsize = compute_frame_size (size, &save_fregs);
3769 if (flag_stack_usage_info)
3770 current_function_static_stack_size = actual_fsize;
3772 /* Compute a few things we will use often. */
3773 tmpreg = gen_rtx_REG (word_mode, 1);
3775 /* Save RP first. The calling conventions manual states RP will
3776 always be stored into the caller's frame at sp - 20 or sp - 16
3777 depending on which ABI is in use. */
3778 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3780 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3781 rp_saved = true;
3783 else
3784 rp_saved = false;
3786 /* Allocate the local frame and set up the frame pointer if needed. */
3787 if (actual_fsize != 0)
3789 if (frame_pointer_needed)
3791 /* Copy the old frame pointer temporarily into %r1. Set up the
3792 new stack pointer, then store away the saved old frame pointer
3793 into the stack at sp and at the same time update the stack
3794 pointer by actual_fsize bytes. Two versions, first
3795 handles small (<8k) frames. The second handles large (>=8k)
3797 insn = emit_move_insn (tmpreg, hard_frame_pointer_rtx);
3799 RTX_FRAME_RELATED_P (insn) = 1;
3801 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3803 RTX_FRAME_RELATED_P (insn) = 1;
3805 if (VAL_14_BITS_P (actual_fsize))
3806 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3809 /* It is incorrect to store the saved frame pointer at *sp,
3810 then increment sp (writes beyond the current stack boundary).
3812 So instead use stwm to store at *sp and post-increment the
3813 stack pointer as an atomic operation. Then increment sp to
3814 finish allocating the new frame. */
3815 HOST_WIDE_INT adjust1 = 8192 - 64;
3816 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3818 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3819 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3820 adjust2, 1);
3823 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3824 we need to store the previous stack pointer (frame pointer)
3825 into the frame marker on targets that use the HP unwind
3826 library. This allows the HP unwind library to be used to
3827 unwind GCC frames. However, we are not fully compatible
3828 with the HP library because our frame layout differs from
3829 that specified in the HP runtime specification.
3831 We don't want a frame note on this instruction as the frame
3832 marker moves during dynamic stack allocation.
3834 This instruction also serves as a blockage to prevent
3835 register spills from being scheduled before the stack
3836 pointer is raised. This is necessary as we store
3837 registers using the frame pointer as a base register,
3838 and the frame pointer is set before sp is raised. */
3839 if (TARGET_HPUX_UNWIND_LIBRARY)
3841 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3842 GEN_INT (TARGET_64BIT ? -8 : -4));
3844 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3845 hard_frame_pointer_rtx);
3848 emit_insn (gen_blockage ());
3850 /* no frame pointer needed. */
3853 /* In some cases we can perform the first callee register save
3854 and allocating the stack frame at the same time. If so, just
3855 make a note of it and defer allocating the frame until saving
3856 the callee registers. */
3857 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3858 merge_sp_adjust_with_store = 1;
3859 /* Cannot optimize. Adjust the stack frame by actual_fsize
3860 bytes. */
3861 else
3862 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3863 actual_fsize, 1);
3867 /* Normal register save.
3869 Do not save the frame pointer in the frame_pointer_needed case. It
3870 was done earlier. */
3871 if (frame_pointer_needed)
3873 offset = local_fsize;
3875 /* Saving the EH return data registers in the frame is the simplest
3876 way to get the frame unwind information emitted. We put them
3877 just before the general registers. */
3878 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3880 unsigned int i, regno;
3882 for (i = 0; ; ++i)
3884 regno = EH_RETURN_DATA_REGNO (i);
3885 if (regno == INVALID_REGNUM)
3886 break;
3888 store_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
3889 offset += UNITS_PER_WORD;
3893 for (i = 18; i >= 4; i--)
3894 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3896 store_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
3897 offset += UNITS_PER_WORD;
3898 gr_saved++;
3900 /* Account for %r3 which is saved in a special place. */
3901 gr_saved++;
3903 /* No frame pointer needed. */
3906 offset = local_fsize - actual_fsize;
3908 /* Saving the EH return data registers in the frame is the simplest
3909 way to get the frame unwind information emitted. */
3910 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3912 unsigned int i, regno;
3914 for (i = 0; ; ++i)
3916 regno = EH_RETURN_DATA_REGNO (i);
3917 if (regno == INVALID_REGNUM)
3918 break;
3920 /* If merge_sp_adjust_with_store is nonzero, then we can
3921 optimize the first save. */
3922 if (merge_sp_adjust_with_store)
3924 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3925 merge_sp_adjust_with_store = 0;
3927 else
3928 store_reg (regno, offset, STACK_POINTER_REGNUM);
3929 offset += UNITS_PER_WORD;
3933 for (i = 18; i >= 3; i--)
3934 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3936 /* If merge_sp_adjust_with_store is nonzero, then we can
3937 optimize the first GR save. */
3938 if (merge_sp_adjust_with_store)
3940 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3941 merge_sp_adjust_with_store = 0;
3943 else
3944 store_reg (i, offset, STACK_POINTER_REGNUM);
3945 offset += UNITS_PER_WORD;
3946 gr_saved++;
3949 /* If we wanted to merge the SP adjustment with a GR save, but we never
3950 did any GR saves, then just emit the adjustment here. */
3951 if (merge_sp_adjust_with_store)
3952 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3953 actual_fsize, 1);
3956 /* The hppa calling conventions say that %r19, the pic offset
3957 register, is saved at sp - 32 (in this function's frame)
3958 when generating PIC code. FIXME: What is the correct thing
3959 to do for functions which make no calls and allocate no
3960 frame? Do we need to allocate a frame, or can we just omit
3961 the save? For now we'll just omit the save.
3963 We don't want a note on this insn as the frame marker can
3964 move if there is a dynamic stack allocation. */
3965 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3967 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3969 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3973 /* Align pointer properly (doubleword boundary). */
3974 offset = (offset + 7) & ~7;
3976 /* Floating point register store. */
3977 if (save_fregs)
3979 rtx base;
3981 /* First get the frame or stack pointer to the start of the FP register
3982 save area. */
3983 if (frame_pointer_needed)
3985 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
3986 base = hard_frame_pointer_rtx;
3988 else
3990 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3991 base = stack_pointer_rtx;
3994 /* Now actually save the FP registers. */
3995 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3997 if (df_regs_ever_live_p (i)
3998 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4000 rtx addr, insn, reg;
4001 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4002 reg = gen_rtx_REG (DFmode, i);
4003 insn = emit_move_insn (addr, reg);
4006 RTX_FRAME_RELATED_P (insn) = 1;
4009 rtx mem = gen_rtx_MEM (DFmode,
4010 plus_constant (base, offset));
4011 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4012 gen_rtx_SET (VOIDmode, mem, reg));
4016 rtx meml = gen_rtx_MEM (SFmode,
4017 plus_constant (base, offset));
4018 rtx memr = gen_rtx_MEM (SFmode,
4019 plus_constant (base, offset + 4));
4020 rtx regl = gen_rtx_REG (SFmode, i);
4021 rtx regr = gen_rtx_REG (SFmode, i + 1);
4022 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
4023 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
4026 RTX_FRAME_RELATED_P (setl) = 1;
4027 RTX_FRAME_RELATED_P (setr) = 1;
4028 vec = gen_rtvec (2, setl, setr);
4029 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4030 gen_rtx_SEQUENCE (VOIDmode, vec));
4033 offset += GET_MODE_SIZE (DFmode);
4034 fr_saved++;
4040 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
4041 Handle case where DISP > 8k by using the add_high_const patterns. */
4044 load_reg (int reg, HOST_WIDE_INT disp, int base)
4046 rtx dest = gen_rtx_REG (word_mode, reg);
4047 rtx basereg = gen_rtx_REG (Pmode, base);
4050 if (VAL_14_BITS_P (disp))
4051 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
4052 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
4054 rtx delta = GEN_INT (disp);
4055 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4057 emit_move_insn (tmpreg, delta);
4058 if (TARGET_DISABLE_INDEXING)
4060 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4061 src = gen_rtx_MEM (word_mode, tmpreg);
4064 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
4068 rtx delta = GEN_INT (disp);
4069 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
4070 rtx tmpreg = gen_rtx_REG (Pmode, 1);
4072 emit_move_insn (tmpreg, high);
4073 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
4076 emit_move_insn (dest, src);
4079 /* Update the total code bytes output to the text section. */
4082 update_total_code_bytes (unsigned int nbytes)
4084 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
4085 && !IN_NAMED_SECTION_P (cfun->decl))
4087 unsigned int old_total = total_code_bytes;
4089 total_code_bytes += nbytes;
4091 /* Be prepared to handle overflows. */
4092 if (old_total > total_code_bytes)
4093 total_code_bytes = UINT_MAX;
4097 /* This function generates the assembly code for function exit.
4098 Args are as for output_function_prologue ().
4100 The function epilogue should not depend on the current stack
4101 pointer! It should use the frame pointer only. This is mandatory
4102 because of alloca; we also take advantage of it to omit stack
4103 adjustments before returning. */
4106 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4108 rtx insn = get_last_insn ();
4112 /* hppa_expand_epilogue does the dirty work now. We just need
4113 to output the assembler directives which denote the end
4114 of the function.
4116 To make debuggers happy, emit a nop if the epilogue was completely
4117 eliminated due to a volatile call as the last insn in the
4118 current function. That way the return address (in %r2) will
4119 always point to a valid instruction in the current function. */
4121 /* Get the last real insn. */
4122 if (GET_CODE (insn) == NOTE)
4123 insn = prev_real_insn (insn);
4125 /* If it is a sequence, then look inside. */
4126 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4127 insn = XVECEXP (PATTERN (insn), 0, 0);
4129 /* If insn is a CALL_INSN, then it must be a call to a volatile
4130 function (otherwise there would be epilogue insns). */
4131 if (insn && GET_CODE (insn) == CALL_INSN)
4133 fputs ("\tnop\n", file);
4137 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4139 if (TARGET_SOM && TARGET_GAS)
4141 /* We're done with this subspace except possibly for some additional
4142 debug information. Forget that we are in this subspace to ensure
4143 that the next function is output in its own subspace. */
4145 cfun->machine->in_nsubspa = 2;
4148 if (INSN_ADDRESSES_SET_P ())
4150 insn = get_last_nonnote_insn ();
4151 last_address += INSN_ADDRESSES (INSN_UID (insn));
4153 last_address += insn_default_length (insn);
4154 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4155 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4158 last_address = UINT_MAX;
4160 /* Finally, update the total number of code bytes output so far. */
4161 update_total_code_bytes (last_address);
4165 hppa_expand_epilogue (void)
4168 HOST_WIDE_INT offset;
4169 HOST_WIDE_INT ret_off = 0;
4171 int merge_sp_adjust_with_load = 0;
4173 /* We will use this often. */
4174 tmpreg = gen_rtx_REG (word_mode, 1);
4176 /* Try to restore RP early to avoid load/use interlocks when
4177 RP gets used in the return (bv) instruction. This appears to still
4178 be necessary even when we schedule the prologue and epilogue. */
4181 ret_off = TARGET_64BIT ? -16 : -20;
4182 if (frame_pointer_needed)
4184 load_reg (2, ret_off, HARD_FRAME_POINTER_REGNUM);
4189 /* No frame pointer, and stack is smaller than 8k. */
4190 if (VAL_14_BITS_P (ret_off - actual_fsize))
4192 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4198 /* General register restores. */
4199 if (frame_pointer_needed)
4201 offset = local_fsize;
4203 /* If the current function calls __builtin_eh_return, then we need
4204 to restore the saved EH data registers. */
4205 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4207 unsigned int i, regno;
4211 regno = EH_RETURN_DATA_REGNO (i);
4212 if (regno == INVALID_REGNUM)
4215 load_reg (regno, offset, HARD_FRAME_POINTER_REGNUM);
4216 offset += UNITS_PER_WORD;
4220 for (i = 18; i >= 4; i--)
4221 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4223 load_reg (i, offset, HARD_FRAME_POINTER_REGNUM);
4224 offset += UNITS_PER_WORD;
4229 offset = local_fsize - actual_fsize;
4231 /* If the current function calls __builtin_eh_return, then we need
4232 to restore the saved EH data registers. */
4233 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4235 unsigned int i, regno;
4239 regno = EH_RETURN_DATA_REGNO (i);
4240 if (regno == INVALID_REGNUM)
4243 /* Only for the first load.
4244 merge_sp_adjust_with_load holds the register load
4245 with which we will merge the sp adjustment. */
4246 if (merge_sp_adjust_with_load == 0
4248 && VAL_14_BITS_P (-actual_fsize))
4249 merge_sp_adjust_with_load = regno;
4251 load_reg (regno, offset, STACK_POINTER_REGNUM);
4252 offset += UNITS_PER_WORD;
4256 for (i = 18; i >= 3; i--)
4258 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4260 /* Only for the first load.
4261 merge_sp_adjust_with_load holds the register load
4262 with which we will merge the sp adjustment. */
4263 if (merge_sp_adjust_with_load == 0
4265 && VAL_14_BITS_P (-actual_fsize))
4266 merge_sp_adjust_with_load = i;
4268 load_reg (i, offset, STACK_POINTER_REGNUM);
4269 offset += UNITS_PER_WORD;
4274 /* Align pointer properly (doubleword boundary). */
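/* E.g., an offset of 13 becomes (13 + 7) & ~7 == 16. */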
4275 offset = (offset + 7) & ~7;
4277 /* FP register restores. */
4280 /* Adjust the register to index off of. */
4281 if (frame_pointer_needed)
4282 set_reg_plus_d (1, HARD_FRAME_POINTER_REGNUM, offset, 0);
4284 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4286 /* Actually do the restores now. */
4287 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4288 if (df_regs_ever_live_p (i)
4289 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4291 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4292 rtx dest = gen_rtx_REG (DFmode, i);
4293 emit_move_insn (dest, src);
4297 /* Emit a blockage insn here to keep these insns from being moved to
4298 an earlier spot in the epilogue, or into the main instruction stream.
4300 This is necessary as we must not cut the stack back before all the
4301 restores are finished. */
4302 emit_insn (gen_blockage ());
4304 /* Reset stack pointer (and possibly frame pointer). The stack
4305 pointer is initially set to fp + 64 to avoid a race condition. */
4306 if (frame_pointer_needed)
4308 rtx delta = GEN_INT (-64);
4310 set_reg_plus_d (STACK_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM, 64, 0);
4311 emit_insn (gen_pre_load (hard_frame_pointer_rtx,
4312 stack_pointer_rtx, delta));
4314 /* If we were deferring a callee register restore, do it now. */
4315 else if (merge_sp_adjust_with_load)
4317 rtx delta = GEN_INT (-actual_fsize);
4318 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4320 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4322 else if (actual_fsize != 0)
4323 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4326 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4327 frame greater than 8k), do so now. */
4329 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4331 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4333 rtx sa = EH_RETURN_STACKADJ_RTX;
4335 emit_insn (gen_blockage ());
4336 emit_insn (TARGET_64BIT
4337 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4338 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4343 pa_can_use_return_insn (void)
4345 if (!reload_completed)
4348 if (frame_pointer_needed)
4351 if (df_regs_ever_live_p (2))
4357 return compute_frame_size (get_frame_size (), 0) == 0;
4361 hppa_pic_save_rtx (void)
4363 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4366 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4367 #define NO_DEFERRED_PROFILE_COUNTERS 0
4371 /* Vector of funcdef numbers. */
4372 static VEC(int,heap) *funcdef_nos;
4374 /* Output deferred profile counters. */
4376 output_deferred_profile_counters (void)
4381 if (VEC_empty (int, funcdef_nos))
4384 switch_to_section (data_section);
4385 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4386 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4388 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4390 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4391 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4394 VEC_free (int, heap, funcdef_nos);
4398 hppa_profile_hook (int label_no)
4400 /* We use SImode for the address of the function in both 32 and
4401 64-bit code to avoid having to provide DImode versions of the
4402 lcla2 and load_offset_label_address insn patterns. */
4403 rtx reg = gen_reg_rtx (SImode);
4404 rtx label_rtx = gen_label_rtx ();
4405 rtx begin_label_rtx, call_insn;
4406 char begin_label_name[16];
4408 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4410 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4413 emit_move_insn (arg_pointer_rtx,
4414 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4417 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4419 /* The address of the function is loaded into %r25 with an instruction-
4420 relative sequence that avoids the use of relocations. The sequence
4421 is split so that the load_offset_label_address instruction can
4422 occupy the delay slot of the call to _mcount. */
4424 emit_insn (gen_lcla2 (reg, label_rtx));
4426 emit_insn (gen_lcla1 (reg, label_rtx));
4428 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4429 reg, begin_label_rtx, label_rtx));
4431 #if !NO_DEFERRED_PROFILE_COUNTERS
4433 rtx count_label_rtx, addr, r24;
4434 char count_label_name[16];
4436 VEC_safe_push (int, heap, funcdef_nos, label_no);
4437 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4438 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4440 addr = force_reg (Pmode, count_label_rtx);
4441 r24 = gen_rtx_REG (Pmode, 24);
4442 emit_move_insn (r24, addr);
4445 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4446 gen_rtx_SYMBOL_REF (Pmode,
4448 GEN_INT (TARGET_64BIT ? 24 : 12)));
4450 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4455 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4456 gen_rtx_SYMBOL_REF (Pmode,
4458 GEN_INT (TARGET_64BIT ? 16 : 8)));
4462 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4463 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4465 /* Indicate the _mcount call cannot throw, nor will it execute a non-local goto. */
4467 make_reg_eh_region_note_nothrow_nononlocal (call_insn);
4470 /* Fetch the return address for the frame COUNT steps up from
4471 the current frame, after the prologue. FRAMEADDR is the
4472 frame pointer of the COUNT frame.
4474 We want to ignore any export stub remnants here. To handle this,
4475 we examine the code at the return address, and if it is an export
4476 stub, we return a memory rtx for the stub return address stored
4479 The value returned is used in two different ways:
4481 1. To find a function's caller.
4483 2. To change the return address for a function.
4485 This function handles most instances of case 1; however, it will
4486 fail if there are two levels of stubs to execute on the return
4487 path. The only way I believe that can happen is if the return value
4488 needs a parameter relocation, which never happens for C code.
4490 This function handles most instances of case 2; however, it will
4491 fail if we did not originally have stub code on the return path
4492 but will need stub code on the new return path. This can happen if
4493 the caller & callee are both in the main program, but the new
4494 return location is in a shared library. */
4497 return_addr_rtx (int count, rtx frameaddr)
4504 /* Instruction stream at the normal return address for the export stub:
4506 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4507 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4508 0x00011820 | stub+16: mtsp r1,sr0
4509 0xe0400002 | stub+20: be,n 0(sr0,rp)
4511 0xe0400002 must be specified as -532676606 so that it won't be
4512 rejected as an invalid immediate operand on 64-bit hosts. */
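/* (0xe0400002 == 3762290690, and 3762290690 - 2^32 == -532676606.) */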
4514 HOST_WIDE_INT insns[4] = {0x4bc23fd1, 0x004010a1, 0x00011820, -532676606};
4520 rp = get_hard_reg_initial_val (Pmode, 2);
4522 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4525 /* If there is no export stub then just use the value saved from
4526 the return pointer register. */
4528 saved_rp = gen_reg_rtx (Pmode);
4529 emit_move_insn (saved_rp, rp);
4531 /* Get pointer to the instruction stream. We have to mask out the
4532 privilege level from the two low order bits of the return address
4533 pointer here so that ins will point to the start of the first
4534 instruction that would have been executed if we returned. */
4535 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4536 label = gen_label_rtx ();
4538 /* Check the instruction stream at the normal return address for the
4539 export stub. If it is an export stub, then our return address is
4540 really in -24[frameaddr]. */
4542 for (i = 0; i < 3; i++)
4544 rtx op0 = gen_rtx_MEM (SImode, plus_constant (ins, i * 4));
4545 rtx op1 = GEN_INT (insns[i]);
4546 emit_cmp_and_jump_insns (op0, op1, NE, NULL, SImode, 0, label);
4549 /* Here we know that our return address points to an export
4550 stub. We don't want to return the address of the export stub,
4551 but rather the return address of the export stub. That return
4552 address is stored at -24[frameaddr]. */
4554 emit_move_insn (saved_rp,
4556 memory_address (Pmode,
4557 plus_constant (frameaddr,
4566 emit_bcond_fp (rtx operands[])
4568 enum rtx_code code = GET_CODE (operands[0]);
4569 rtx operand0 = operands[1];
4570 rtx operand1 = operands[2];
4571 rtx label = operands[3];
4573 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4574 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1)));
4576 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4577 gen_rtx_IF_THEN_ELSE (VOIDmode,
4580 gen_rtx_REG (CCFPmode, 0),
4582 gen_rtx_LABEL_REF (VOIDmode, label),
4587 /* Adjust the cost of a scheduling dependency. Return the new cost of
4588 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4591 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4593 enum attr_type attr_type;
4595 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4596 true dependencies as they are described with bypasses now. */
4597 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4600 if (! recog_memoized (insn))
4603 attr_type = get_attr_type (insn);
4605 switch (REG_NOTE_KIND (link))
4608 /* Anti dependency; DEP_INSN reads a register that INSN writes some cycles later. */
4611 if (attr_type == TYPE_FPLOAD)
4613 rtx pat = PATTERN (insn);
4614 rtx dep_pat = PATTERN (dep_insn);
4615 if (GET_CODE (pat) == PARALLEL)
4617 /* This happens for the fldXs,mb patterns. */
4618 pat = XVECEXP (pat, 0, 0);
4620 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4621 /* If this happens, we have to extend this to schedule
4622 optimally. Return 0 for now. */
4625 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4627 if (! recog_memoized (dep_insn))
4629 switch (get_attr_type (dep_insn))
4636 case TYPE_FPSQRTSGL:
4637 case TYPE_FPSQRTDBL:
4638 /* A fpload can't be issued until one cycle before a
4639 preceding arithmetic operation has finished if
4640 the target of the fpload is any of the sources
4641 (or destination) of the arithmetic operation. */
4642 return insn_default_latency (dep_insn) - 1;
4649 else if (attr_type == TYPE_FPALU)
4651 rtx pat = PATTERN (insn);
4652 rtx dep_pat = PATTERN (dep_insn);
4653 if (GET_CODE (pat) == PARALLEL)
4655 /* This happens for the fldXs,mb patterns. */
4656 pat = XVECEXP (pat, 0, 0);
4658 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4659 /* If this happens, we have to extend this to schedule
4660 optimally. Return 0 for now. */
4663 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4665 if (! recog_memoized (dep_insn))
4667 switch (get_attr_type (dep_insn))
4671 case TYPE_FPSQRTSGL:
4672 case TYPE_FPSQRTDBL:
4673 /* An ALU flop can't be issued until two cycles before a
4674 preceding divide or sqrt operation has finished if
4675 the target of the ALU flop is any of the sources
4676 (or destination) of the divide or sqrt operation. */
4677 return insn_default_latency (dep_insn) - 2;
4685 /* For other anti dependencies, the cost is 0. */
4688 case REG_DEP_OUTPUT:
4689 /* Output dependency; DEP_INSN writes a register that INSN writes some cycles later. */
4691 if (attr_type == TYPE_FPLOAD)
4693 rtx pat = PATTERN (insn);
4694 rtx dep_pat = PATTERN (dep_insn);
4695 if (GET_CODE (pat) == PARALLEL)
4697 /* This happens for the fldXs,mb patterns. */
4698 pat = XVECEXP (pat, 0, 0);
4700 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4701 /* If this happens, we have to extend this to schedule
4702 optimally. Return 0 for now. */
4705 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4707 if (! recog_memoized (dep_insn))
4709 switch (get_attr_type (dep_insn))
4716 case TYPE_FPSQRTSGL:
4717 case TYPE_FPSQRTDBL:
4718 /* A fpload can't be issued until one cycle before a
4719 preceding arithmetic operation has finished if
4720 the target of the fpload is the destination of the
4721 arithmetic operation.
4723 Exception: For PA7100LC, PA7200 and PA7300, the cost
4724 is 3 cycles, unless they bundle together. We also
4725 pay the penalty if the second insn is a fpload. */
4726 return insn_default_latency (dep_insn) - 1;
4733 else if (attr_type == TYPE_FPALU)
4735 rtx pat = PATTERN (insn);
4736 rtx dep_pat = PATTERN (dep_insn);
4737 if (GET_CODE (pat) == PARALLEL)
4739 /* This happens for the fldXs,mb patterns. */
4740 pat = XVECEXP (pat, 0, 0);
4742 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4743 /* If this happens, we have to extend this to schedule
4744 optimally. Return 0 for now. */
4747 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4749 if (! recog_memoized (dep_insn))
4751 switch (get_attr_type (dep_insn))
4755 case TYPE_FPSQRTSGL:
4756 case TYPE_FPSQRTDBL:
4757 /* An ALU flop can't be issued until two cycles before a
4758 preceding divide or sqrt operation has finished if
4759 the target of the ALU flop is also the target of
4760 the divide or sqrt operation. */
4761 return insn_default_latency (dep_insn) - 2;
4769 /* For other output dependencies, the cost is 0. */
4777 /* Adjust scheduling priorities. We use this to try to keep addil
4778 and the next use of %r1 close together. */
4780 pa_adjust_priority (rtx insn, int priority)
4782 rtx set = single_set (insn);
4786 src = SET_SRC (set);
4787 dest = SET_DEST (set);
4788 if (GET_CODE (src) == LO_SUM
4789 && symbolic_operand (XEXP (src, 1), VOIDmode)
4790 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4793 else if (GET_CODE (src) == MEM
4794 && GET_CODE (XEXP (src, 0)) == LO_SUM
4795 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4796 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4799 else if (GET_CODE (dest) == MEM
4800 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4801 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4802 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4808 /* The 700 can only issue a single insn at a time.
4809 The 7XXX processors can issue two insns at a time.
4810 The 8000 can issue 4 insns at a time. */
4812 pa_issue_rate (void)
4816 case PROCESSOR_700: return 1;
4817 case PROCESSOR_7100: return 2;
4818 case PROCESSOR_7100LC: return 2;
4819 case PROCESSOR_7200: return 2;
4820 case PROCESSOR_7300: return 2;
4821 case PROCESSOR_8000: return 4;
4830 /* Return any length adjustment needed by INSN which already has its length
4831 computed as LENGTH. Return zero if no adjustment is necessary.
4833 For the PA: function calls, millicode calls, and backwards short
4834 conditional branches with unfilled delay slots need an adjustment by +1
4835 (to account for the NOP which will be inserted into the instruction stream).
4837 Also compute the length of an inline block move here as it is too
4838 complicated to express as a length attribute in pa.md. */
4840 pa_adjust_insn_length (rtx insn, int length)
4842 rtx pat = PATTERN (insn);
4844 /* Jumps inside switch tables which have unfilled delay slots need adjustment. */
4846 if (GET_CODE (insn) == JUMP_INSN
4847 && GET_CODE (pat) == PARALLEL
4848 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4850 /* Millicode insn with an unfilled delay slot. */
4851 else if (GET_CODE (insn) == INSN
4852 && GET_CODE (pat) != SEQUENCE
4853 && GET_CODE (pat) != USE
4854 && GET_CODE (pat) != CLOBBER
4855 && get_attr_type (insn) == TYPE_MILLI)
4857 /* Block move pattern. */
4858 else if (GET_CODE (insn) == INSN
4859 && GET_CODE (pat) == PARALLEL
4860 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4861 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4862 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4863 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4864 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4865 return compute_movmem_length (insn) - 4;
4866 /* Block clear pattern. */
4867 else if (GET_CODE (insn) == INSN
4868 && GET_CODE (pat) == PARALLEL
4869 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4870 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4871 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4872 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4873 return compute_clrmem_length (insn) - 4;
4874 /* Conditional branch with an unfilled delay slot. */
4875 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4877 /* Adjust a short backwards conditional with an unfilled delay slot. */
4878 if (GET_CODE (pat) == SET
4880 && JUMP_LABEL (insn) != NULL_RTX
4881 && ! forward_branch_p (insn))
4883 else if (GET_CODE (pat) == PARALLEL
4884 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4887 /* Adjust dbra insn with short backwards conditional branch with
4888 unfilled delay slot -- only for case where counter is in a
4889 general register. */
4890 else if (GET_CODE (pat) == PARALLEL
4891 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4892 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4893 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4895 && ! forward_branch_p (insn))
4903 /* Implement the TARGET_PRINT_OPERAND_PUNCT_VALID_P hook. */
4906 pa_print_operand_punct_valid_p (unsigned char code)
4917 /* Print operand X (an rtx) in assembler syntax to file FILE.
4918 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4919 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4922 print_operand (FILE *file, rtx x, int code)
4927 /* Output a 'nop' if there's nothing for the delay slot. */
4928 if (dbr_sequence_length () == 0)
4929 fputs ("\n\tnop", file);
4932 /* Output a nullification completer if there's nothing for the
4933 delay slot or nullification is requested. */
4934 if (dbr_sequence_length () == 0 ||
4936 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4940 /* Print out the second register name of a register pair.
4941 I.e., R (6) => 7. */
4942 fputs (reg_names[REGNO (x) + 1], file);
4945 /* A register or zero. */
4947 || (x == CONST0_RTX (DFmode))
4948 || (x == CONST0_RTX (SFmode)))
4950 fputs ("%r0", file);
4956 /* A register or zero (floating point). */
4958 || (x == CONST0_RTX (DFmode))
4959 || (x == CONST0_RTX (SFmode)))
4961 fputs ("%fr0", file);
4970 xoperands[0] = XEXP (XEXP (x, 0), 0);
4971 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4972 output_global_address (file, xoperands[1], 0);
4973 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4977 case 'C': /* Plain (C)ondition */
4979 switch (GET_CODE (x))
4982 fputs ("=", file); break;
4984 fputs ("<>", file); break;
4986 fputs (">", file); break;
4988 fputs (">=", file); break;
4990 fputs (">>=", file); break;
4992 fputs (">>", file); break;
4994 fputs ("<", file); break;
4996 fputs ("<=", file); break;
4998 fputs ("<<=", file); break;
5000 fputs ("<<", file); break;
5005 case 'N': /* Condition, (N)egated */
5006 switch (GET_CODE (x))
5009 fputs ("<>", file); break;
5011 fputs ("=", file); break;
5013 fputs ("<=", file); break;
5015 fputs ("<", file); break;
5017 fputs ("<<", file); break;
5019 fputs ("<<=", file); break;
5021 fputs (">=", file); break;
5023 fputs (">", file); break;
5025 fputs (">>", file); break;
5027 fputs (">>=", file); break;
5032 /* For floating point comparisons. Note that the output
5033 predicates are the complement of the desired mode. The
5034 conditions for GT, GE, LT, LE and LTGT cause an invalid
5035 operation exception if the result is unordered and this
5036 exception is enabled in the floating-point status register. */
5038 switch (GET_CODE (x))
5041 fputs ("!=", file); break;
5043 fputs ("=", file); break;
5045 fputs ("!>", file); break;
5047 fputs ("!>=", file); break;
5049 fputs ("!<", file); break;
5051 fputs ("!<=", file); break;
5053 fputs ("!<>", file); break;
5055 fputs ("!?<=", file); break;
5057 fputs ("!?<", file); break;
5059 fputs ("!?>=", file); break;
5061 fputs ("!?>", file); break;
5063 fputs ("!?=", file); break;
5065 fputs ("!?", file); break;
5067 fputs ("?", file); break;
5072 case 'S': /* Condition, operands are (S)wapped. */
5073 switch (GET_CODE (x))
5076 fputs ("=", file); break;
5078 fputs ("<>", file); break;
5080 fputs ("<", file); break;
5082 fputs ("<=", file); break;
5084 fputs ("<<=", file); break;
5086 fputs ("<<", file); break;
5088 fputs (">", file); break;
5090 fputs (">=", file); break;
5092 fputs (">>=", file); break;
5094 fputs (">>", file); break;
5099 case 'B': /* Condition, (B)oth swapped and negated. */
5100 switch (GET_CODE (x))
5103 fputs ("<>", file); break;
5105 fputs ("=", file); break;
5107 fputs (">=", file); break;
5109 fputs (">", file); break;
5111 fputs (">>", file); break;
5113 fputs (">>=", file); break;
5115 fputs ("<=", file); break;
5117 fputs ("<", file); break;
5119 fputs ("<<", file); break;
5121 fputs ("<<=", file); break;
5127 gcc_assert (GET_CODE (x) == CONST_INT);
5128 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5131 gcc_assert (GET_CODE (x) == CONST_INT);
5132 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5135 gcc_assert (GET_CODE (x) == CONST_INT);
5136 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5139 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5140 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5143 gcc_assert (GET_CODE (x) == CONST_INT);
5144 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5147 gcc_assert (GET_CODE (x) == CONST_INT);
5148 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
5151 if (GET_CODE (x) == CONST_INT)
5156 switch (GET_CODE (XEXP (x, 0)))
5160 if (ASSEMBLER_DIALECT == 0)
5161 fputs ("s,mb", file);
5163 fputs (",mb", file);
5167 if (ASSEMBLER_DIALECT == 0)
5168 fputs ("s,ma", file);
5170 fputs (",ma", file);
5173 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5174 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5176 if (ASSEMBLER_DIALECT == 0)
5179 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5180 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5182 if (ASSEMBLER_DIALECT == 0)
5183 fputs ("x,s", file);
5187 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5191 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5197 output_global_address (file, x, 0);
5200 output_global_address (file, x, 1);
5202 case 0: /* Don't do anything special */
5207 compute_zdepwi_operands (INTVAL (x), op);
5208 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5214 compute_zdepdi_operands (INTVAL (x), op);
5215 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5219 /* We can get here from a .vtable_inherit due to our
5220 CONSTANT_ADDRESS_P rejecting perfectly good constant addresses. */
5226 if (GET_CODE (x) == REG)
5228 fputs (reg_names [REGNO (x)], file);
5229 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5235 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5236 && (REGNO (x) & 1) == 0)
5239 else if (GET_CODE (x) == MEM)
5241 int size = GET_MODE_SIZE (GET_MODE (x));
5242 rtx base = NULL_RTX;
5243 switch (GET_CODE (XEXP (x, 0)))
5247 base = XEXP (XEXP (x, 0), 0);
5248 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5252 base = XEXP (XEXP (x, 0), 0);
5253 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5256 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5257 fprintf (file, "%s(%s)",
5258 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5259 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5260 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5261 fprintf (file, "%s(%s)",
5262 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5263 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5264 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5265 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5267 /* Because the REG_POINTER flag can get lost during reload,
5268 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5269 index and base registers in the combined move patterns. */
5270 rtx base = XEXP (XEXP (x, 0), 1);
5271 rtx index = XEXP (XEXP (x, 0), 0);
5273 fprintf (file, "%s(%s)",
5274 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5277 output_address (XEXP (x, 0));
5280 output_address (XEXP (x, 0));
5285 output_addr_const (file, x);
5288 /* Output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5291 output_global_address (FILE *file, rtx x, int round_constant)
5294 /* Imagine (high (const (plus ...))). */
5295 if (GET_CODE (x) == HIGH)
5298 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5299 output_addr_const (file, x);
5300 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5302 output_addr_const (file, x);
5303 fputs ("-$global$", file);
5305 else if (GET_CODE (x) == CONST)
5307 const char *sep = "";
5308 int offset = 0; /* assembler wants -$global$ at end */
5309 rtx base = NULL_RTX;
5311 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5314 base = XEXP (XEXP (x, 0), 0);
5315 output_addr_const (file, base);
5318 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5324 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5327 base = XEXP (XEXP (x, 0), 1);
5328 output_addr_const (file, base);
5331 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5337 /* How bogus. The compiler is apparently responsible for
5338 rounding the constant if it uses an LR field selector.
5340 The linker and/or assembler seem a better place since
5341 they have to do this kind of thing already.
5343 If we fail to do this, HP's optimizing linker may eliminate
5344 an addil, but not update the ldw/stw/ldo instruction that
5345 uses the result of the addil. */
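/* (This rounds to the nearest multiple of 0x2000; e.g., an offset of
0xfff becomes 0 and 0x1000 becomes 0x2000.) */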
5347 offset = ((offset + 0x1000) & ~0x1fff);
5349 switch (GET_CODE (XEXP (x, 0)))
5362 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5370 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5371 fputs ("-$global$", file);
5373 fprintf (file, "%s%d", sep, offset);
5376 output_addr_const (file, x);
5379 /* Output boilerplate text to appear at the beginning of the file.
5380 There are several possible versions. */
5381 #define aputs(x) fputs(x, asm_out_file)
5383 pa_file_start_level (void)
5386 aputs ("\t.LEVEL 2.0w\n");
5387 else if (TARGET_PA_20)
5388 aputs ("\t.LEVEL 2.0\n");
5389 else if (TARGET_PA_11)
5390 aputs ("\t.LEVEL 1.1\n");
5392 aputs ("\t.LEVEL 1.0\n");
5396 pa_file_start_space (int sortspace)
5398 aputs ("\t.SPACE $PRIVATE$");
5401 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5402 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5403 "\n\t.SPACE $TEXT$");
5406 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5407 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5411 pa_file_start_file (int want_version)
5413 if (write_symbols != NO_DEBUG)
5415 output_file_directive (asm_out_file, main_input_filename);
5417 aputs ("\t.version\t\"01.01\"\n");
5422 pa_file_start_mcount (const char *aswhat)
5425 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5429 pa_elf_file_start (void)
5431 pa_file_start_level ();
5432 pa_file_start_mcount ("ENTRY");
5433 pa_file_start_file (0);
5437 pa_som_file_start (void)
5439 pa_file_start_level ();
5440 pa_file_start_space (0);
5441 aputs ("\t.IMPORT $global$,DATA\n"
5442 "\t.IMPORT $$dyncall,MILLICODE\n");
5443 pa_file_start_mcount ("CODE");
5444 pa_file_start_file (0);
5448 pa_linux_file_start (void)
5450 pa_file_start_file (1);
5451 pa_file_start_level ();
5452 pa_file_start_mcount ("CODE");
5456 pa_hpux64_gas_file_start (void)
5458 pa_file_start_level ();
5459 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5461 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5463 pa_file_start_file (1);
5467 pa_hpux64_hpas_file_start (void)
5469 pa_file_start_level ();
5470 pa_file_start_space (1);
5471 pa_file_start_mcount ("CODE");
5472 pa_file_start_file (0);
5476 /* Search the deferred plabel list for SYMBOL and return its internal
5477 label. If an entry for SYMBOL is not found, a new entry is created. */
5480 get_deferred_plabel (rtx symbol)
5482 const char *fname = XSTR (symbol, 0);
5485 /* See if we have already put this function on the list of deferred
5486 plabels. This list is generally small, so a linear search is not
5487 too ugly. If it proves too slow, replace it with something faster. */
5488 for (i = 0; i < n_deferred_plabels; i++)
5489 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5492 /* If the deferred plabel list is empty, or this entry was not found
5493 on the list, create a new entry on the list. */
5494 if (deferred_plabels == NULL || i == n_deferred_plabels)
5498 if (deferred_plabels == 0)
5499 deferred_plabels = ggc_alloc_deferred_plabel ();
5501 deferred_plabels = GGC_RESIZEVEC (struct deferred_plabel,
5503 n_deferred_plabels + 1);
5505 i = n_deferred_plabels++;
5506 deferred_plabels[i].internal_label = gen_label_rtx ();
5507 deferred_plabels[i].symbol = symbol;
5509 /* Gross. We have just implicitly taken the address of this
5510 function. Mark it in the same manner as assemble_name. */
5511 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5513 mark_referenced (id);
5516 return deferred_plabels[i].internal_label;
5520 output_deferred_plabels (void)
5524 /* If we have some deferred plabels, then we need to switch into the
5525 data or readonly data section, and align it to a 4-byte (8-byte for TARGET_64BIT) boundary
5526 before outputting the deferred plabels. */
5527 if (n_deferred_plabels)
5529 switch_to_section (flag_pic ? data_section : readonly_data_section);
5530 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5533 /* Now output the deferred plabels. */
5534 for (i = 0; i < n_deferred_plabels; i++)
5536 targetm.asm_out.internal_label (asm_out_file, "L",
5537 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5538 assemble_integer (deferred_plabels[i].symbol,
5539 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5543 #if HPUX_LONG_DOUBLE_LIBRARY
5544 /* Initialize optabs to point to HPUX long double emulation routines. */
5546 pa_hpux_init_libfuncs (void)
5548 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5549 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5550 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5551 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5552 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5553 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5554 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5555 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5556 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5558 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5559 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5560 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5561 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5562 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5563 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5564 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5566 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5567 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5568 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5569 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5571 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5572 ? "__U_Qfcnvfxt_quad_to_sgl"
5573 : "_U_Qfcnvfxt_quad_to_sgl");
5574 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5575 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5576 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5578 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5579 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5580 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5581 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5585 /* HP's millicode routines mean something special to the assembler.
5586 Keep track of which ones we have used. */
5588 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5589 static void import_milli (enum millicodes);
5590 static char imported[(int) end1000];
5591 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5592 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5593 #define MILLI_START 10
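/* MILLI_START is the index of the "...." placeholder in import_string:
".IMPORT $$" occupies indices 0-9, so the routine name is copied in
starting at index 10. */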
5596 import_milli (enum millicodes code)
5598 char str[sizeof (import_string)];
5600 if (!imported[(int) code])
5602 imported[(int) code] = 1;
5603 strcpy (str, import_string);
5604 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5605 output_asm_insn (str, 0);
5609 /* The register constraints have put the operands and return value in
5610 the proper registers. */
5613 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5615 import_milli (mulI);
5616 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5619 /* Emit the rtl for doing a division by a constant. */
5621 /* Do magic division millicodes exist for this value? */
5622 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
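/* I.e., $$divI_n/$$divU_n millicode entry points exist for
n = 3, 5, 6, 7, 9, 10, 12, 14 and 15. */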
5624 /* We'll use an array to keep track of the magic millicodes and
5625 whether or not we've used them already. [n][0] is signed, [n][1] is unsigned. */
5628 static int div_milli[16][2];
5631 emit_hpdiv_const (rtx *operands, int unsignedp)
5633 if (GET_CODE (operands[2]) == CONST_INT
5634 && INTVAL (operands[2]) > 0
5635 && INTVAL (operands[2]) < 16
5636 && magic_milli[INTVAL (operands[2])])
5638 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5640 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5644 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5645 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5647 gen_rtx_REG (SImode, 26),
5649 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5650 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5651 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5652 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5653 gen_rtx_CLOBBER (VOIDmode, ret))));
5654 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5661 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5665 /* If the divisor is a constant, try to use one of the special
5667 if (GET_CODE (operands[0]) == CONST_INT)
5669 static char buf[100];
5670 divisor = INTVAL (operands[0]);
5671 if (!div_milli[divisor][unsignedp])
5673 div_milli[divisor][unsignedp] = 1;
5675 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5677 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5681 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5682 INTVAL (operands[0]));
5683 return output_millicode_call (insn,
5684 gen_rtx_SYMBOL_REF (SImode, buf));
5688 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5689 INTVAL (operands[0]));
5690 return output_millicode_call (insn,
5691 gen_rtx_SYMBOL_REF (SImode, buf));
5694 /* Divisor isn't a special constant. */
5699 import_milli (divU);
5700 return output_millicode_call (insn,
5701 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5705 import_milli (divI);
5706 return output_millicode_call (insn,
5707 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5712 /* Output a $$rem millicode to do mod. */
5715 output_mod_insn (int unsignedp, rtx insn)
5719 import_milli (remU);
5720 return output_millicode_call (insn,
5721 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5725 import_milli (remI);
5726 return output_millicode_call (insn,
5727 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5732 output_arg_descriptor (rtx call_insn)
5734 const char *arg_regs[4];
5735 enum machine_mode arg_mode;
5737 int i, output_flag = 0;
5740 /* We neither need nor want argument location descriptors for the
5741 64-bit runtime environment or the ELF32 environment. */
5742 if (TARGET_64BIT || TARGET_ELF32)
5745 for (i = 0; i < 4; i++)
5748 /* Specify explicitly that no argument relocations should take place
5749 if using the portable runtime calling conventions. */
5750 if (TARGET_PORTABLE_RUNTIME)
5752 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5757 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5758 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5759 link; link = XEXP (link, 1))
5761 rtx use = XEXP (link, 0);
5763 if (! (GET_CODE (use) == USE
5764 && GET_CODE (XEXP (use, 0)) == REG
5765 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5768 arg_mode = GET_MODE (XEXP (use, 0));
5769 regno = REGNO (XEXP (use, 0));
5770 if (regno >= 23 && regno <= 26)
5772 arg_regs[26 - regno] = "GR";
5773 if (arg_mode == DImode)
5774 arg_regs[25 - regno] = "GR";
5776 else if (regno >= 32 && regno <= 39)
5778 if (arg_mode == SFmode)
5779 arg_regs[(regno - 32) / 2] = "FR";
5782 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5783 arg_regs[(regno - 34) / 2] = "FR";
5784 arg_regs[(regno - 34) / 2 + 1] = "FU";
5786 arg_regs[(regno - 34) / 2] = "FU";
5787 arg_regs[(regno - 34) / 2 + 1] = "FR";
5792 fputs ("\t.CALL ", asm_out_file);
5793 for (i = 0; i < 4; i++)
5798 fputc (',', asm_out_file);
5799 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5802 fputc ('\n', asm_out_file);
5805 /* Inform reload about cases where moving X with a mode MODE to a register in
5806 RCLASS requires an extra scratch or immediate register. Return the class
5807 needed for the immediate register. */
5810 pa_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
5811 enum machine_mode mode, secondary_reload_info *sri)
5814 enum reg_class rclass = (enum reg_class) rclass_i;
5816 /* Handle the easy stuff first. */
5817 if (rclass == R1_REGS)
5823 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5829 /* If we have something like (mem (mem (...)), we can safely assume the
5830 inner MEM will end up in a general register after reloading, so there's
5831 no need for a secondary reload. */
5832 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5835 /* Trying to load a constant into a FP register during PIC code
5836 generation requires %r1 as a scratch register. */
5838 && (mode == SImode || mode == DImode)
5839 && FP_REG_CLASS_P (rclass)
5840 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5842 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5843 : CODE_FOR_reload_indi_r1);
5847 /* Secondary reloads of symbolic operands require %r1 as a scratch
5848 register when we're generating PIC code and when the operand isn't readonly. */
5850 if (symbolic_expression_p (x))
5852 if (GET_CODE (x) == HIGH)
5855 if (flag_pic || !read_only_operand (x, VOIDmode))
5857 gcc_assert (mode == SImode || mode == DImode);
5858 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5859 : CODE_FOR_reload_indi_r1);
5864 /* Profiling showed the PA port spends about 1.3% of its compilation
5865 time in true_regnum from calls inside pa_secondary_reload_class. */
5866 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5867 regno = true_regnum (x);
5869 /* In order to allow 14-bit displacements in integer loads and stores,
5870 we need to prevent reload from generating out of range integer mode
5871 loads and stores to the floating point registers. Previously, we
5872 used to call for a secondary reload and have emit_move_sequence()
5873 fix the instruction sequence. However, reload occasionally wouldn't
5874 generate the reload and we would end up with an invalid REG+D memory
5875 address. So, now we use an intermediate general register for most
5876 memory loads and stores. */
5877 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5878 && GET_MODE_CLASS (mode) == MODE_INT
5879 && FP_REG_CLASS_P (rclass))
5881 /* Reload passes (mem:SI (reg/f:DI 30 %r30) when it wants to check
5882 the secondary reload needed for a pseudo. It never passes a REG+D address. */
5884 if (GET_CODE (x) == MEM)
5888 /* We don't need an intermediate for indexed and LO_SUM DLT
5889 memory addresses. When INT14_OK_STRICT is true, it might
5890 appear that we could directly allow register indirect
5891 memory addresses. However, this doesn't work because we
5892 don't support SUBREGs in floating-point register copies
5893 and reload doesn't tell us when it's going to use a SUBREG. */
5894 if (IS_INDEX_ADDR_P (x)
5895 || IS_LO_SUM_DLT_ADDR_P (x))
5898 /* Otherwise, we need an intermediate general register. */
5899 return GENERAL_REGS;
5902 /* Request a secondary reload with a general scratch register
5903 for everything else. ??? Could symbolic operands be handled
5904 directly when generating non-pic PA 2.0 code? */
5906 ? direct_optab_handler (reload_in_optab, mode)
5907 : direct_optab_handler (reload_out_optab, mode));
5911 /* A SAR<->FP register copy requires an intermediate general register
5912 and secondary memory. We need a secondary reload with a general
5913 scratch register for spills. */
5914 if (rclass == SHIFT_REGS)
5917 if (regno >= FIRST_PSEUDO_REGISTER || regno < 0)
5920 ? direct_optab_handler (reload_in_optab, mode)
5921 : direct_optab_handler (reload_out_optab, mode));
5925 /* Handle FP copy. */
5926 if (FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))
5927 return GENERAL_REGS;
5930 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5931 && REGNO_REG_CLASS (regno) == SHIFT_REGS
5932 && FP_REG_CLASS_P (rclass))
5933 return GENERAL_REGS;
5938 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
5939 is only marked as live on entry by df-scan when it is a fixed
5940 register. It isn't a fixed register in the 64-bit runtime,
5941 so we need to mark it here. */
5944 pa_extra_live_on_entry (bitmap regs)
5947 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
5950 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
5951 to prevent it from being deleted. */
5954 pa_eh_return_handler_rtx (void)
5958 tmp = gen_rtx_PLUS (word_mode, hard_frame_pointer_rtx,
5959 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
5960 tmp = gen_rtx_MEM (word_mode, tmp);
5965 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5966 by invisible reference. As a GCC extension, we also pass anything
5967 with a zero or variable size by reference.
5969 The 64-bit runtime does not describe passing any types by invisible
5970 reference. The internals of GCC can't currently handle passing
5971 empty structures, and zero or variable length arrays when they are
5972 not passed entirely on the stack or by reference. Thus, as a GCC
5973 extension, we pass these types by reference. The HP compiler doesn't
5974 support these types, so hopefully there shouldn't be any compatibility
5975 issues. This may have to be revisited when HP releases a C99 compiler
5976 or updates the ABI. */
5979 pa_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5980 enum machine_mode mode, const_tree type,
5981 bool named ATTRIBUTE_UNUSED)
5986 size = int_size_in_bytes (type);
5988 size = GET_MODE_SIZE (mode);
5993 return size <= 0 || size > 8;
5997 function_arg_padding (enum machine_mode mode, const_tree type)
6002 && (AGGREGATE_TYPE_P (type)
6003 || TREE_CODE (type) == COMPLEX_TYPE
6004 || TREE_CODE (type) == VECTOR_TYPE)))
6006 /* Return none if justification is not required. */
6008 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
6009 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
6012 /* The directions set here are ignored when a BLKmode argument larger
6013 than a word is placed in a register. Different code is used for
6014 the stack and registers. This makes it difficult to have a
6015 consistent data representation for both the stack and registers.
6016 For both runtimes, the justification and padding for arguments on
6017 the stack and in registers should be identical. */
6019 /* The 64-bit runtime specifies left justification for aggregates. */
6022 /* The 32-bit runtime architecture specifies right justification.
6023 When the argument is passed on the stack, the argument is padded
6024 with garbage on the left. The HP compiler pads with zeros. */
6028 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
6035 /* Do what is necessary for `va_start'. We look at the current function
6036 to determine if stdargs or varargs is used and fill in an initial
6037 va_list. A pointer to this constructor is returned. */
6040 hppa_builtin_saveregs (void)
6043 tree fntype = TREE_TYPE (current_function_decl);
6044 int argadj = ((!stdarg_p (fntype))
6045 ? UNITS_PER_WORD : 0);
6048 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
6050 offset = crtl->args.arg_offset_rtx;
6056 /* Adjust for varargs/stdarg differences. */
6058 offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
6060 offset = crtl->args.arg_offset_rtx;
6062 /* We need to save %r26 .. %r19 inclusive starting at offset -64
6063 from the incoming arg pointer and growing to larger addresses. */
6064 for (i = 26, off = -64; i >= 19; i--, off += 8)
6065 emit_move_insn (gen_rtx_MEM (word_mode,
6066 plus_constant (arg_pointer_rtx, off)),
6067 gen_rtx_REG (word_mode, i));
6069 /* The incoming args pointer points just beyond the flushback area;
6070 normally this is not a serious concern. However, when we are doing
6071 varargs/stdargs we want to make the arg pointer point to the start
6072 of the incoming argument area. */
6073 emit_move_insn (virtual_incoming_args_rtx,
6074 plus_constant (arg_pointer_rtx, -64));
6076 /* Now return a pointer to the first anonymous argument. */
6077 return copy_to_reg (expand_binop (Pmode, add_optab,
6078 virtual_incoming_args_rtx,
6079 offset, 0, 0, OPTAB_LIB_WIDEN));
6082 /* Store general registers on the stack. */
6083 dest = gen_rtx_MEM (BLKmode,
6084 plus_constant (crtl->args.internal_arg_pointer,
6086 set_mem_alias_set (dest, get_varargs_alias_set ());
6087 set_mem_align (dest, BITS_PER_WORD);
6088 move_block_from_reg (23, dest, 4);
6090 /* move_block_from_reg will emit code to store the argument registers
6091 individually as scalar stores.
6093 However, other insns may later load from the same addresses for
6094 a structure load (passing a struct to a varargs routine).
6096 The alias code assumes that such aliasing can never happen, so we
6097 have to keep memory referencing insns from moving up beyond the
6098 last argument register store. So we emit a blockage insn here. */
6099 emit_insn (gen_blockage ());
6101 return copy_to_reg (expand_binop (Pmode, add_optab,
6102 crtl->args.internal_arg_pointer,
6103 offset, 0, 0, OPTAB_LIB_WIDEN));
6107 hppa_va_start (tree valist, rtx nextarg)
6109 nextarg = expand_builtin_saveregs ();
6110 std_expand_builtin_va_start (valist, nextarg);
6114 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6119 /* Args grow upward. We can use the generic routines. */
6120 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6122 else /* !TARGET_64BIT */
6124 tree ptr = build_pointer_type (type);
6127 unsigned int size, ofs;
6130 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6134 ptr = build_pointer_type (type);
6136 size = int_size_in_bytes (type);
6137 valist_type = TREE_TYPE (valist);
6139 /* Args grow down. Not handled by generic routines. */
6141 u = fold_convert (sizetype, size_in_bytes (type));
6142 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6143 t = fold_build_pointer_plus (valist, u);
6145 /* Align to 4 or 8 byte boundary depending on argument size. */
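/* (ANDing with -8 or -4 clears the low bits of the downward-growing
pointer, moving it down to the boundary; e.g., 0x17 & -8 == 0x10.) */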
6147 u = build_int_cst (TREE_TYPE (t), (HOST_WIDE_INT)(size > 4 ? -8 : -4));
6148 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6149 t = fold_convert (valist_type, t);
6151 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6153 ofs = (8 - size) % 4;
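/* The 32-bit runtime right-justifies small args in their slot, so step
past the padding; e.g., size 1 gives ofs == (8 - 1) % 4 == 3. */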
6155 t = fold_build_pointer_plus_hwi (t, ofs);
6157 t = fold_convert (ptr, t);
6158 t = build_va_arg_indirect_ref (t);
6161 t = build_va_arg_indirect_ref (t);
6167 /* True if MODE is valid for the target. By "valid", we mean able to
6168 be manipulated in non-trivial ways. In particular, this means all
6169 the arithmetic is supported.
6171 Currently, TImode is not valid as the HP 64-bit runtime documentation
6172 doesn't document the alignment and calling conventions for this type.
6173 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6174 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6177 pa_scalar_mode_supported_p (enum machine_mode mode)
6179 int precision = GET_MODE_PRECISION (mode);
6181 switch (GET_MODE_CLASS (mode))
6183 case MODE_PARTIAL_INT:
6185 if (precision == CHAR_TYPE_SIZE)
6187 if (precision == SHORT_TYPE_SIZE)
6189 if (precision == INT_TYPE_SIZE)
6191 if (precision == LONG_TYPE_SIZE)
6193 if (precision == LONG_LONG_TYPE_SIZE)
6198 if (precision == FLOAT_TYPE_SIZE)
6200 if (precision == DOUBLE_TYPE_SIZE)
6202 if (precision == LONG_DOUBLE_TYPE_SIZE)
6206 case MODE_DECIMAL_FLOAT:
6214 /* Return TRUE if INSN, a jump insn, has an unfilled delay slot and
6215 it branches into the delay slot. Otherwise, return FALSE. */
6218 branch_to_delay_slot_p (rtx insn)
6222 if (dbr_sequence_length ())
6225 jump_insn = next_active_insn (JUMP_LABEL (insn));
6228 insn = next_active_insn (insn);
6229 if (jump_insn == insn)
6232 /* We can't rely on the length of asms. So, we return FALSE when
6233 the branch is followed by an asm. */
6235 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6236 || extract_asm_operands (PATTERN (insn)) != NULL_RTX
6237 || get_attr_length (insn) > 0)
6244 /* Return TRUE if INSN, a forward jump insn, needs a nop in its delay slot.
6246 This occurs when INSN has an unfilled delay slot and is followed
6247 by an asm. Disaster can occur if the asm is empty and the jump
6248 branches into the delay slot. So, we add a nop in the delay slot
6249 when this occurs. */
6252 branch_needs_nop_p (rtx insn)
6256 if (dbr_sequence_length ())
6259 jump_insn = next_active_insn (JUMP_LABEL (insn));
6262 insn = next_active_insn (insn);
6263 if (!insn || jump_insn == insn)
6266 if (!(GET_CODE (PATTERN (insn)) == ASM_INPUT
6267 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6268 && get_attr_length (insn) > 0)
6275 /* Return TRUE if INSN, a forward jump insn, can use nullification
6276 to skip the following instruction. This avoids an extra cycle due
6277 to a mis-predicted branch when we fall through. */
6280 use_skip_p (rtx insn)
6282 rtx jump_insn = next_active_insn (JUMP_LABEL (insn));
6286 insn = next_active_insn (insn);
6288 /* We can't rely on the length of asms, so we can't skip asms. */
6290 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6291 || extract_asm_operands (PATTERN (insn)) != NULL_RTX)
6293 if (get_attr_length (insn) == 4
6294 && jump_insn == next_active_insn (insn))
6296 if (get_attr_length (insn) > 0)
6303 /* This routine handles all the normal conditional branch sequences we
6304 might need to generate. It handles compare immediate vs compare
6305 register, nullification of delay slots, varying length branches,
6306 negated branches, and all combinations of the above. It returns the
6307 output appropriate to emit the branch corresponding to all given parameters. */
6311 output_cbranch (rtx *operands, int negated, rtx insn)
6313 static char buf[100];
6315 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6316 int length = get_attr_length (insn);
6319 /* A conditional branch to the following instruction (e.g. the delay slot)
6320 is asking for a disaster. This can happen when not optimizing and
6321 when jump optimization fails.
6323 While it is usually safe to emit nothing, this can fail if the
6324 preceding instruction is a nullified branch with an empty delay
6325 slot and the same branch target as this branch. We could check
6326 for this but jump optimization should eliminate nop jumps. It
6327 is always safe to emit a nop. */
6328 if (branch_to_delay_slot_p (insn))
6331 /* The doubleword form of the cmpib instruction doesn't have the LEU
6332 and GTU conditions while the cmpb instruction does. Since we accept
6333 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6334 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6335 operands[2] = gen_rtx_REG (DImode, 0);
6336 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6337 operands[1] = gen_rtx_REG (DImode, 0);
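/* %r0 always reads as zero, so swapping a const0_rtx operand for it
turns the immediate compare (cmpib) into the register form (cmpb). */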
6339 /* If this is a long branch with its delay slot unfilled, set `nullify'
6340 as it can nullify the delay slot and save a nop. */
6341 if (length == 8 && dbr_sequence_length () == 0)
6344 /* If this is a short forward conditional branch which did not get
6345 its delay slot filled, the delay slot can still be nullified. */
6346 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6347 nullify = forward_branch_p (insn);
6349 /* A forward branch over a single nullified insn can be done with a
6350 comclr instruction. This avoids a single cycle penalty due to a
6351 mis-predicted branch if we fall through (branch not taken). */
6352 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6356 /* All short conditional branches except backwards with an unfilled delay slot. */
6360 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6362 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6363 if (GET_MODE (operands[1]) == DImode)
6366 strcat (buf, "%B3");
6368 strcat (buf, "%S3");
6370 strcat (buf, " %2,%r1,%%r0");
6373 if (branch_needs_nop_p (insn))
6374 strcat (buf, ",n %2,%r1,%0%#");
6376 strcat (buf, ",n %2,%r1,%0");
6379 strcat (buf, " %2,%r1,%0");
6382 /* All long conditionals. Note a short backward branch with an
6383 unfilled delay slot is treated just like a long backward branch
6384 with an unfilled delay slot. */
6386 /* Handle weird backwards branch with a filled delay slot
6387 which is nullified. */
6388 if (dbr_sequence_length () != 0
6389 && ! forward_branch_p (insn)
6392 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6393 if (GET_MODE (operands[1]) == DImode)
6396 strcat (buf, "%S3");
6398 strcat (buf, "%B3");
6399 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6401 /* Handle short backwards branch with an unfilled delay slot.
6402 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6403 taken and untaken branches. */
6404 else if (dbr_sequence_length () == 0
6405 && ! forward_branch_p (insn)
6406 && INSN_ADDRESSES_SET_P ()
6407 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6408 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6410 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6411 if (GET_MODE (operands[1]) == DImode)
6414 strcat (buf, "%B3 %2,%r1,%0%#");
6416 strcat (buf, "%S3 %2,%r1,%0%#");
6420 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6421 if (GET_MODE (operands[1]) == DImode)
6424 strcat (buf, "%S3");
6426 strcat (buf, "%B3");
6428 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6430 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6435 /* The reversed conditional branch must branch over one additional
6436 instruction if the delay slot is filled and needs to be extracted
6437 by output_lbranch. If the delay slot is empty or this is a
6438 nullified forward branch, the instruction after the reversed
6439 condition branch must be nullified. */
6440 if (dbr_sequence_length () == 0
6441 || (nullify && forward_branch_p (insn)))
6445 operands[4] = GEN_INT (length);
6450 operands[4] = GEN_INT (length + 4);
6453 /* Create a reversed conditional branch which branches around
6454 the following insns. */
6455 if (GET_MODE (operands[1]) != DImode)
6461 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6464 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6470 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6473 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6482 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6485 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6491 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6494 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6498 output_asm_insn (buf, operands);
6499 return output_lbranch (operands[0], insn, xdelay);
6504 /* This routine handles output of long unconditional branches that
6505 exceed the maximum range of a simple branch instruction. Since
6506 we don't have a register available for the branch, we save register
6507 %r1 in the frame marker, load the branch destination DEST into %r1,
6508 execute the branch, and restore %r1 in the delay slot of the branch.
6510 Since long branches may have an insn in the delay slot and the
6511 delay slot is used to restore %r1, we in general need to extract
6512 this insn and execute it before the branch. However, to facilitate
6513 use of this function by conditional branches, we also provide an
6514 option to not extract the delay insn so that it will be emitted
6515 after the long branch. So, if there is an insn in the delay slot,
6516 it is extracted if XDELAY is nonzero.
6518 The lengths of the various long-branch sequences are 20, 16 and 24
6519 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
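/* Sketch (illustrative) of the 16 byte non-PIC case when the function
   has a frame of its own:

       stw %r1,-12(%r30)          ; save %r1 in the "Clean Up" slot
       ldil L'dest,%r1            ; left (high) part of DEST
       be R'dest(%sr4,%r1)        ; inter-space branch to DEST
       ldw -12(%r30),%r1          ; restore %r1 in the delay slot

   The portable runtime and PIC cases below need one or two extra insns
   to form the address, giving the 20 and 24 byte lengths.  */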
6522 output_lbranch (rtx dest, rtx insn, int xdelay)
6526 xoperands[0] = dest;
6528 /* First, free up the delay slot. */
6529 if (xdelay && dbr_sequence_length () != 0)
6531 /* We can't handle a jump in the delay slot. */
6532 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6534 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6537 /* Now delete the delay insn. */
6538 SET_INSN_DELETED (NEXT_INSN (insn));
6541 /* Output an insn to save %r1. The runtime documentation doesn't
6542 specify whether the "Clean Up" slot in the caller's frame can
6543 be clobbered by the callee. It isn't copied by HP's builtin
6544 alloca, so this suggests that it can be clobbered if necessary.
6545 The "Static Link" location is copied by HP builtin alloca, so
6546 we avoid using it. Using the cleanup slot might be a problem
6547 if we have to interoperate with languages that pass cleanup
6548 information. However, it should be possible to handle these
6549 situations with GCC's asm feature.
6551 The "Current RP" slot is reserved for the called procedure, so
6552 we try to use it when we don't have a frame of our own. It's
6553 rather unlikely that we won't have a frame when we need to emit
6554 a very long branch.
6556 Really the way to go long term is a register scavenger; go to
6557 the target of the jump and find a register which we can use
6558 as a scratch to hold the value in %r1. Then, we wouldn't have
6559 to free up the delay slot or clobber a slot that may be needed
6560 for other purposes. */
6563 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6564 /* Use the return pointer slot in the frame marker. */
6565 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6567 /* Use the slot at -40 in the frame marker since HP builtin
6568 alloca doesn't copy it. */
6569 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6573 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6574 /* Use the return pointer slot in the frame marker. */
6575 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6577 /* Use the "Clean Up" slot in the frame marker. In GCC,
6578 the only other use of this location is for copying a
6579 floating point double argument from a floating-point
6580 register to two general registers. The copy is done
6581 as an "atomic" operation when outputting a call, so it
6582 won't interfere with our using the location here. */
6583 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6586 if (TARGET_PORTABLE_RUNTIME)
6588 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6589 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6590 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6594 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6595 if (TARGET_SOM || !TARGET_GAS)
6597 xoperands[1] = gen_label_rtx ();
6598 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6599 targetm.asm_out.internal_label (asm_out_file, "L",
6600 CODE_LABEL_NUMBER (xoperands[1]));
6601 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6605 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6606 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6608 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6611 /* Now output a very long branch to the original target. */
6612 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6614 /* Now restore the value of %r1 in the delay slot. */
6617 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6618 return "ldd -16(%%r30),%%r1";
6620 return "ldd -40(%%r30),%%r1";
6624 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6625 return "ldw -20(%%r30),%%r1";
6627 return "ldw -12(%%r30),%%r1";
6631 /* This routine handles all the branch-on-bit conditional branch sequences we
6632 might need to generate. It handles nullification of delay slots,
6633 varying length branches, negated branches and all combinations of the
6634 above. It returns the appropriate output template to emit the branch. */
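/* For example (illustrative; PA numbers bits from the most significant
   end): a short branch on bit 5 of %r4 comes out as

       bb,< %r4,5,L$3          ; branch to L$3 if the bit is 1

   and the skip form as

       extrw,s,< %r4,5,1,%r0   ; nullify the next insn instead

   per the templates assembled below.  */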
6637 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6639 static char buf[100];
6641 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6642 int length = get_attr_length (insn);
6645 /* A conditional branch to the following instruction (e.g. the delay slot) is
6646 asking for a disaster. I do not think this can happen as this pattern
6647 is only used when optimizing; jump optimization should eliminate the
6648 jump. But be prepared just in case. */
6650 if (branch_to_delay_slot_p (insn))
6653 /* If this is a long branch with its delay slot unfilled, set `nullify'
6654 as it can nullify the delay slot and save a nop. */
6655 if (length == 8 && dbr_sequence_length () == 0)
6658 /* If this is a short forward conditional branch which did not get
6659 its delay slot filled, the delay slot can still be nullified. */
6660 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6661 nullify = forward_branch_p (insn);
6663 /* A forward branch over a single nullified insn can be done with an
6664 extrs instruction. This avoids a single cycle penalty due to a
6665 mis-predicted branch if we fall through (branch not taken). */
6666 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6671 /* All short conditional branches except backwards with an unfilled
6675 strcpy (buf, "{extrs,|extrw,s,}");
6677 strcpy (buf, "bb,");
6678 if (useskip && GET_MODE (operands[0]) == DImode)
6679 strcpy (buf, "extrd,s,*");
6680 else if (GET_MODE (operands[0]) == DImode)
6681 strcpy (buf, "bb,*");
6682 if ((which == 0 && negated)
6683 || (which == 1 && ! negated))
6688 strcat (buf, " %0,%1,1,%%r0");
6689 else if (nullify && negated)
6691 if (branch_needs_nop_p (insn))
6692 strcat (buf, ",n %0,%1,%3%#");
6694 strcat (buf, ",n %0,%1,%3");
6696 else if (nullify && ! negated)
6698 if (branch_needs_nop_p (insn))
6699 strcat (buf, ",n %0,%1,%2%#");
6701 strcat (buf, ",n %0,%1,%2");
6703 else if (! nullify && negated)
6704 strcat (buf, " %0,%1,%3");
6705 else if (! nullify && ! negated)
6706 strcat (buf, " %0,%1,%2");
6709 /* All long conditionals. Note a short backward branch with an
6710 unfilled delay slot is treated just like a long backward branch
6711 with an unfilled delay slot. */
6713 /* Handle weird backwards branch with a filled delay slot
6714 which is nullified. */
6715 if (dbr_sequence_length () != 0
6716 && ! forward_branch_p (insn)
6719 strcpy (buf, "bb,");
6720 if (GET_MODE (operands[0]) == DImode)
6722 if ((which == 0 && negated)
6723 || (which == 1 && ! negated))
6728 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6730 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6732 /* Handle short backwards branch with an unfilled delay slot.
6733 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6734 taken and untaken branches. */
6735 else if (dbr_sequence_length () == 0
6736 && ! forward_branch_p (insn)
6737 && INSN_ADDRESSES_SET_P ()
6738 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6739 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6741 strcpy (buf, "bb,");
6742 if (GET_MODE (operands[0]) == DImode)
6744 if ((which == 0 && negated)
6745 || (which == 1 && ! negated))
6750 strcat (buf, " %0,%1,%3%#");
6752 strcat (buf, " %0,%1,%2%#");
6756 if (GET_MODE (operands[0]) == DImode)
6757 strcpy (buf, "extrd,s,*");
6759 strcpy (buf, "{extrs,|extrw,s,}");
6760 if ((which == 0 && negated)
6761 || (which == 1 && ! negated))
6765 if (nullify && negated)
6766 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6767 else if (nullify && ! negated)
6768 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6770 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6772 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6777 /* The reversed conditional branch must branch over one additional
6778 instruction if the delay slot is filled and needs to be extracted
6779 by output_lbranch. If the delay slot is empty or this is a
6780 nullified forward branch, the instruction after the reversed
6781 condition branch must be nullified. */
6782 if (dbr_sequence_length () == 0
6783 || (nullify && forward_branch_p (insn)))
6787 operands[4] = GEN_INT (length);
6792 operands[4] = GEN_INT (length + 4);
6795 if (GET_MODE (operands[0]) == DImode)
6796 strcpy (buf, "bb,*");
6798 strcpy (buf, "bb,");
6799 if ((which == 0 && negated)
6800 || (which == 1 && !negated))
6805 strcat (buf, ",n %0,%1,.+%4");
6807 strcat (buf, " %0,%1,.+%4");
6808 output_asm_insn (buf, operands);
6809 return output_lbranch (negated ? operands[3] : operands[2],
6815 /* This routine handles all the branch-on-variable-bit conditional branch
6816 sequences we might need to generate. It handles nullification of delay
6817 slots, varying length branches, negated branches and all combinations
6818 of the above. It returns the appropriate output template to emit the
6819 branch. */
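/* For example (illustrative): with the bit number held in %sar, PA 1.x
   uses the bvb instruction while PA 2.0 spells it as bb with an %sar
   operand, as the {bvb,|bb,} templates below select:

       bvb,< %r4,L$3           ; PA 1.x
       bb,<  %r4,%sar,L$3      ; PA 2.0  */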
6822 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6824 static char buf[100];
6826 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6827 int length = get_attr_length (insn);
6830 /* A conditional branch to the following instruction (e.g. the delay slot) is
6831 asking for a disaster. I do not think this can happen as this pattern
6832 is only used when optimizing; jump optimization should eliminate the
6833 jump. But be prepared just in case. */
6835 if (branch_to_delay_slot_p (insn))
6838 /* If this is a long branch with its delay slot unfilled, set `nullify'
6839 as it can nullify the delay slot and save a nop. */
6840 if (length == 8 && dbr_sequence_length () == 0)
6843 /* If this is a short forward conditional branch which did not get
6844 its delay slot filled, the delay slot can still be nullified. */
6845 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6846 nullify = forward_branch_p (insn);
6848 /* A forward branch over a single nullified insn can be done with an
6849 extrs instruction. This avoids a single cycle penalty due to a
6850 mis-predicted branch if we fall through (branch not taken). */
6851 useskip = (length == 4 && nullify) ? use_skip_p (insn) : FALSE;
6856 /* All short conditional branches except backwards with an unfilled
6860 strcpy (buf, "{vextrs,|extrw,s,}");
6862 strcpy (buf, "{bvb,|bb,}");
6863 if (useskip && GET_MODE (operands[0]) == DImode)
6864 strcpy (buf, "extrd,s,*");
6865 else if (GET_MODE (operands[0]) == DImode)
6866 strcpy (buf, "bb,*");
6867 if ((which == 0 && negated)
6868 || (which == 1 && ! negated))
6873 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6874 else if (nullify && negated)
6876 if (branch_needs_nop_p (insn))
6877 strcat (buf, "{,n %0,%3%#|,n %0,%%sar,%3%#}");
6879 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6881 else if (nullify && ! negated)
6883 if (branch_needs_nop_p (insn))
6884 strcat (buf, "{,n %0,%2%#|,n %0,%%sar,%2%#}");
6886 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6888 else if (! nullify && negated)
6889 strcat (buf, "{ %0,%3| %0,%%sar,%3}");
6890 else if (! nullify && ! negated)
6891 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6894 /* All long conditionals. Note a short backward branch with an
6895 unfilled delay slot is treated just like a long backward branch
6896 with an unfilled delay slot. */
6898 /* Handle weird backwards branch with a filled delay slot
6899 which is nullified. */
6900 if (dbr_sequence_length () != 0
6901 && ! forward_branch_p (insn)
6904 strcpy (buf, "{bvb,|bb,}");
6905 if (GET_MODE (operands[0]) == DImode)
6907 if ((which == 0 && negated)
6908 || (which == 1 && ! negated))
6913 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6915 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6917 /* Handle short backwards branch with an unfilled delay slot.
6918 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6919 taken and untaken branches. */
6920 else if (dbr_sequence_length () == 0
6921 && ! forward_branch_p (insn)
6922 && INSN_ADDRESSES_SET_P ()
6923 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6924 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6926 strcpy (buf, "{bvb,|bb,}");
6927 if (GET_MODE (operands[0]) == DImode)
6929 if ((which == 0 && negated)
6930 || (which == 1 && ! negated))
6935 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6937 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6941 strcpy (buf, "{vextrs,|extrw,s,}");
6942 if (GET_MODE (operands[0]) == DImode)
6943 strcpy (buf, "extrd,s,*");
6944 if ((which == 0 && negated)
6945 || (which == 1 && ! negated))
6949 if (nullify && negated)
6950 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6951 else if (nullify && ! negated)
6952 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6954 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6956 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6961 /* The reversed conditional branch must branch over one additional
6962 instruction if the delay slot is filled and needs to be extracted
6963 by output_lbranch. If the delay slot is empty or this is a
6964 nullified forward branch, the instruction after the reversed
6965 condition branch must be nullified. */
6966 if (dbr_sequence_length () == 0
6967 || (nullify && forward_branch_p (insn)))
6971 operands[4] = GEN_INT (length);
6976 operands[4] = GEN_INT (length + 4);
6979 if (GET_MODE (operands[0]) == DImode)
6980 strcpy (buf, "bb,*");
6982 strcpy (buf, "{bvb,|bb,}");
6983 if ((which == 0 && negated)
6984 || (which == 1 && !negated))
6989 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6991 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6992 output_asm_insn (buf, operands);
6993 return output_lbranch (negated ? operands[3] : operands[2],
6999 /* Return the output template for emitting a dbra type insn.
7001 Note it may perform some output operations on its own before
7002 returning the final output string. */
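/* For example (illustrative): the normal short case below emits a
   decrement-and-branch such as

       addib,> -1,%r3,L$loop    ; %r3 += -1, branch to L$loop if result > 0

   where %C2 prints the condition and operand 1 is the constant
   increment.  */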
7004 output_dbra (rtx *operands, rtx insn, int which_alternative)
7006 int length = get_attr_length (insn);
7008 /* A conditional branch to the following instruction (e.g. the delay slot) is
7009 asking for a disaster. Be prepared! */
7011 if (branch_to_delay_slot_p (insn))
7013 if (which_alternative == 0)
7014 return "ldo %1(%0),%0";
7015 else if (which_alternative == 1)
7017 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
7018 output_asm_insn ("ldw -16(%%r30),%4", operands);
7019 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7020 return "{fldws|fldw} -16(%%r30),%0";
7024 output_asm_insn ("ldw %0,%4", operands);
7025 return "ldo %1(%4),%4\n\tstw %4,%0";
7029 if (which_alternative == 0)
7031 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7034 /* If this is a long branch with its delay slot unfilled, set `nullify'
7035 as it can nullify the delay slot and save a nop. */
7036 if (length == 8 && dbr_sequence_length () == 0)
7039 /* If this is a short forward conditional branch which did not get
7040 its delay slot filled, the delay slot can still be nullified. */
7041 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7042 nullify = forward_branch_p (insn);
7049 if (branch_needs_nop_p (insn))
7050 return "addib,%C2,n %1,%0,%3%#";
7052 return "addib,%C2,n %1,%0,%3";
7055 return "addib,%C2 %1,%0,%3";
7058 /* Handle weird backwards branch with a filled delay slot
7059 which is nullified. */
7060 if (dbr_sequence_length () != 0
7061 && ! forward_branch_p (insn)
7063 return "addib,%N2,n %1,%0,.+12\n\tb %3";
7064 /* Handle short backwards branch with an unfilled delay slot.
7065 Using an addb;nop rather than addi;bl saves 1 cycle for both
7066 taken and untaken branches. */
7067 else if (dbr_sequence_length () == 0
7068 && ! forward_branch_p (insn)
7069 && INSN_ADDRESSES_SET_P ()
7070 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7071 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7072 return "addib,%C2 %1,%0,%3%#";
7074 /* Handle normal cases. */
7076 return "addi,%N2 %1,%0,%0\n\tb,n %3";
7078 return "addi,%N2 %1,%0,%0\n\tb %3";
7081 /* The reversed conditional branch must branch over one additional
7082 instruction if the delay slot is filled and needs to be extracted
7083 by output_lbranch. If the delay slot is empty or this is a
7084 nullified forward branch, the instruction after the reversed
7085 condition branch must be nullified. */
7086 if (dbr_sequence_length () == 0
7087 || (nullify && forward_branch_p (insn)))
7091 operands[4] = GEN_INT (length);
7096 operands[4] = GEN_INT (length + 4);
7100 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
7102 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
7104 return output_lbranch (operands[3], insn, xdelay);
7108 /* Deal with gross reload from FP register case. */
7109 else if (which_alternative == 1)
7111 /* Move loop counter from FP register to MEM then into a GR,
7112 increment the GR, store the GR into MEM, and finally reload
7113 the FP register from MEM from within the branch's delay slot. */
7114 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
7116 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
7118 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
7119 else if (length == 28)
7120 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7123 operands[5] = GEN_INT (length - 16);
7124 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
7125 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7126 return output_lbranch (operands[3], insn, 0);
7129 /* Deal with gross reload from memory case. */
7132 /* Reload loop counter from memory, the store back to memory
7133 happens in the branch's delay slot. */
7134 output_asm_insn ("ldw %0,%4", operands);
7136 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
7137 else if (length == 16)
7138 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
7141 operands[5] = GEN_INT (length - 4);
7142 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
7143 return output_lbranch (operands[3], insn, 0);
7148 /* Return the output template for emitting a movb type insn.
7150 Note it may perform some output operations on its own before
7151 returning the final output string. */
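/* For example (illustrative): the normal short case emits a copy that
   also branches on the moved value, e.g.

       movb,= %r26,%r28,L$7     ; %r28 = %r26, branch if the value is 0

   matching the "movb,%C2 %1,%0,%3" template below.  */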
7153 output_movb (rtx *operands, rtx insn, int which_alternative,
7154 int reverse_comparison)
7156 int length = get_attr_length (insn);
7158 /* A conditional branch to the following instruction (e.g. the delay slot) is
7159 asking for a disaster. Be prepared! */
7161 if (branch_to_delay_slot_p (insn))
7163 if (which_alternative == 0)
7164 return "copy %1,%0";
7165 else if (which_alternative == 1)
7167 output_asm_insn ("stw %1,-16(%%r30)", operands);
7168 return "{fldws|fldw} -16(%%r30),%0";
7170 else if (which_alternative == 2)
7176 /* Support the second variant. */
7177 if (reverse_comparison)
7178 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
7180 if (which_alternative == 0)
7182 int nullify = INSN_ANNULLED_BRANCH_P (insn);
7185 /* If this is a long branch with its delay slot unfilled, set `nullify'
7186 as it can nullify the delay slot and save a nop. */
7187 if (length == 8 && dbr_sequence_length () == 0)
7190 /* If this is a short forward conditional branch which did not get
7191 its delay slot filled, the delay slot can still be nullified. */
7192 if (! nullify && length == 4 && dbr_sequence_length () == 0)
7193 nullify = forward_branch_p (insn);
7200 if (branch_needs_nop_p (insn))
7201 return "movb,%C2,n %1,%0,%3%#";
7203 return "movb,%C2,n %1,%0,%3";
7206 return "movb,%C2 %1,%0,%3";
7209 /* Handle weird backwards branch with a filled delay slot
7210 which is nullified. */
7211 if (dbr_sequence_length () != 0
7212 && ! forward_branch_p (insn)
7214 return "movb,%N2,n %1,%0,.+12\n\tb %3";
7216 /* Handle short backwards branch with an unfilled delay slot.
7217 Using a movb;nop rather than or;bl saves 1 cycle for both
7218 taken and untaken branches. */
7219 else if (dbr_sequence_length () == 0
7220 && ! forward_branch_p (insn)
7221 && INSN_ADDRESSES_SET_P ()
7222 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7223 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7224 return "movb,%C2 %1,%0,%3%#";
7225 /* Handle normal cases. */
7227 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7229 return "or,%N2 %1,%%r0,%0\n\tb %3";
7232 /* The reversed conditional branch must branch over one additional
7233 instruction if the delay slot is filled and needs to be extracted
7234 by output_lbranch. If the delay slot is empty or this is a
7235 nullified forward branch, the instruction after the reversed
7236 condition branch must be nullified. */
7237 if (dbr_sequence_length () == 0
7238 || (nullify && forward_branch_p (insn)))
7242 operands[4] = GEN_INT (length);
7247 operands[4] = GEN_INT (length + 4);
7251 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7253 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7255 return output_lbranch (operands[3], insn, xdelay);
7258 /* Deal with gross reload for FP destination register case. */
7259 else if (which_alternative == 1)
7261 /* Move source register to MEM, perform the branch test, then
7262 finally load the FP register from MEM from within the branch's
7263 delay slot. */
7264 output_asm_insn ("stw %1,-16(%%r30)", operands);
7266 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7267 else if (length == 16)
7268 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7271 operands[4] = GEN_INT (length - 4);
7272 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7273 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7274 return output_lbranch (operands[3], insn, 0);
7277 /* Deal with gross reload from memory case. */
7278 else if (which_alternative == 2)
7280 /* Reload loop counter from memory, the store back to memory
7281 happens in the branch's delay slot. */
7283 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7284 else if (length == 12)
7285 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7288 operands[4] = GEN_INT (length);
7289 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7291 return output_lbranch (operands[3], insn, 0);
7294 /* Handle SAR as a destination. */
7298 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7299 else if (length == 12)
7300 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7303 operands[4] = GEN_INT (length);
7304 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7306 return output_lbranch (operands[3], insn, 0);
7311 /* Copy any FP arguments in INSN into integer registers. */
7313 copy_fp_args (rtx insn)
7318 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7320 int arg_mode, regno;
7321 rtx use = XEXP (link, 0);
7323 if (! (GET_CODE (use) == USE
7324 && GET_CODE (XEXP (use, 0)) == REG
7325 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7328 arg_mode = GET_MODE (XEXP (use, 0));
7329 regno = REGNO (XEXP (use, 0));
7331 /* Is it a floating point register? */
7332 if (regno >= 32 && regno <= 39)
7334 /* Copy the FP register into an integer register via memory. */
7335 if (arg_mode == SFmode)
7337 xoperands[0] = XEXP (use, 0);
7338 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7339 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7340 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7344 xoperands[0] = XEXP (use, 0);
7345 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7346 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7347 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7348 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
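/* Illustrative expansion (the register choice is an example; it assumes
   the usual %fr4-%fr7 argument registers): a DFmode argument in %fr7
   goes through the frame marker into the GR pair %r23/%r24:

       fstd %fr7,-16(%sr0,%r30)
       ldw -12(%sr0,%r30),%r24    ; low word
       ldw -16(%sr0,%r30),%r23    ; high word

   so the argument relocation stub can later move it back.  */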
7354 /* Compute length of the FP argument copy sequence for INSN. */
7356 length_fp_args (rtx insn)
7361 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7363 int arg_mode, regno;
7364 rtx use = XEXP (link, 0);
7366 if (! (GET_CODE (use) == USE
7367 && GET_CODE (XEXP (use, 0)) == REG
7368 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7371 arg_mode = GET_MODE (XEXP (use, 0));
7372 regno = REGNO (XEXP (use, 0));
7374 /* Is it a floating point register? */
7375 if (regno >= 32 && regno <= 39)
7377 if (arg_mode == SFmode)
7387 /* Return the attribute length for the millicode call instruction INSN.
7388 The length must match the code generated by output_millicode_call.
7389 We include the delay slot in the returned length as it is better to
7390 overestimate the length than to underestimate it. */
7393 attr_length_millicode_call (rtx insn)
7395 unsigned long distance = -1;
7396 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7398 if (INSN_ADDRESSES_SET_P ())
7400 distance = (total + insn_current_reference_address (insn));
7401 if (distance < total)
7402 distance = -1;
7407 if (!TARGET_LONG_CALLS && distance < 7600000)
7412 else if (TARGET_PORTABLE_RUNTIME)
7416 if (!TARGET_LONG_CALLS && distance < 240000)
7419 if (TARGET_LONG_ABS_CALL && !flag_pic)
7426 /* INSN is a function call. It may have an unconditional jump
7427 in its delay slot.
7429 CALL_DEST is the routine we are calling. */
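/* For example (illustrative): when the target is in reach, the common
   case below emits simply

       bl $$mulI,%r31        ; b,l under PA 2.0; %r2 instead of %r31
                             ; for TARGET_64BIT

   $$mulI being one of the standard HP-UX millicode routines.  */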
7432 output_millicode_call (rtx insn, rtx call_dest)
7434 int attr_length = get_attr_length (insn);
7435 int seq_length = dbr_sequence_length ();
7440 xoperands[0] = call_dest;
7441 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7443 /* Handle the common case where we are sure that the branch will
7444 reach the beginning of the $CODE$ subspace. The within reach
7445 form of the $$sh_func_adrs call has a length of 28. Because
7446 it has an attribute type of multi, it never has a nonzero
7447 sequence length. The length of the $$sh_func_adrs is the same
7448 as certain out of reach PIC calls to other routines. */
7449 if (!TARGET_LONG_CALLS
7450 && ((seq_length == 0
7451 && (attr_length == 12
7452 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7453 || (seq_length != 0 && attr_length == 8)))
7455 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7461 /* It might seem that one insn could be saved by accessing
7462 the millicode function using the linkage table. However,
7463 this doesn't work in shared libraries and other dynamically
7464 loaded objects. Using a pc-relative sequence also avoids
7465 problems related to the implicit use of the gp register. */
7466 output_asm_insn ("b,l .+8,%%r1", xoperands);
7470 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7471 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7475 xoperands[1] = gen_label_rtx ();
7476 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7477 targetm.asm_out.internal_label (asm_out_file, "L",
7478 CODE_LABEL_NUMBER (xoperands[1]));
7479 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7482 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7484 else if (TARGET_PORTABLE_RUNTIME)
7486 /* Pure portable runtime doesn't allow be/ble; we also don't
7487 have PIC support in the assembler/linker, so this sequence
7488 is needed. */
7490 /* Get the address of our target into %r1. */
7491 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7492 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7494 /* Get our return address into %r31. */
7495 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7496 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7498 /* Jump to our target address in %r1. */
7499 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7503 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7505 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7507 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7511 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7512 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7514 if (TARGET_SOM || !TARGET_GAS)
7516 /* The HP assembler can generate relocations for the
7517 difference of two symbols. GAS can do this for a
7518 millicode symbol but not an arbitrary external
7519 symbol when generating SOM output. */
7520 xoperands[1] = gen_label_rtx ();
7521 targetm.asm_out.internal_label (asm_out_file, "L",
7522 CODE_LABEL_NUMBER (xoperands[1]));
7523 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7524 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7528 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7529 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7533 /* Jump to our target address in %r1. */
7534 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7538 if (seq_length == 0)
7539 output_asm_insn ("nop", xoperands);
7541 /* We are done if there isn't a jump in the delay slot. */
7542 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7545 /* This call has an unconditional jump in its delay slot. */
7546 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7548 /* See if the return address can be adjusted. Use the containing
7549 sequence insn's address. */
7550 if (INSN_ADDRESSES_SET_P ())
7552 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7553 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7554 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7556 if (VAL_14_BITS_P (distance))
7558 xoperands[1] = gen_label_rtx ();
7559 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7560 targetm.asm_out.internal_label (asm_out_file, "L",
7561 CODE_LABEL_NUMBER (xoperands[1]));
7564 /* ??? This branch may not reach its target. */
7565 output_asm_insn ("nop\n\tb,n %0", xoperands);
7568 /* ??? This branch may not reach its target. */
7569 output_asm_insn ("nop\n\tb,n %0", xoperands);
7571 /* Delete the jump. */
7572 SET_INSN_DELETED (NEXT_INSN (insn));
7577 /* Return the attribute length of the call instruction INSN. The SIBCALL
7578 flag indicates whether INSN is a regular call or a sibling call. The
7579 length returned must be longer than the code actually generated by
7580 output_call. Since branch shortening is done before delay branch
7581 sequencing, there is no way to determine whether or not the delay
7582 slot will be filled during branch shortening. Even when the delay
7583 slot is filled, we may have to add a nop if the delay slot contains
7584 a branch that can't reach its target. Thus, we always have to include
7585 the delay slot in the length estimate. This used to be done in
7586 pa_adjust_insn_length but we do it here now as some sequences always
7587 fill the delay slot and we can save four bytes in the estimate for
7588 these sequences. */
7591 attr_length_call (rtx insn, int sibcall)
7594 rtx call, call_dest;
7597 rtx pat = PATTERN (insn);
7598 unsigned long distance = -1;
7600 gcc_assert (GET_CODE (insn) == CALL_INSN);
7602 if (INSN_ADDRESSES_SET_P ())
7604 unsigned long total;
7606 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7607 distance = (total + insn_current_reference_address (insn));
7608 if (distance < total)
7609 distance = -1;
7612 gcc_assert (GET_CODE (pat) == PARALLEL);
7614 /* Get the call rtx. */
7615 call = XVECEXP (pat, 0, 0);
7616 if (GET_CODE (call) == SET)
7617 call = SET_SRC (call);
7619 gcc_assert (GET_CODE (call) == CALL);
7621 /* Determine if this is a local call. */
7622 call_dest = XEXP (XEXP (call, 0), 0);
7623 call_decl = SYMBOL_REF_DECL (call_dest);
7624 local_call = call_decl && targetm.binds_local_p (call_decl);
7626 /* pc-relative branch. */
7627 if (!TARGET_LONG_CALLS
7628 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7629 || distance < 240000))
7632 /* 64-bit plabel sequence. */
7633 else if (TARGET_64BIT && !local_call)
7634 length += sibcall ? 28 : 24;
7636 /* non-pic long absolute branch sequence. */
7637 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7640 /* long pc-relative branch sequence. */
7641 else if (TARGET_LONG_PIC_SDIFF_CALL
7642 || (TARGET_GAS && !TARGET_SOM
7643 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7647 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7651 /* 32-bit plabel sequence. */
7657 length += length_fp_args (insn);
7667 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7675 /* INSN is a function call. It may have an unconditional jump
7676 in its delay slot.
7678 CALL_DEST is the routine we are calling. */
7681 output_call (rtx insn, rtx call_dest, int sibcall)
7683 int delay_insn_deleted = 0;
7684 int delay_slot_filled = 0;
7685 int seq_length = dbr_sequence_length ();
7686 tree call_decl = SYMBOL_REF_DECL (call_dest);
7687 int local_call = call_decl && targetm.binds_local_p (call_decl);
7690 xoperands[0] = call_dest;
7692 /* Handle the common case where we're sure that the branch will reach
7693 the beginning of the "$CODE$" subspace. This is the beginning of
7694 the current function if we are in a named section. */
7695 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7697 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7698 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7702 if (TARGET_64BIT && !local_call)
7704 /* ??? As far as I can tell, the HP linker doesn't support the
7705 long pc-relative sequence described in the 64-bit runtime
7706 architecture. So, we use a slightly longer indirect call. */
7707 xoperands[0] = get_deferred_plabel (call_dest);
7708 xoperands[1] = gen_label_rtx ();
7710 /* If this isn't a sibcall, we put the load of %r27 into the
7711 delay slot. We can't do this in a sibcall as we don't
7712 have a second call-clobbered scratch register available. */
7714 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7717 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7720 /* Now delete the delay insn. */
7721 SET_INSN_DELETED (NEXT_INSN (insn));
7722 delay_insn_deleted = 1;
7725 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7726 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7727 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7731 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7732 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7733 output_asm_insn ("bve (%%r1)", xoperands);
7737 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7738 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7739 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7740 delay_slot_filled = 1;
7745 int indirect_call = 0;
7747 /* Emit a long call. There are several different sequences
7748 of increasing length and complexity. In most cases,
7749 they don't allow an instruction in the delay slot. */
7750 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7751 && !TARGET_LONG_PIC_SDIFF_CALL
7752 && !(TARGET_GAS && !TARGET_SOM
7753 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7758 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7762 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7764 /* A non-jump insn in the delay slot. By definition we can
7765 emit this insn before the call (and in fact before argument
7766 relocating). */
7767 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7770 /* Now delete the delay insn. */
7771 SET_INSN_DELETED (NEXT_INSN (insn));
7772 delay_insn_deleted = 1;
7775 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7777 /* This is the best sequence for making long calls in
7778 non-pic code. Unfortunately, GNU ld doesn't provide
7779 the stub needed for external calls, and GAS's support
7780 for this with the SOM linker is buggy. It is safe
7781 to use this for local calls. */
7782 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7784 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7788 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7791 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7793 output_asm_insn ("copy %%r31,%%r2", xoperands);
7794 delay_slot_filled = 1;
7799 if (TARGET_LONG_PIC_SDIFF_CALL)
7801 /* The HP assembler and linker can handle relocations
7802 for the difference of two symbols. The HP assembler
7803 recognizes the sequence as a pc-relative call and
7804 the linker provides stubs when needed. */
7805 xoperands[1] = gen_label_rtx ();
7806 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7807 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7808 targetm.asm_out.internal_label (asm_out_file, "L",
7809 CODE_LABEL_NUMBER (xoperands[1]));
7810 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7812 else if (TARGET_GAS && !TARGET_SOM
7813 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7815 /* GAS currently can't generate the relocations that
7816 are needed for the SOM linker under HP-UX using this
7817 sequence. The GNU linker doesn't generate the stubs
7818 that are needed for external calls on TARGET_ELF32
7819 with this sequence. For now, we have to use a
7820 longer plabel sequence when using GAS. */
7821 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7822 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7824 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7829 /* Emit a long plabel-based call sequence. This is
7830 essentially an inline implementation of $$dyncall.
7831 We don't actually try to call $$dyncall as this is
7832 as difficult as calling the function itself. */
7833 xoperands[0] = get_deferred_plabel (call_dest);
7834 xoperands[1] = gen_label_rtx ();
7836 /* Since the call is indirect, FP arguments in registers
7837 need to be copied to the general registers. Then, the
7838 argument relocation stub will copy them back. */
7840 copy_fp_args (insn);
7844 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7845 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7846 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7850 output_asm_insn ("addil LR'%0-$global$,%%r27",
7852 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7856 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7857 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7858 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7859 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7861 if (!sibcall && !TARGET_PA_20)
7863 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7864 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7865 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7867 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7874 output_asm_insn ("bve (%%r1)", xoperands);
7879 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7880 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7881 delay_slot_filled = 1;
7884 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7889 if (!TARGET_NO_SPACE_REGS && (!local_call || flag_pic))
7890 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7895 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7896 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7898 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7902 if (TARGET_NO_SPACE_REGS || (local_call && !flag_pic))
7903 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7905 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7908 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7910 output_asm_insn ("copy %%r31,%%r2", xoperands);
7911 delay_slot_filled = 1;
7918 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7919 output_asm_insn ("nop", xoperands);
7921 /* We are done if there isn't a jump in the delay slot. */
7923 || delay_insn_deleted
7924 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7927 /* A sibcall should never have a branch in the delay slot. */
7928 gcc_assert (!sibcall);
7930 /* This call has an unconditional jump in its delay slot. */
7931 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7933 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7935 /* See if the return address can be adjusted. Use the containing
7936 sequence insn's address. This would break the regular call/return
7937 relationship assumed by the table based eh unwinder, so only do that
7938 if the call is not possibly throwing. */
7939 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7940 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7941 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7943 if (VAL_14_BITS_P (distance)
7944 && !(can_throw_internal (insn) || can_throw_external (insn)))
7946 xoperands[1] = gen_label_rtx ();
7947 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7948 targetm.asm_out.internal_label (asm_out_file, "L",
7949 CODE_LABEL_NUMBER (xoperands[1]));
7952 output_asm_insn ("nop\n\tb,n %0", xoperands);
7955 output_asm_insn ("b,n %0", xoperands);
7957 /* Delete the jump. */
7958 SET_INSN_DELETED (NEXT_INSN (insn));
7963 /* Return the attribute length of the indirect call instruction INSN.
7964 The length must match the code generated by output_indirect_call.
7965 The returned length includes the delay slot. Currently, the delay
7966 slot of an indirect call sequence is not exposed and it is used by
7967 the sequence itself. */
7970 attr_length_indirect_call (rtx insn)
7972 unsigned long distance = -1;
7973 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7975 if (INSN_ADDRESSES_SET_P ())
7977 distance = (total + insn_current_reference_address (insn));
7978 if (distance < total)
7985 if (TARGET_FAST_INDIRECT_CALLS
7986 || (!TARGET_PORTABLE_RUNTIME
7987 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7988 || distance < 240000)))
7994 if (TARGET_PORTABLE_RUNTIME)
7997 /* Out of reach, can use ble. */
8002 output_indirect_call (rtx insn, rtx call_dest)
8008 xoperands[0] = call_dest;
8009 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
8010 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
8014 /* First the special case for kernels, level 0 systems, etc. */
8015 if (TARGET_FAST_INDIRECT_CALLS)
8016 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
8018 /* Now the normal case -- we can reach $$dyncall directly or
8019 we're sure that we can get there via a long-branch stub.
8021 No need to check target flags as the length uniquely identifies
8022 the remaining cases. */
8023 if (attr_length_indirect_call (insn) == 8)
8025 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
8026 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
8027 variant of the B,L instruction can't be used on the SOM target. */
8028 if (TARGET_PA_20 && !TARGET_SOM)
8029 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
8031 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
8034 /* Long millicode call, but we are not generating PIC or portable runtime
8035 code. */
8036 if (attr_length_indirect_call (insn) == 12)
8037 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
8039 /* Long millicode call for portable runtime. */
8040 if (attr_length_indirect_call (insn) == 20)
8041 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
8043 /* We need a long PIC call to $$dyncall. */
8044 xoperands[0] = NULL_RTX;
8045 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8046 if (TARGET_SOM || !TARGET_GAS)
8048 xoperands[0] = gen_label_rtx ();
8049 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
8050 targetm.asm_out.internal_label (asm_out_file, "L",
8051 CODE_LABEL_NUMBER (xoperands[0]));
8052 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
8056 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
8057 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
8060 output_asm_insn ("blr %%r0,%%r2", xoperands);
8061 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
8065 /* Return the total length of the save and restore instructions needed for
8066 the data linkage table pointer (i.e., the PIC register) across the call
8067 instruction INSN. No-return calls do not require a save and restore.
8068 In addition, we may be able to avoid the save and restore for calls
8069 within the same translation unit. */
8072 attr_length_save_restore_dltp (rtx insn)
8074 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
8080 /* In HPUX 8.0's shared library scheme, special relocations are needed
8081 for function labels if they might be passed to a function
8082 in a shared library (because shared libraries don't live in code
8083 space), and special magic is needed to construct their address. */
8086 hppa_encode_label (rtx sym)
8088 const char *str = XSTR (sym, 0);
8089 int len = strlen (str) + 1;
8092 p = newstr = XALLOCAVEC (char, len + 1);
8096 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
8100 pa_encode_section_info (tree decl, rtx rtl, int first)
8102 int old_referenced = 0;
8104 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
8106 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
8108 default_encode_section_info (decl, rtl, first);
8110 if (first && TEXT_SPACE_P (decl))
8112 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
8113 if (TREE_CODE (decl) == FUNCTION_DECL)
8114 hppa_encode_label (XEXP (rtl, 0));
8116 else if (old_referenced)
8117 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
8120 /* This is sort of inverse to pa_encode_section_info. */
8123 pa_strip_name_encoding (const char *str)
8125 str += (*str == '@');
8126 str += (*str == '*');
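/* For example (illustrative): hppa_encode_label turns "foo" into "@foo";
   the two adjustments above undo that, mapping "@foo" (and a "*"
   user-label prefix, if present) back to "foo".  */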
8130 /* Returns 1 if OP is a function label involved in a simple addition
8131 with a constant. Used to keep certain patterns from matching
8132 during instruction combination. */
8134 is_function_label_plus_const (rtx op)
8136 /* Strip off any CONST. */
8137 if (GET_CODE (op) == CONST)
8140 return (GET_CODE (op) == PLUS
8141 && function_label_operand (XEXP (op, 0), VOIDmode)
8142 && GET_CODE (XEXP (op, 1)) == CONST_INT);
8145 /* Output assembly code for a thunk to FUNCTION. */
8148 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
8149 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
8152 static unsigned int current_thunk_number;
8153 int val_14 = VAL_14_BITS_P (delta);
8154 unsigned int old_last_address = last_address, nbytes = 0;
8158 xoperands[0] = XEXP (DECL_RTL (function), 0);
8159 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
8160 xoperands[2] = GEN_INT (delta);
8162 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
8163 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
8165 /* Output the thunk. We know that the function is in the same
8166 translation unit (i.e., the same space) as the thunk, and that
8167 thunks are output after their method. Thus, we don't need an
8168 external branch to reach the function. With SOM and GAS,
8169 functions and thunks are effectively in different sections.
8170 Thus, we can always use an IA-relative branch and the linker
8171 will add a long branch stub if necessary.
8173 However, we have to be careful when generating PIC code on the
8174 SOM port to ensure that the sequence does not transfer to an
8175 import stub for the target function as this could clobber the
8176 return value saved at SP-24. This would also apply to the
8177 32-bit linux port if the multi-space model is implemented. */
8178 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8179 && !(flag_pic && TREE_PUBLIC (function))
8180 && (TARGET_GAS || last_address < 262132))
8181 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
8182 && ((targetm_common.have_named_sections
8183 && DECL_SECTION_NAME (thunk_fndecl) != NULL
8184 /* The GNU 64-bit linker has rather poor stub management.
8185 So, we use a long branch from thunks that aren't in
8186 the same section as the target function. */
8188 && (DECL_SECTION_NAME (thunk_fndecl)
8189 != DECL_SECTION_NAME (function)))
8190 || ((DECL_SECTION_NAME (thunk_fndecl)
8191 == DECL_SECTION_NAME (function))
8192 && last_address < 262132)))
8193 || (targetm_common.have_named_sections
8194 && DECL_SECTION_NAME (thunk_fndecl) == NULL
8195 && DECL_SECTION_NAME (function) == NULL
8196 && last_address < 262132)
8197 || (!targetm_common.have_named_sections
8198 && last_address < 262132))))
8201 output_asm_insn ("addil L'%2,%%r26", xoperands);
8203 output_asm_insn ("b %0", xoperands);
8207 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8212 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8216 else if (TARGET_64BIT)
8218 /* We only have one call-clobbered scratch register, so we can't
8219 make use of the delay slot if delta doesn't fit in 14 bits. */
8222 output_asm_insn ("addil L'%2,%%r26", xoperands);
8223 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8226 output_asm_insn ("b,l .+8,%%r1", xoperands);
8230 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8231 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8235 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8236 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8241 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8242 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8247 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8251 else if (TARGET_PORTABLE_RUNTIME)
8253 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8254 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8257 output_asm_insn ("addil L'%2,%%r26", xoperands);
8259 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8263 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8268 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8272 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8274 /* The function is accessible from outside this module. The only
8275 way to avoid an import stub between the thunk and function is to
8276 call the function directly with an indirect sequence similar to
8277 that used by $$dyncall. This is possible because $$dyncall acts
8278 as the import stub in an indirect call. */
8279 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8280 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8281 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8282 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8283 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8284 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8285 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8286 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8287 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8291 output_asm_insn ("addil L'%2,%%r26", xoperands);
8297 output_asm_insn ("bve (%%r22)", xoperands);
8300 else if (TARGET_NO_SPACE_REGS)
8302 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8307 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8308 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8309 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8314 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8316 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8320 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8322 if (TARGET_SOM || !TARGET_GAS)
8324 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8325 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8329 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8330 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8334 output_asm_insn ("addil L'%2,%%r26", xoperands);
8336 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8340 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8345 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8352 output_asm_insn ("addil L'%2,%%r26", xoperands);
8354 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8355 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8359 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8364 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8369 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8371 if (TARGET_SOM && TARGET_GAS)
8373 /* We're done with this subspace except possibly for some additional
8374 debug information. Forget that we are in this subspace to ensure
8375 that the next function is output in its own subspace. */
8377 cfun->machine->in_nsubspa = 2;
8380 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8382 switch_to_section (data_section);
8383 output_asm_insn (".align 4", xoperands);
8384 ASM_OUTPUT_LABEL (file, label);
8385 output_asm_insn (".word P'%0", xoperands);
8388 current_thunk_number++;
8389 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8390 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
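/* Worked example (illustrative; assumes FUNCTION_BOUNDARY is 32 bits,
   i.e. 4 bytes): nbytes = 10 rounds to (10 + 3) & ~3 = 12, the usual
   power-of-two round-up to the next function boundary.  */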
8391 last_address += nbytes;
8392 if (old_last_address > last_address)
8393 last_address = UINT_MAX;
8394 update_total_code_bytes (nbytes);
8397 /* Only direct calls to static functions are allowed to be sibling (tail)
8398 call optimized.
8400 This restriction is necessary because some linker generated stubs will
8401 store return pointers into rp' in some cases which might clobber a
8402 live value already in rp'.
8404 In a sibcall the current function and the target function share stack
8405 space. Thus if the path to the current function and the path to the
8406 target function save a value in rp', they save the value into the
8407 same stack slot, which has undesirable consequences.
8409 Because of the deferred binding nature of shared libraries any function
8410 with external scope could be in a different load module and thus require
8411 rp' to be saved when calling that function. So sibcall optimizations
8412 can only be safe for static functions.
8414 Note that GCC never needs return value relocations, so we don't have to
8415 worry about static calls with return value relocations (which require
8416 saving rp').
8418 It is safe to perform a sibcall optimization when the target function
8419 will never return. */
8421 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8423 if (TARGET_PORTABLE_RUNTIME)
8426 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8427 single subspace mode and the call is not indirect. As far as I know,
8428 there is no operating system support for the multiple subspace mode.
8429 It might be possible to support indirect calls if we didn't use
8430 $$dyncall (see the indirect sequence generated in output_call). */
8432 return (decl != NULL_TREE);
8434 /* Sibcalls are not ok because the arg pointer register is not a fixed
8435 register. This prevents the sibcall optimization from occurring. In
8436 addition, there are problems with stub placement using GNU ld. This
8437 is because a normal sibcall branch uses a 17-bit relocation while
8438 a regular call branch uses a 22-bit relocation. As a result, more
8439 care needs to be taken in the placement of long-branch stubs. */
8443 /* Sibcalls are only ok within a translation unit. */
8444 return (decl && !TREE_PUBLIC (decl));
8447 /* ??? Addition is not commutative on the PA due to the weird implicit
8448 space register selection rules for memory addresses. Therefore, we
8449 don't consider a + b == b + a, as this might be inside a MEM. */
8451 pa_commutative_p (const_rtx x, int outer_code)
8453 return (COMMUTATIVE_P (x)
8454 && (TARGET_NO_SPACE_REGS
8455 || (outer_code != UNKNOWN && outer_code != MEM)
8456 || GET_CODE (x) != PLUS));
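/* Background example (illustrative): in the 32-bit runtime the two
   high-order bits of the base register select one of the space registers
   %sr4-%sr7 for a memory reference.  For an address (plus A B), which
   operand acts as the base therefore matters; swapping A and B can select
   a different space register and hence a different location, which is why
   PLUS is not treated as commutative when it might be inside a MEM.  */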
8459 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8460 use in fmpyadd instructions. */
8462 fmpyaddoperands (rtx *operands)
8464 enum machine_mode mode = GET_MODE (operands[0]);
8466 /* Must be a floating point mode. */
8467 if (mode != SFmode && mode != DFmode)
8470 /* All modes must be the same. */
8471 if (! (mode == GET_MODE (operands[1])
8472 && mode == GET_MODE (operands[2])
8473 && mode == GET_MODE (operands[3])
8474 && mode == GET_MODE (operands[4])
8475 && mode == GET_MODE (operands[5])))
8478 /* All operands must be registers. */
8479 if (! (GET_CODE (operands[1]) == REG
8480 && GET_CODE (operands[2]) == REG
8481 && GET_CODE (operands[3]) == REG
8482 && GET_CODE (operands[4]) == REG
8483 && GET_CODE (operands[5]) == REG))
8486 /* Only 2 real operands to the addition. One of the input operands must
8487 be the same as the output operand. */
8488 if (! rtx_equal_p (operands[3], operands[4])
8489 && ! rtx_equal_p (operands[3], operands[5]))
8492 /* Inout operand of add cannot conflict with any operands from multiply. */
8493 if (rtx_equal_p (operands[3], operands[0])
8494 || rtx_equal_p (operands[3], operands[1])
8495 || rtx_equal_p (operands[3], operands[2]))
8498 /* The multiply result cannot feed into the addition operands. */
8499 if (rtx_equal_p (operands[4], operands[0])
8500 || rtx_equal_p (operands[5], operands[0]))
8503 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8505 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8506 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8507 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8508 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8509 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8510 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8513 /* Passed. Operands are suitable for fmpyadd. */
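/* Worked example (illustrative register choices): with the DFmode operand
   vector {%fr4, %fr5, %fr6, %fr7, %fr7, %fr8}, meaning %fr4 = %fr5 * %fr6
   and %fr7 = %fr7 + %fr8, every test above passes: the add is two-address
   (operands[3] == operands[4]), the add's in/out register %fr7 is not an
   operand of the multiply, and neither addition input is the multiply
   destination %fr4.  */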
8517 #if !defined(USE_COLLECT2)
8519 pa_asm_out_constructor (rtx symbol, int priority)
8521 if (!function_label_operand (symbol, VOIDmode))
8522 hppa_encode_label (symbol);
8524 #ifdef CTORS_SECTION_ASM_OP
8525 default_ctor_section_asm_out_constructor (symbol, priority);
8527 # ifdef TARGET_ASM_NAMED_SECTION
8528 default_named_section_asm_out_constructor (symbol, priority);
8530 default_stabs_asm_out_constructor (symbol, priority);
8536 pa_asm_out_destructor (rtx symbol, int priority)
8538 if (!function_label_operand (symbol, VOIDmode))
8539 hppa_encode_label (symbol);
8541 #ifdef DTORS_SECTION_ASM_OP
8542 default_dtor_section_asm_out_destructor (symbol, priority);
8544 # ifdef TARGET_ASM_NAMED_SECTION
8545 default_named_section_asm_out_destructor (symbol, priority);
8547 default_stabs_asm_out_destructor (symbol, priority);
8553 /* This function places uninitialized global data in the bss section.
8554 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8555 function on the SOM port to prevent uninitialized global data from
8556 being placed in the data section. */
8559 pa_asm_output_aligned_bss (FILE *stream,
8561 unsigned HOST_WIDE_INT size,
8564 switch_to_section (bss_section);
8565 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8567 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8568 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8571 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8572 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8575 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8576 ASM_OUTPUT_LABEL (stream, name);
8577 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8580 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8581 that doesn't allow the alignment of global common storage to be directly
8582 specified. The SOM linker aligns common storage based on the rounded
8583 value of the NUM_BYTES parameter in the .comm directive. It's not
8584 possible to use the .align directive as it doesn't affect the alignment
8585 of the label associated with a .comm directive. */
8588 pa_asm_output_aligned_common (FILE *stream,
8590 unsigned HOST_WIDE_INT size,
8593 unsigned int max_common_align;
8595 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8596 if (align > max_common_align)
8598 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8599 "for global common data. Using %u",
8600 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8601 align = max_common_align;
8604 switch_to_section (bss_section);
8606 assemble_name (stream, name);
8607 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8608 MAX (size, align / BITS_PER_UNIT));
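/* Worked example: a 4-byte symbol that needs 16-byte alignment is emitted
   as ".comm sym,16", i.e. MAX (size, align / BITS_PER_UNIT), so a linker
   that derives common alignment only from the rounded size still gives
   the label the requested alignment.  */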
8611 /* We can't use .comm for local common storage as the SOM linker effectively
8612 treats the symbol as universal and uses the same storage for local symbols
8613 with the same name in different object files. The .block directive
8614 reserves an uninitialized block of storage. However, it's not common
8615 storage. Fortunately, GCC never requests common storage with the same
8616 name in any given translation unit. */
8619 pa_asm_output_aligned_local (FILE *stream,
8621 unsigned HOST_WIDE_INT size,
8624 switch_to_section (bss_section);
8625 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8628 fprintf (stream, "%s", LOCAL_ASM_OP);
8629 assemble_name (stream, name);
8630 fprintf (stream, "\n");
8633 ASM_OUTPUT_LABEL (stream, name);
8634 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8637 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8638 use in fmpysub instructions. */
8640 fmpysuboperands (rtx *operands)
8642 enum machine_mode mode = GET_MODE (operands[0]);
8644 /* Must be a floating point mode. */
8645 if (mode != SFmode && mode != DFmode)
8648 /* All modes must be the same. */
8649 if (! (mode == GET_MODE (operands[1])
8650 && mode == GET_MODE (operands[2])
8651 && mode == GET_MODE (operands[3])
8652 && mode == GET_MODE (operands[4])
8653 && mode == GET_MODE (operands[5])))
8656 /* All operands must be registers. */
8657 if (! (GET_CODE (operands[1]) == REG
8658 && GET_CODE (operands[2]) == REG
8659 && GET_CODE (operands[3]) == REG
8660 && GET_CODE (operands[4]) == REG
8661 && GET_CODE (operands[5]) == REG))
8664 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8665 operation, so operands[4] must be the same as operands[3]. */
8666 if (! rtx_equal_p (operands[3], operands[4]))
8669 /* The multiply result cannot feed into the subtraction. */
8670 if (rtx_equal_p (operands[5], operands[0]))
8673 /* Inout operand of sub cannot conflict with any operands from multiply. */
8674 if (rtx_equal_p (operands[3], operands[0])
8675 || rtx_equal_p (operands[3], operands[1])
8676 || rtx_equal_p (operands[3], operands[2]))
8679 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8681 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8682 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8683 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8684 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8685 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8686 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8689 /* Passed. Operands are suitable for fmpysub. */
8693 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8694 constants for shadd instructions. */
8696 shadd_constant_p (int val)
8698 if (val == 2 || val == 4 || val == 8)
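/* Illustrative sketch (plain C, not compiled into the port): the shNadd
   instructions compute (x << N) + y for N in {1, 2, 3}, which is why only
   the scale factors 2, 4 and 8 are accepted above.  */
#if 0
static int
shadd_emulate (int scale, int x, int y)
{
  int shift = scale == 2 ? 1 : scale == 4 ? 2 : 3;	/* scale passed the test */
  return (x << shift) + y;				/* sh1add/sh2add/sh3add */
}
#endif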
8704 /* Return TRUE if INSN branches forward. */
8707 forward_branch_p (rtx insn)
8709 rtx lab = JUMP_LABEL (insn);
8711 /* The INSN must have a jump label. */
8712 gcc_assert (lab != NULL_RTX);
8714 if (INSN_ADDRESSES_SET_P ())
8715 return INSN_ADDRESSES (INSN_UID (lab)) > INSN_ADDRESSES (INSN_UID (insn));
8722 insn = NEXT_INSN (insn);
8728 /* Return 1 if INSN is in the delay slot of a call instruction. */
8730 jump_in_call_delay (rtx insn)
8733 if (GET_CODE (insn) != JUMP_INSN)
8736 if (PREV_INSN (insn)
8737 && PREV_INSN (PREV_INSN (insn))
8738 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8740 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8742 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8743 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8750 /* Output an unconditional move and branch insn. */
8753 output_parallel_movb (rtx *operands, rtx insn)
8755 int length = get_attr_length (insn);
8757 /* These are the cases in which we win. */
8759 return "mov%I1b,tr %1,%0,%2";
8761 /* None of the following cases win, but they don't lose either. */
8764 if (dbr_sequence_length () == 0)
8766 /* Nothing in the delay slot, fake it by putting the combined
8767 insn (the copy or add) in the delay slot of a bl. */
8768 if (GET_CODE (operands[1]) == CONST_INT)
8769 return "b %2\n\tldi %1,%0";
8771 return "b %2\n\tcopy %1,%0";
8775 /* Something in the delay slot, but we've got a long branch. */
8776 if (GET_CODE (operands[1]) == CONST_INT)
8777 return "ldi %1,%0\n\tb %2";
8779 return "copy %1,%0\n\tb %2";
8783 if (GET_CODE (operands[1]) == CONST_INT)
8784 output_asm_insn ("ldi %1,%0", operands);
8786 output_asm_insn ("copy %1,%0", operands);
8787 return output_lbranch (operands[2], insn, 1);
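/* For example (illustrative operands), in the winning case above this
   emits the single instruction "movb,tr %r26,%r28,L$24": a register copy
   and an always-taken branch in one slot.  */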
8790 /* Output an unconditional add and branch insn. */
8793 output_parallel_addb (rtx *operands, rtx insn)
8795 int length = get_attr_length (insn);
8797 /* To make life easy we want operand0 to be the shared input/output
8798 operand and operand1 to be the readonly operand. */
8799 if (operands[0] == operands[1])
8800 operands[1] = operands[2];
8802 /* These are the cases in which we win. */
8804 return "add%I1b,tr %1,%0,%3";
8806 /* None of the following cases win, but they don't lose either. */
8809 if (dbr_sequence_length () == 0)
8810 /* Nothing in the delay slot, fake it by putting the combined
8811 insn (the copy or add) in the delay slot of a bl. */
8812 return "b %3\n\tadd%I1 %1,%0,%0";
8814 /* Something in the delay slot, but we've got a long branch. */
8815 return "add%I1 %1,%0,%0\n\tb %3";
8818 output_asm_insn ("add%I1 %1,%0,%0", operands);
8819 return output_lbranch (operands[3], insn, 1);
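/* For example (illustrative operands), the winning case above emits
   "addb,tr %r25,%r26,L$17", which adds %r25 into %r26 and branches to
   L$17; the ",tr" completer makes the branch condition always true.  */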
8822 /* Return nonzero if INSN (a jump insn) immediately follows a call
8823 to a named function. This is used to avoid filling the delay slot
8824 of the jump since it can usually be eliminated by modifying RP in
8825 the delay slot of the call. */
8828 following_call (rtx insn)
8830 if (! TARGET_JUMP_IN_DELAY)
8833 /* Find the previous real insn, skipping NOTEs. */
8834 insn = PREV_INSN (insn);
8835 while (insn && GET_CODE (insn) == NOTE)
8836 insn = PREV_INSN (insn);
8838 /* Check for CALL_INSNs and millicode calls. */
8840 && ((GET_CODE (insn) == CALL_INSN
8841 && get_attr_type (insn) != TYPE_DYNCALL)
8842 || (GET_CODE (insn) == INSN
8843 && GET_CODE (PATTERN (insn)) != SEQUENCE
8844 && GET_CODE (PATTERN (insn)) != USE
8845 && GET_CODE (PATTERN (insn)) != CLOBBER
8846 && get_attr_type (insn) == TYPE_MILLI)))
8852 /* We use this hook to perform a PA-specific optimization which is difficult
8853 to do in earlier passes.
8855 We want the delay slots of branches within jump tables to be filled.
8856 None of the compiler passes at the moment even has the notion that a
8857 PA jump table doesn't contain addresses, but instead contains actual instructions!
8860 Because we actually jump into the table, the addresses of each entry
8861 must stay constant in relation to the beginning of the table (which
8862 itself must stay constant relative to the instruction to jump into
8863 it). I don't believe we can guarantee earlier passes of the compiler
8864 will adhere to those rules.
8866 So, late in the compilation process we find all the jump tables, and
8867 expand them into real code -- e.g. each entry in the jump table vector
8868 will get an appropriate label followed by a jump to the final target.
8870 Reorg and the final jump pass can then optimize these branches and
8871 fill their delay slots. We end up with smaller, more efficient code.
8873 The jump instructions within the table are special; we must be able
8874 to identify them during assembly output (if the jumps don't get filled
8875 we need to emit a nop rather than nullifying the delay slot). We
8876 identify jumps in switch tables by using insns with the attribute
8877 type TYPE_BTABLE_BRANCH.
8879 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8880 insns. This serves two purposes, first it prevents jump.c from
8881 noticing that the last N entries in the table jump to the instruction
8882 immediately after the table and deleting the jumps. Second, those
8883 insns mark where we should emit .begin_brtab and .end_brtab directives
8884 when using GAS (allows for better link time optimizations). */
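/* Sketch of the expansion performed here (illustrative labels): a table
   entry that was a bare address

	.word L$5

   becomes a labelled short jump that reorg and the final jump pass can
   then optimize

   L$0042:
	b L$5
	nop

   with the nop occupying the delay slot until it can be filled.  */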
8891 remove_useless_addtr_insns (1);
8893 if (pa_cpu < PROCESSOR_8000)
8894 pa_combine_instructions ();
8897 /* This is fairly cheap, so always run it if optimizing. */
8898 if (optimize > 0 && !TARGET_BIG_SWITCH)
8900 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8901 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8903 rtx pattern, tmp, location, label;
8904 unsigned int length, i;
8906 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8907 if (GET_CODE (insn) != JUMP_INSN
8908 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8909 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8912 /* Emit marker for the beginning of the branch table. */
8913 emit_insn_before (gen_begin_brtab (), insn);
8915 pattern = PATTERN (insn);
8916 location = PREV_INSN (insn);
8917 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8919 for (i = 0; i < length; i++)
8921 /* Emit a label before each jump to keep jump.c from
8922 removing this code. */
8923 tmp = gen_label_rtx ();
8924 LABEL_NUSES (tmp) = 1;
8925 emit_label_after (tmp, location);
8926 location = NEXT_INSN (location);
8928 if (GET_CODE (pattern) == ADDR_VEC)
8929 label = XEXP (XVECEXP (pattern, 0, i), 0);
8931 label = XEXP (XVECEXP (pattern, 1, i), 0);
8933 tmp = gen_short_jump (label);
8935 /* Emit the jump itself. */
8936 tmp = emit_jump_insn_after (tmp, location);
8937 JUMP_LABEL (tmp) = label;
8938 LABEL_NUSES (label)++;
8939 location = NEXT_INSN (location);
8941 /* Emit a BARRIER after the jump. */
8942 emit_barrier_after (location);
8943 location = NEXT_INSN (location);
8946 /* Emit marker for the end of the branch table. */
8947 emit_insn_before (gen_end_brtab (), location);
8948 location = NEXT_INSN (location);
8949 emit_barrier_after (location);
8951 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8957 /* Still need brtab marker insns. FIXME: the presence of these
8958 markers disables output of the branch table to readonly memory,
8959 and any alignment directives that might be needed. Possibly,
8960 the begin_brtab insn should be output before the label for the
8961 table. This doesn't matter at the moment since the tables are
8962 always output in the text section. */
8963 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8965 /* Find an ADDR_VEC insn. */
8966 if (GET_CODE (insn) != JUMP_INSN
8967 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8968 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8971 /* Now generate markers for the beginning and end of the branch table. */
8973 emit_insn_before (gen_begin_brtab (), insn);
8974 emit_insn_after (gen_end_brtab (), insn);
8979 /* The PA has a number of odd instructions which can perform multiple
8980 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8981 it may be profitable to combine two instructions into one instruction
8982 with two outputs. It's not profitable on PA2.0 machines because the
8983 two outputs would take two slots in the reorder buffers.
8985 This routine finds instructions which can be combined and combines
8986 them. We only support some of the potential combinations, and we
8987 only try common ways to find suitable instructions.
8989 * addb can add two registers or a register and a small integer
8990 and jump to a nearby (+-8k) location. Normally the jump to the
8991 nearby location is conditional on the result of the add, but by
8992 using the "true" condition we can make the jump unconditional.
8993 Thus addb can perform two independent operations in one insn.
8995 * movb is similar to addb in that it can perform a reg->reg
8996 or small immediate->reg copy and jump to a nearby (+-8k) location.
8998 * fmpyadd and fmpysub can perform an FP multiply and either an
8999 FP add or FP sub if the operands of the multiply and add/sub are
9000 independent (there are other minor restrictions). Note both
9001 the fmpy and fadd/fsub can in theory move to better spots according
9002 to data dependencies, but for now we require the fmpy stay at a fixed point.
9005 * Many of the memory operations can perform pre & post updates
9006 of index registers. GCC's pre/post increment/decrement addressing
9007 is far too simple to take advantage of all the possibilities. This
9008 pass may not be suitable since those insns may not be independent.
9010 * comclr can compare two ints or an int and a register, nullify
9011 the following instruction and zero some other register. This
9012 is more difficult to use as it's harder to find an insn which
9013 will generate a comclr than finding something like an unconditional
9014 branch. (conditional moves & long branches create comclr insns).
9016 * Most arithmetic operations can conditionally skip the next
9017 instruction. They can be viewed as "perform this operation
9018 and conditionally jump to this nearby location" (where nearby
9019 is a few insns away). These are difficult to use due to the
9020 branch length restrictions. */
9023 pa_combine_instructions (void)
9025 rtx anchor, new_rtx;
9027 /* This can get expensive since the basic algorithm is on the
9028 order of O(n^2) (or worse). Only do it for -O2 or higher
9029 levels of optimization. */
9033 /* Walk down the list of insns looking for "anchor" insns which
9034 may be combined with "floating" insns. As the name implies,
9035 "anchor" instructions don't move, while "floating" insns may
9037 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
9038 new_rtx = make_insn_raw (new_rtx);
9040 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
9042 enum attr_pa_combine_type anchor_attr;
9043 enum attr_pa_combine_type floater_attr;
9045 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
9046 Also ignore any special USE insns. */
9047 if ((GET_CODE (anchor) != INSN
9048 && GET_CODE (anchor) != JUMP_INSN
9049 && GET_CODE (anchor) != CALL_INSN)
9050 || GET_CODE (PATTERN (anchor)) == USE
9051 || GET_CODE (PATTERN (anchor)) == CLOBBER
9052 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
9053 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
9056 anchor_attr = get_attr_pa_combine_type (anchor);
9057 /* See if anchor is an insn suitable for combination. */
9058 if (anchor_attr == PA_COMBINE_TYPE_FMPY
9059 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
9060 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9061 && ! forward_branch_p (anchor)))
9065 for (floater = PREV_INSN (anchor);
9067 floater = PREV_INSN (floater))
9069 if (GET_CODE (floater) == NOTE
9070 || (GET_CODE (floater) == INSN
9071 && (GET_CODE (PATTERN (floater)) == USE
9072 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9075 /* Anything except a regular INSN will stop our search. */
9076 if (GET_CODE (floater) != INSN
9077 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9078 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9084 /* See if FLOATER is suitable for combination with the anchor insn. */
9086 floater_attr = get_attr_pa_combine_type (floater);
9087 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9088 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9089 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9090 && floater_attr == PA_COMBINE_TYPE_FMPY))
9092 /* If ANCHOR and FLOATER can be combined, then we're
9093 done with this pass. */
9094 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9095 SET_DEST (PATTERN (floater)),
9096 XEXP (SET_SRC (PATTERN (floater)), 0),
9097 XEXP (SET_SRC (PATTERN (floater)), 1)))
9101 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
9102 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
9104 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
9106 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9107 SET_DEST (PATTERN (floater)),
9108 XEXP (SET_SRC (PATTERN (floater)), 0),
9109 XEXP (SET_SRC (PATTERN (floater)), 1)))
9114 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
9115 SET_DEST (PATTERN (floater)),
9116 SET_SRC (PATTERN (floater)),
9117 SET_SRC (PATTERN (floater))))
9123 /* If we didn't find anything on the backwards scan try forwards. */
9125 && (anchor_attr == PA_COMBINE_TYPE_FMPY
9126 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
9128 for (floater = anchor; floater; floater = NEXT_INSN (floater))
9130 if (GET_CODE (floater) == NOTE
9131 || (GET_CODE (floater) == INSN
9132 && (GET_CODE (PATTERN (floater)) == USE
9133 || GET_CODE (PATTERN (floater)) == CLOBBER)))
9137 /* Anything except a regular INSN will stop our search. */
9138 if (GET_CODE (floater) != INSN
9139 || GET_CODE (PATTERN (floater)) == ADDR_VEC
9140 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
9146 /* See if FLOATER is suitable for combination with the anchor insn. */
9148 floater_attr = get_attr_pa_combine_type (floater);
9149 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
9150 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
9151 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9152 && floater_attr == PA_COMBINE_TYPE_FMPY))
9154 /* If ANCHOR and FLOATER can be combined, then we're
9155 done with this pass. */
9156 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
9157 SET_DEST (PATTERN (floater)),
9158 XEXP (SET_SRC (PATTERN (floater)),
9160 XEXP (SET_SRC (PATTERN (floater)),
9167 /* FLOATER will be nonzero if we found a suitable floating
9168 insn for combination with ANCHOR. */
9170 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
9171 || anchor_attr == PA_COMBINE_TYPE_FMPY))
9173 /* Emit the new instruction and delete the old anchor. */
9174 emit_insn_before (gen_rtx_PARALLEL
9176 gen_rtvec (2, PATTERN (anchor),
9177 PATTERN (floater))),
9180 SET_INSN_DELETED (anchor);
9182 /* Emit a special USE insn for FLOATER, then delete
9183 the floating insn. */
9184 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9185 delete_insn (floater);
9190 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9193 /* Emit the new_jump instruction and delete the old anchor. */
9195 = emit_jump_insn_before (gen_rtx_PARALLEL
9197 gen_rtvec (2, PATTERN (anchor),
9198 PATTERN (floater))),
9201 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9202 SET_INSN_DELETED (anchor);
9204 /* Emit a special USE insn for FLOATER, then delete
9205 the floating insn. */
9206 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9207 delete_insn (floater);
9215 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9218 int insn_code_number;
9221 /* Create a PARALLEL with the patterns of ANCHOR and
9222 FLOATER, try to recognize it, then test constraints
9223 for the resulting pattern.
9225 If the pattern doesn't match or the constraints
9226 aren't met, keep searching for a suitable floater insn. */
9228 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9229 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9230 INSN_CODE (new_rtx) = -1;
9231 insn_code_number = recog_memoized (new_rtx);
9232 if (insn_code_number < 0
9233 || (extract_insn (new_rtx), ! constrain_operands (1)))
9247 /* There are up to three operands to consider: one
9248 output and two inputs.
9250 The output must not be used between FLOATER & ANCHOR
9251 exclusive. The inputs must not be set between
9252 FLOATER and ANCHOR exclusive. */
9254 if (reg_used_between_p (dest, start, end))
9257 if (reg_set_between_p (src1, start, end))
9260 if (reg_set_between_p (src2, start, end))
9263 /* If we get here, then everything is good. */
9267 /* Return nonzero if references for INSN are delayed.
9269 Millicode insns are actually function calls with some special
9270 constraints on arguments and register usage.
9272 Millicode calls always expect their arguments in the integer argument
9273 registers, and always return their result in %r29 (ret1). They
9274 are expected to clobber their arguments, %r1, %r29, and the return
9275 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9277 This function tells reorg that the references to arguments and
9278 millicode calls do not appear to happen until after the millicode call.
9279 This allows reorg to put insns which set the argument registers into the
9280 delay slot of the millicode call -- thus they act more like traditional CALL_INSNs.
9283 Note we cannot consider side effects of the insn to be delayed because
9284 the branch and link insn will clobber the return pointer. If we happened
9285 to use the return pointer in the delay slot of the call, then we lose.
9287 get_attr_type will try to recognize the given insn, so make sure to
9288 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns in particular. */
9291 insn_refs_are_delayed (rtx insn)
9293 return ((GET_CODE (insn) == INSN
9294 && GET_CODE (PATTERN (insn)) != SEQUENCE
9295 && GET_CODE (PATTERN (insn)) != USE
9296 && GET_CODE (PATTERN (insn)) != CLOBBER
9297 && get_attr_type (insn) == TYPE_MILLI));
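/* For example (illustrative), reporting argument references as delayed
   lets reorg move an argument setup into a millicode call's delay slot:

	bl $$mulI,%r31		; millicode call, returns through %r31
	ldi 42,%r25		; argument set up in the delay slot

   The ldi still takes effect before the millicode routine reads its
   arguments.  */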
9300 /* Promote the return value, but not the arguments. */
9302 static enum machine_mode
9303 pa_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
9304 enum machine_mode mode,
9305 int *punsignedp ATTRIBUTE_UNUSED,
9306 const_tree fntype ATTRIBUTE_UNUSED,
9309 if (for_return == 0)
9311 return promote_mode (type, mode, punsignedp);
9314 /* On the HP-PA the value is found in register(s) 28(-29), unless
9315 the mode is SF or DF. Then the value is returned in fr4 (32).
9317 This must perform the same promotions as PROMOTE_MODE, else promoting
9318 return values in TARGET_PROMOTE_FUNCTION_MODE will not work correctly.
9320 Small structures must be returned in a PARALLEL on PA64 in order
9321 to match the HP Compiler ABI. */
9324 pa_function_value (const_tree valtype,
9325 const_tree func ATTRIBUTE_UNUSED,
9326 bool outgoing ATTRIBUTE_UNUSED)
9328 enum machine_mode valmode;
9330 if (AGGREGATE_TYPE_P (valtype)
9331 || TREE_CODE (valtype) == COMPLEX_TYPE
9332 || TREE_CODE (valtype) == VECTOR_TYPE)
9336 /* Aggregates with a size less than or equal to 128 bits are
9337 returned in GR 28(-29). They are left justified. The pad
9338 bits are undefined. Larger aggregates are returned in memory.
9342 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9344 for (i = 0; i < ub; i++)
9346 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9347 gen_rtx_REG (DImode, 28 + i),
9352 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9354 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9356 /* Aggregates 5 to 8 bytes in size are returned in general
9357 registers r28-r29 in the same manner as other non
9358 floating-point objects. The data is right-justified and
9359 zero-extended to 64 bits. This is opposite to the normal
9360 justification used on big endian targets and requires
9361 special treatment. */
9362 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9363 gen_rtx_REG (DImode, 28), const0_rtx);
9364 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9368 if ((INTEGRAL_TYPE_P (valtype)
9369 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9370 || POINTER_TYPE_P (valtype))
9371 valmode = word_mode;
9373 valmode = TYPE_MODE (valtype);
9375 if (TREE_CODE (valtype) == REAL_TYPE
9376 && !AGGREGATE_TYPE_P (valtype)
9377 && TYPE_MODE (valtype) != TFmode
9378 && !TARGET_SOFT_FLOAT)
9379 return gen_rtx_REG (valmode, 32);
9381 return gen_rtx_REG (valmode, 28);
9384 /* Implement the TARGET_LIBCALL_VALUE hook. */
9387 pa_libcall_value (enum machine_mode mode,
9388 const_rtx fun ATTRIBUTE_UNUSED)
9390 if (! TARGET_SOFT_FLOAT
9391 && (mode == SFmode || mode == DFmode))
9392 return gen_rtx_REG (mode, 32);
9394 return gen_rtx_REG (mode, 28);
9397 /* Implement the TARGET_FUNCTION_VALUE_REGNO_P hook. */
9400 pa_function_value_regno_p (const unsigned int regno)
9403 || (! TARGET_SOFT_FLOAT && regno == 32))
9409 /* Update the data in CUM to advance over an argument
9410 of mode MODE and data type TYPE.
9411 (TYPE is null for libcalls where that information may not be available.) */
9414 pa_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9415 const_tree type, bool named ATTRIBUTE_UNUSED)
9417 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9418 int arg_size = FUNCTION_ARG_SIZE (mode, type);
9420 cum->nargs_prototype--;
9421 cum->words += (arg_size
9422 + ((cum->words & 01)
9423 && type != NULL_TREE
9427 /* Return the location of a parameter that is passed in a register or NULL
9428 if the parameter has any component that is passed in memory.
9430 This is new code and will be pushed into the net sources after further testing.
9433 ??? We might want to restructure this so that it looks more like other ports.
9436 pa_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9437 const_tree type, bool named ATTRIBUTE_UNUSED)
9439 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9440 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9447 if (mode == VOIDmode)
9450 arg_size = FUNCTION_ARG_SIZE (mode, type);
9452 /* If this arg would be passed partially or totally on the stack, then
9453 this routine should return zero. pa_arg_partial_bytes will
9454 handle arguments which are split between regs and stack slots if
9455 the ABI mandates split arguments. */
9458 /* The 32-bit ABI does not split arguments. */
9459 if (cum->words + arg_size > max_arg_words)
9465 alignment = cum->words & 1;
9466 if (cum->words + alignment >= max_arg_words)
9470 /* The 32bit ABIs and the 64bit ABIs are rather different,
9471 particularly in their handling of FP registers. We might
9472 be able to cleverly share code between them, but I'm not
9473 going to bother in the hope that splitting them up results
9474 in code that is more easily understood. */
9478 /* Advance the base registers to their current locations.
9480 Remember, gprs grow towards smaller register numbers while
9481 fprs grow to higher register numbers. Also remember that
9482 although FP regs are 32-bit addressable, we pretend that
9483 the registers are 64-bits wide. */
9484 gpr_reg_base = 26 - cum->words;
9485 fpr_reg_base = 32 + cum->words;
9487 /* Arguments wider than one word and small aggregates need special treatment.
9491 || (type && (AGGREGATE_TYPE_P (type)
9492 || TREE_CODE (type) == COMPLEX_TYPE
9493 || TREE_CODE (type) == VECTOR_TYPE)))
9495 /* Double-extended precision (80-bit), quad-precision (128-bit)
9496 and aggregates including complex numbers are aligned on
9497 128-bit boundaries. The first eight 64-bit argument slots
9498 are associated one-to-one, with general registers r26
9499 through r19, and also with floating-point registers fr4
9500 through fr11. Arguments larger than one word are always
9501 passed in general registers.
9503 Using a PARALLEL with a word mode register results in left
9504 justified data on a big-endian target. */
9507 int i, offset = 0, ub = arg_size;
9509 /* Align the base register. */
9510 gpr_reg_base -= alignment;
9512 ub = MIN (ub, max_arg_words - cum->words - alignment);
9513 for (i = 0; i < ub; i++)
9515 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9516 gen_rtx_REG (DImode, gpr_reg_base),
9522 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9527 /* If the argument is larger than a word, then we know precisely
9528 which registers we must use. */
9542 /* Structures 5 to 8 bytes in size are passed in the general
9543 registers in the same manner as other non floating-point
9544 objects. The data is right-justified and zero-extended
9545 to 64 bits. This is opposite to the normal justification
9546 used on big endian targets and requires special treatment.
9547 We now define BLOCK_REG_PADDING to pad these objects.
9548 Aggregates, complex and vector types are passed in the same
9549 manner as structures. */
9551 || (type && (AGGREGATE_TYPE_P (type)
9552 || TREE_CODE (type) == COMPLEX_TYPE
9553 || TREE_CODE (type) == VECTOR_TYPE)))
9555 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9556 gen_rtx_REG (DImode, gpr_reg_base),
9558 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9563 /* We have a single word (32 bits). A simple computation
9564 will get us the register #s we need. */
9565 gpr_reg_base = 26 - cum->words;
9566 fpr_reg_base = 32 + 2 * cum->words;
9570 /* Determine if the argument needs to be passed in both general and
9571 floating point registers. */
9572 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9573 /* If we are doing soft-float with portable runtime, then there
9574 is no need to worry about FP regs. */
9575 && !TARGET_SOFT_FLOAT
9576 /* The parameter must be some kind of scalar float, else we just
9577 pass it in integer registers. */
9578 && GET_MODE_CLASS (mode) == MODE_FLOAT
9579 /* The target function must not have a prototype. */
9580 && cum->nargs_prototype <= 0
9581 /* libcalls do not need to pass items in both FP and general registers. */
9583 && type != NULL_TREE
9584 /* All this hair applies to "outgoing" args only. This includes
9585 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
9587 /* Also pass outgoing floating arguments in both registers in indirect
9588 calls with the 32 bit ABI and the HP assembler since there is no
9589 way to specify argument locations in static functions. */
9594 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9600 gen_rtx_EXPR_LIST (VOIDmode,
9601 gen_rtx_REG (mode, fpr_reg_base),
9603 gen_rtx_EXPR_LIST (VOIDmode,
9604 gen_rtx_REG (mode, gpr_reg_base),
9609 /* See if we should pass this parameter in a general register. */
9610 if (TARGET_SOFT_FLOAT
9611 /* Indirect calls in the normal 32bit ABI require all arguments
9612 to be passed in general registers. */
9613 || (!TARGET_PORTABLE_RUNTIME
9617 /* If the parameter is not a scalar floating-point parameter,
9618 then it belongs in GPRs. */
9619 || GET_MODE_CLASS (mode) != MODE_FLOAT
9620 /* Structure with single SFmode field belongs in GPR. */
9621 || (type && AGGREGATE_TYPE_P (type)))
9622 retval = gen_rtx_REG (mode, gpr_reg_base);
9624 retval = gen_rtx_REG (mode, fpr_reg_base);
9629 /* Arguments larger than one word are double word aligned. */
9632 pa_function_arg_boundary (enum machine_mode mode, const_tree type)
9634 bool singleword = (type
9635 ? (integer_zerop (TYPE_SIZE (type))
9636 || !TREE_CONSTANT (TYPE_SIZE (type))
9637 || int_size_in_bytes (type) <= UNITS_PER_WORD)
9638 : GET_MODE_SIZE (mode) <= UNITS_PER_WORD);
9640 return singleword ? PARM_BOUNDARY : MAX_PARM_BOUNDARY;
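/* For instance, on the 32-bit port a DFmode argument is wider than a word,
   so the hook reports MAX_PARM_BOUNDARY and the argument starts on an even
   argument word; the (cum->words & 1) alignment tests elsewhere in this
   file implement the same rule.  */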
9643 /* If this arg would be passed totally in registers or totally on the stack,
9644 then this routine should return zero. */
9647 pa_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
9648 tree type, bool named ATTRIBUTE_UNUSED)
9650 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9651 unsigned int max_arg_words = 8;
9652 unsigned int offset = 0;
9657 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9660 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9661 /* Arg fits fully into registers. */
9663 else if (cum->words + offset >= max_arg_words)
9664 /* Arg fully on the stack. */
9668 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
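/* Stand-alone sketch of the computation above (illustrative only; it
   hard-codes the 64-bit ABI's eight argument words of eight bytes each):  */
#if 0
static unsigned int
partial_bytes_example (unsigned int words_used, unsigned int arg_words)
{
  unsigned int offset = (arg_words > 1 && (words_used & 1)) ? 1 : 0;
  if (words_used + offset + arg_words <= 8)
    return 0;				/* fits fully in registers */
  if (words_used + offset >= 8)
    return 0;				/* lives fully on the stack */
  /* Split: e.g. words_used == 6 with arg_words == 4 yields 16, two words
     in registers and the remaining two on the stack.  */
  return (8 - words_used - offset) * 8;
}
#endif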
9672 /* A get_unnamed_section callback for switching to the text section.
9674 This function is only used with SOM. Because we don't support
9675 named subspaces, we can only create a new subspace or switch back
9676 to the default text subspace. */
9679 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9681 gcc_assert (TARGET_SOM);
9684 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9686 /* We only want to emit a .nsubspa directive once at the
9687 start of the function. */
9688 cfun->machine->in_nsubspa = 1;
9690 /* Create a new subspace for the text. This provides
9691 better stub placement and one-only functions. */
9693 && DECL_ONE_ONLY (cfun->decl)
9694 && !DECL_WEAK (cfun->decl))
9696 output_section_asm_op ("\t.SPACE $TEXT$\n"
9697 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9698 "ACCESS=44,SORT=24,COMDAT");
9704 /* There isn't a current function or the body of the current
9705 function has been completed. So, we are changing to the
9706 text section to output debugging information. Thus, we
9707 need to forget that we are in the text section so that
9708 varasm.c will call us when text_section is selected again. */
9709 gcc_assert (!cfun || !cfun->machine
9710 || cfun->machine->in_nsubspa == 2);
9713 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9716 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9719 /* A get_unnamed_section callback for switching to comdat data
9720 sections. This function is only used with SOM. */
9723 som_output_comdat_data_section_asm_op (const void *data)
9726 output_section_asm_op (data);
9729 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9732 pa_som_asm_init_sections (void)
9735 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9737 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9738 is not being generated. */
9739 som_readonly_data_section
9740 = get_unnamed_section (0, output_section_asm_op,
9741 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9743 /* When secondary definitions are not supported, SOM makes readonly
9744 data one-only by creating a new $LIT$ subspace in $TEXT$ with the comdat flag. */
9746 som_one_only_readonly_data_section
9747 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9749 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9750 "ACCESS=0x2c,SORT=16,COMDAT");
9753 /* When secondary definitions are not supported, SOM makes data one-only
9754 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9755 som_one_only_data_section
9756 = get_unnamed_section (SECTION_WRITE,
9757 som_output_comdat_data_section_asm_op,
9758 "\t.SPACE $PRIVATE$\n"
9759 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9760 "ACCESS=31,SORT=24,COMDAT");
9762 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9763 which reference data within the $TEXT$ space (for example constant
9764 strings in the $LIT$ subspace).
9766 The assemblers (GAS and HP as) both have problems with handling
9767 the difference of two symbols, which is the other correct way to
9768 reference constant data during PIC code generation.
9770 So, there's no way to reference constant data which is in the
9771 $TEXT$ space during PIC generation. Instead place all constant
9772 data into the $PRIVATE$ subspace (this reduces sharing, but it
9773 works correctly). */
9774 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9776 /* We must not have a reference to an external symbol defined in a
9777 shared library in a readonly section, else the SOM linker will complain.
9780 So, we force exception information into the data section. */
9781 exception_section = data_section;
9784 /* On hpux10, the linker will give an error if we have a reference
9785 in the read-only data section to a symbol defined in a shared
9786 library. Therefore, expressions that might require a reloc can
9787 not be placed in the read-only data section. */
9790 pa_select_section (tree exp, int reloc,
9791 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9793 if (TREE_CODE (exp) == VAR_DECL
9794 && TREE_READONLY (exp)
9795 && !TREE_THIS_VOLATILE (exp)
9796 && DECL_INITIAL (exp)
9797 && (DECL_INITIAL (exp) == error_mark_node
9798 || TREE_CONSTANT (DECL_INITIAL (exp)))
9802 && DECL_ONE_ONLY (exp)
9803 && !DECL_WEAK (exp))
9804 return som_one_only_readonly_data_section;
9806 return readonly_data_section;
9808 else if (CONSTANT_CLASS_P (exp) && !reloc)
9809 return readonly_data_section;
9811 && TREE_CODE (exp) == VAR_DECL
9812 && DECL_ONE_ONLY (exp)
9813 && !DECL_WEAK (exp))
9814 return som_one_only_data_section;
9816 return data_section;
9820 pa_globalize_label (FILE *stream, const char *name)
9822 /* We only handle DATA objects here; functions are globalized in
9823 ASM_DECLARE_FUNCTION_NAME. */
9824 if (! FUNCTION_NAME_P (name))
9826 fputs ("\t.EXPORT ", stream);
9827 assemble_name (stream, name);
9828 fputs (",DATA\n", stream);
9832 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9835 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9836 int incoming ATTRIBUTE_UNUSED)
9838 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9841 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9844 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9846 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9847 PA64 ABI says that objects larger than 128 bits are returned in memory.
9848 Note, int_size_in_bytes can return -1 if the size of the object is
9849 variable or larger than the maximum value that can be expressed as
9850 a HOST_WIDE_INT. It can also return zero for an empty type. The
9851 simplest way to handle variable and empty types is to pass them in
9852 memory. This avoids problems in defining the boundaries of argument
9853 slots, allocating registers, etc. */
9854 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9855 || int_size_in_bytes (type) <= 0);
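/* For example, a 12-byte structure is returned in memory under the 32-bit
   SOM ABI (12 > 8) but in registers under PA64 (12 <= 16), and any
   variable-sized type, for which int_size_in_bytes returns -1, always
   goes in memory.  */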
9858 /* Structure to hold declaration and name of external symbols that are
9859 emitted by GCC. We generate a vector of these symbols and output them
9860 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9861 This avoids putting out names that are never really used. */
9863 typedef struct GTY(()) extern_symbol
9869 /* Define gc'd vector type for extern_symbol. */
9870 DEF_VEC_O(extern_symbol);
9871 DEF_VEC_ALLOC_O(extern_symbol,gc);
9873 /* Vector of extern_symbol pointers. */
9874 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9876 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9877 /* Mark DECL (name NAME) as an external reference (assembler output
9878 file FILE). This saves the names to output at the end of the file
9879 if actually referenced. */
9882 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9884 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9886 gcc_assert (file == asm_out_file);
9891 /* Output text required at the end of an assembler file.
9892 This includes deferred plabels and .import directives for
9893 all external symbols that were actually referenced. */
9896 pa_hpux_file_end (void)
9901 if (!NO_DEFERRED_PROFILE_COUNTERS)
9902 output_deferred_profile_counters ();
9904 output_deferred_plabels ();
9906 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9908 tree decl = p->decl;
9910 if (!TREE_ASM_WRITTEN (decl)
9911 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9912 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9915 VEC_free (extern_symbol, gc, extern_symbols);
9919 /* Return true if a change from mode FROM to mode TO for a register
9920 in register class RCLASS is invalid. */
9923 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9924 enum reg_class rclass)
9929 /* Reject changes to/from complex and vector modes. */
9930 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9931 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9934 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9937 /* There is no way to load QImode or HImode values directly from
9938 memory. SImode loads to the FP registers are not zero extended.
9939 On the 64-bit target, this conflicts with the definition of
9940 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9941 with different sizes in the floating-point registers. */
9942 if (MAYBE_FP_REG_CLASS_P (rclass))
9945 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9946 in specific sets of registers. Thus, we cannot allow changing
9947 to a larger mode when it's larger than a word. */
9948 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9949 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
9955 /* Returns TRUE if it is a good idea to tie two pseudo registers
9956 when one has mode MODE1 and one has mode MODE2.
9957 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9958 for any hard reg, then this must be FALSE for correct output.
9960 We should return FALSE for QImode and HImode because these modes
9961 are not ok in the floating-point registers. However, this prevents
9962 tying these modes to SImode and DImode in the general registers.
9963 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9964 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9965 in the floating-point registers. */
9968 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9970 /* Don't tie modes in different classes. */
9971 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))
9978 /* Length in units of the trampoline instruction code. */
9980 #define TRAMPOLINE_CODE_SIZE (TARGET_64BIT ? 24 : (TARGET_PA_20 ? 32 : 40))
9983 /* Output assembler code for a block containing the constant parts
9984 of a trampoline, leaving space for the variable parts.
9986 The trampoline sets the static chain pointer to STATIC_CHAIN_REGNUM
9987 and then branches to the specified routine.
9989 This code template is copied from text segment to stack location
9990 and then patched with pa_trampoline_init to contain valid values,
9991 and then entered as a subroutine.
9993 It is best to keep this as small as possible to avoid having to
9994 flush multiple lines in the cache. */
9997 pa_asm_trampoline_template (FILE *f)
10001 fputs ("\tldw 36(%r22),%r21\n", f);
10002 fputs ("\tbb,>=,n %r21,30,.+16\n", f);
10003 if (ASSEMBLER_DIALECT == 0)
10004 fputs ("\tdepi 0,31,2,%r21\n", f);
10006 fputs ("\tdepwi 0,31,2,%r21\n", f);
10007 fputs ("\tldw 4(%r21),%r19\n", f);
10008 fputs ("\tldw 0(%r21),%r21\n", f);
10011 fputs ("\tbve (%r21)\n", f);
10012 fputs ("\tldw 40(%r22),%r29\n", f);
10013 fputs ("\t.word 0\n", f);
10014 fputs ("\t.word 0\n", f);
10018 fputs ("\tldsid (%r21),%r1\n", f);
10019 fputs ("\tmtsp %r1,%sr0\n", f);
10020 fputs ("\tbe 0(%sr0,%r21)\n", f);
10021 fputs ("\tldw 40(%r22),%r29\n", f);
10023 fputs ("\t.word 0\n", f);
10024 fputs ("\t.word 0\n", f);
10025 fputs ("\t.word 0\n", f);
10026 fputs ("\t.word 0\n", f);
10030 fputs ("\t.dword 0\n", f);
10031 fputs ("\t.dword 0\n", f);
10032 fputs ("\t.dword 0\n", f);
10033 fputs ("\t.dword 0\n", f);
10034 fputs ("\tmfia %r31\n", f);
10035 fputs ("\tldd 24(%r31),%r1\n", f);
10036 fputs ("\tldd 24(%r1),%r27\n", f);
10037 fputs ("\tldd 16(%r1),%r1\n", f);
10038 fputs ("\tbve (%r1)\n", f);
10039 fputs ("\tldd 32(%r31),%r31\n", f);
10040 fputs ("\t.dword 0 ; fptr\n", f);
10041 fputs ("\t.dword 0 ; static link\n", f);
10045 /* Emit RTL insns to initialize the variable parts of a trampoline.
10046 FNADDR is an RTX for the address of the function's pure code.
10047 CXT is an RTX for the static chain value for the function.
10049 Move the function address to the trampoline template at offset 36.
10050 Move the static chain value to trampoline template at offset 40.
10051 Move the trampoline address to trampoline template at offset 44.
10052 Move r19 to trampoline template at offset 48. The latter two
10053 words create a plabel for the indirect call to the trampoline.
10055 A similar sequence is used for the 64-bit port but the plabel is
10056 at the beginning of the trampoline.
10058 Finally, the cache entries for the trampoline code are flushed.
10059 This is necessary to ensure that the trampoline instruction sequence
10060 is written to memory prior to any attempts at prefetching the code sequence.
10064 pa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
10066 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10067 rtx start_addr = gen_reg_rtx (Pmode);
10068 rtx end_addr = gen_reg_rtx (Pmode);
10069 rtx line_length = gen_reg_rtx (Pmode);
10072 emit_block_move (m_tramp, assemble_trampoline_template (),
10073 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
10074 r_tramp = force_reg (Pmode, XEXP (m_tramp, 0));
10078 tmp = adjust_address (m_tramp, Pmode, 36);
10079 emit_move_insn (tmp, fnaddr);
10080 tmp = adjust_address (m_tramp, Pmode, 40);
10081 emit_move_insn (tmp, chain_value);
10083 /* Create a fat pointer for the trampoline. */
10084 tmp = adjust_address (m_tramp, Pmode, 44);
10085 emit_move_insn (tmp, r_tramp);
10086 tmp = adjust_address (m_tramp, Pmode, 48);
10087 emit_move_insn (tmp, gen_rtx_REG (Pmode, 19));
10089 /* fdc and fic only use registers for the address to flush,
10090 they do not accept integer displacements. We align the
10091 start and end addresses to the beginning of their respective
10092 cache lines to minimize the number of lines flushed. */
10093 emit_insn (gen_andsi3 (start_addr, r_tramp,
10094 GEN_INT (-MIN_CACHELINE_SIZE)));
10095 tmp = force_reg (Pmode, plus_constant (r_tramp, TRAMPOLINE_CODE_SIZE-1));
10096 emit_insn (gen_andsi3 (end_addr, tmp,
10097 GEN_INT (-MIN_CACHELINE_SIZE)));
10098 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10099 emit_insn (gen_dcacheflushsi (start_addr, end_addr, line_length));
10100 emit_insn (gen_icacheflushsi (start_addr, end_addr, line_length,
10101 gen_reg_rtx (Pmode),
10102 gen_reg_rtx (Pmode)));
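/* Purely illustrative view of the 32-bit trampoline after the stores
   above; the struct is made up, but the offsets match the adjust_address
   calls in the !TARGET_64BIT path (assuming 4-byte ints).  */
#if 0
struct pa32_trampoline
{
  unsigned int insns[9];	/* 36 bytes of fixed template code	*/
  unsigned int fnaddr;		/* offset 36: target function address	*/
  unsigned int static_chain;	/* offset 40: static chain value	*/
  unsigned int plabel_pc;	/* offset 44: the trampoline's address	*/
  unsigned int plabel_gp;	/* offset 48: %r19, the PIC register	*/
};
#endif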
10106 tmp = adjust_address (m_tramp, Pmode, 56);
10107 emit_move_insn (tmp, fnaddr);
10108 tmp = adjust_address (m_tramp, Pmode, 64);
10109 emit_move_insn (tmp, chain_value);
10111 /* Create a fat pointer for the trampoline. */
10112 tmp = adjust_address (m_tramp, Pmode, 16);
10113 emit_move_insn (tmp, force_reg (Pmode, plus_constant (r_tramp, 32)));
10114 tmp = adjust_address (m_tramp, Pmode, 24);
10115 emit_move_insn (tmp, gen_rtx_REG (Pmode, 27));
10117 /* fdc and fic only use registers for the address to flush,
10118 they do not accept integer displacements. We align the
10119 start and end addresses to the beginning of their respective
10120 cache lines to minimize the number of lines flushed. */
10121 tmp = force_reg (Pmode, plus_constant (r_tramp, 32));
10122 emit_insn (gen_anddi3 (start_addr, tmp,
10123 GEN_INT (-MIN_CACHELINE_SIZE)));
10124 tmp = force_reg (Pmode, plus_constant (tmp, TRAMPOLINE_CODE_SIZE - 1));
10125 emit_insn (gen_anddi3 (end_addr, tmp,
10126 GEN_INT (-MIN_CACHELINE_SIZE)));
10127 emit_move_insn (line_length, GEN_INT (MIN_CACHELINE_SIZE));
10128 emit_insn (gen_dcacheflushdi (start_addr, end_addr, line_length));
10129 emit_insn (gen_icacheflushdi (start_addr, end_addr, line_length,
10130 gen_reg_rtx (Pmode),
10131 gen_reg_rtx (Pmode)));
10135 /* Perform any machine-specific adjustment in the address of the trampoline.
10136 ADDR contains the address that was passed to pa_trampoline_init.
10137 Adjust the trampoline address to point to the plabel at offset 44. The code below adds 2 so the low-order bits mark the result as a plabel address. */
10140 pa_trampoline_adjust_address (rtx addr)
10143 addr = memory_address (Pmode, plus_constant (addr, 46));
10148 pa_delegitimize_address (rtx orig_x)
10150 rtx x = delegitimize_mem_from_attrs (orig_x);
10152 if (GET_CODE (x) == LO_SUM
10153 && GET_CODE (XEXP (x, 1)) == UNSPEC
10154 && XINT (XEXP (x, 1), 1) == UNSPEC_DLTIND14R)
10155 return gen_const_mem (Pmode, XVECEXP (XEXP (x, 1), 0, 0));
10160 pa_internal_arg_pointer (void)
10162 /* The argument pointer and the hard frame pointer are the same in
10163 the 32-bit runtime, so we don't need a copy. */
10165 return copy_to_reg (virtual_incoming_args_rtx);
10167 return virtual_incoming_args_rtx;
10170 /* Given FROM and TO register numbers, say whether this elimination is allowed.
10171 Frame pointer elimination is automatically handled. */
10174 pa_can_eliminate (const int from, const int to)
10176 /* The argument pointer cannot be eliminated in the 64-bit runtime. */
10177 if (TARGET_64BIT && from == ARG_POINTER_REGNUM)
10180 return (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM
10181 ? ! frame_pointer_needed
10185 /* Define the offset between two registers, FROM to be eliminated and its
10186 replacement TO, at the start of a routine. */
10188 pa_initial_elimination_offset (int from, int to)
10190 HOST_WIDE_INT offset;
10192 if ((from == HARD_FRAME_POINTER_REGNUM || from == FRAME_POINTER_REGNUM)
10193 && to == STACK_POINTER_REGNUM)
10194 offset = -compute_frame_size (get_frame_size (), 0);
10195 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10198 gcc_unreachable ();
10204 pa_conditional_register_usage (void)
10208 if (!TARGET_64BIT && !TARGET_PA_11)
10210 for (i = 56; i <= FP_REG_LAST; i++)
10211 fixed_regs[i] = call_used_regs[i] = 1;
10212 for (i = 33; i < 56; i += 2)
10213 fixed_regs[i] = call_used_regs[i] = 1;
10215 if (TARGET_DISABLE_FPREGS || TARGET_SOFT_FLOAT)
10217 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
10218 fixed_regs[i] = call_used_regs[i] = 1;
10221 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10224 /* Target hook for c_mode_for_suffix. */
10226 static enum machine_mode
10227 pa_c_mode_for_suffix (char suffix)
10229 if (HPUX_LONG_DOUBLE_LIBRARY)
10238 /* Target hook for function_section. */
10241 pa_function_section (tree decl, enum node_frequency freq,
10242 bool startup, bool exit)
10244 /* Put functions in text section if target doesn't have named sections. */
10245 if (!targetm_common.have_named_sections)
10246 return text_section;
10248 /* Force nested functions into the same section as the containing function.
10251 && DECL_SECTION_NAME (decl) == NULL_TREE
10252 && DECL_CONTEXT (decl) != NULL_TREE
10253 && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
10254 && DECL_SECTION_NAME (DECL_CONTEXT (decl)) == NULL_TREE)
10255 return function_section (DECL_CONTEXT (decl));
10257 /* Otherwise, use the default function section. */
10258 return default_function_section (decl, freq, startup, exit);
10261 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
10263 In 64-bit mode, we reject CONST_DOUBLES. We also reject CONST_INTS
10264 that need more than three instructions to load prior to reload. This
10265 limit is somewhat arbitrary. It takes three instructions to load a
10266 CONST_INT from memory but two are memory accesses. It may be better
10267 to increase the allowed range for CONST_INTS. We may also be able
10268 to handle CONST_DOUBLES. */
10271 pa_legitimate_constant_p (enum machine_mode mode, rtx x)
10273 if (GET_MODE_CLASS (mode) == MODE_FLOAT && x != CONST0_RTX (mode))
10276 if (!NEW_HP_ASSEMBLER && !TARGET_GAS && GET_CODE (x) == LABEL_REF)
10279 /* TLS_MODEL_GLOBAL_DYNAMIC and TLS_MODEL_LOCAL_DYNAMIC are not
10280 legitimate constants. */
10281 if (PA_SYMBOL_REF_TLS_P (x))
10283 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
10285 if (model == TLS_MODEL_GLOBAL_DYNAMIC || model == TLS_MODEL_LOCAL_DYNAMIC)
10289 if (TARGET_64BIT && GET_CODE (x) == CONST_DOUBLE)
10293 && HOST_BITS_PER_WIDE_INT > 32
10294 && GET_CODE (x) == CONST_INT
10295 && !reload_in_progress
10296 && !reload_completed
10297 && !LEGITIMATE_64BIT_CONST_INT_P (INTVAL (x))
10298 && !cint_ok_for_move (INTVAL (x)))
10301 if (function_label_operand (x, mode))