1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
4 Free Software Foundation, Inc.
5 Contributed by James E. Wilson <wilson@cygnus.com> and
6 David Mosberger <davidm@hpl.hp.com>.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
35 #include "insn-attr.h"
43 #include "basic-block.h"
45 #include "diagnostic-core.h"
46 #include "sched-int.h"
49 #include "target-def.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
60 #include "tm-constrs.h"
61 #include "sel-sched.h"
63 #include "dwarf2out.h"
66 /* This is used for communication between ASM_OUTPUT_LABEL and
67 ASM_OUTPUT_LABELREF. */
68 int ia64_asm_output_label = 0;
70 /* Register names for ia64_expand_prologue. */
71 static const char * const ia64_reg_numbers[96] =
72 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
73 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
74 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
75 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
76 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
77 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
78 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
79 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
80 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
81 "r104","r105","r106","r107","r108","r109","r110","r111",
82 "r112","r113","r114","r115","r116","r117","r118","r119",
83 "r120","r121","r122","r123","r124","r125","r126","r127"};
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_input_reg_names[8] =
87 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_local_reg_names[80] =
91 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
92 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
93 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
94 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
95 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
96 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
97 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
98 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
99 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
100 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
102 /* ??? These strings could be shared with REGISTER_NAMES. */
103 static const char * const ia64_output_reg_names[8] =
104 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */
109 unsigned int ia64_section_threshold;
111 /* The following variable is used by the DFA insn scheduler. The value is
112 TRUE if we do insn bundling instead of insn scheduling. */
124 number_of_ia64_frame_regs
127 /* Structure to be filled in by ia64_compute_frame_size with register
128 save masks and offsets for the current function. */
130 struct ia64_frame_info
132 HOST_WIDE_INT total_size; /* size of the stack frame, not including
133 the caller's scratch area. */
134 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
135 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
136 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
137 HARD_REG_SET mask; /* mask of saved registers. */
138 unsigned int gr_used_mask; /* mask of registers in use as gr spill
139 registers or long-term scratches. */
140 int n_spilled; /* number of spilled registers. */
141 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
142 int n_input_regs; /* number of input registers used. */
143 int n_local_regs; /* number of local registers used. */
144 int n_output_regs; /* number of output registers used. */
145 int n_rotate_regs; /* number of rotating registers used. */
147 char need_regstk; /* true if a .regstk directive needed. */
148 char initialized; /* true if the data is finalized. */
151 /* Current frame information calculated by ia64_compute_frame_size. */
152 static struct ia64_frame_info current_frame_info;
153 /* The actual registers that are emitted. */
154 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
156 static int ia64_first_cycle_multipass_dfa_lookahead (void);
157 static void ia64_dependencies_evaluation_hook (rtx, rtx);
158 static void ia64_init_dfa_pre_cycle_insn (void);
159 static rtx ia64_dfa_pre_cycle_insn (void);
160 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
161 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
162 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
163 static void ia64_h_i_d_extended (void);
164 static void * ia64_alloc_sched_context (void);
165 static void ia64_init_sched_context (void *, bool);
166 static void ia64_set_sched_context (void *);
167 static void ia64_clear_sched_context (void *);
168 static void ia64_free_sched_context (void *);
169 static int ia64_mode_to_int (enum machine_mode);
170 static void ia64_set_sched_flags (spec_info_t);
171 static ds_t ia64_get_insn_spec_ds (rtx);
172 static ds_t ia64_get_insn_checked_ds (rtx);
173 static bool ia64_skip_rtx_p (const_rtx);
174 static int ia64_speculate_insn (rtx, ds_t, rtx *);
175 static bool ia64_needs_block_p (int);
176 static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
177 static int ia64_spec_check_p (rtx);
178 static int ia64_spec_check_src_p (rtx);
179 static rtx gen_tls_get_addr (void);
180 static rtx gen_thread_pointer (void);
181 static int find_gr_spill (enum ia64_frame_regs, int);
182 static int next_scratch_gr_reg (void);
183 static void mark_reg_gr_used_mask (rtx, void *);
184 static void ia64_compute_frame_size (HOST_WIDE_INT);
185 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
186 static void finish_spill_pointers (void);
187 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
188 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
189 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
190 static rtx gen_movdi_x (rtx, rtx, rtx);
191 static rtx gen_fr_spill_x (rtx, rtx, rtx);
192 static rtx gen_fr_restore_x (rtx, rtx, rtx);
194 static void ia64_option_override (void);
195 static bool ia64_can_eliminate (const int, const int);
196 static enum machine_mode hfa_element_mode (const_tree, bool);
197 static void ia64_setup_incoming_varargs (cumulative_args_t, enum machine_mode,
199 static int ia64_arg_partial_bytes (cumulative_args_t, enum machine_mode,
201 static rtx ia64_function_arg_1 (cumulative_args_t, enum machine_mode,
202 const_tree, bool, bool);
203 static rtx ia64_function_arg (cumulative_args_t, enum machine_mode,
205 static rtx ia64_function_incoming_arg (cumulative_args_t,
206 enum machine_mode, const_tree, bool);
207 static void ia64_function_arg_advance (cumulative_args_t, enum machine_mode,
209 static unsigned int ia64_function_arg_boundary (enum machine_mode,
211 static bool ia64_function_ok_for_sibcall (tree, tree);
212 static bool ia64_return_in_memory (const_tree, const_tree);
213 static rtx ia64_function_value (const_tree, const_tree, bool);
214 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
215 static bool ia64_function_value_regno_p (const unsigned int);
216 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
218 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
220 static bool ia64_rtx_costs (rtx, int, int, int, int *, bool);
221 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
222 static void fix_range (const char *);
223 static struct machine_function * ia64_init_machine_status (void);
224 static void emit_insn_group_barriers (FILE *);
225 static void emit_all_insn_group_barriers (FILE *);
226 static void final_emit_insn_group_barriers (FILE *);
227 static void emit_predicate_relation_info (void);
228 static void ia64_reorg (void);
229 static bool ia64_in_small_data_p (const_tree);
230 static void process_epilogue (FILE *, rtx, bool, bool);
232 static bool ia64_assemble_integer (rtx, unsigned int, int);
233 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
234 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
235 static void ia64_output_function_end_prologue (FILE *);
237 static void ia64_print_operand (FILE *, rtx, int);
238 static void ia64_print_operand_address (FILE *, rtx);
239 static bool ia64_print_operand_punct_valid_p (unsigned char code);
241 static int ia64_issue_rate (void);
242 static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
243 static void ia64_sched_init (FILE *, int, int);
244 static void ia64_sched_init_global (FILE *, int, int);
245 static void ia64_sched_finish_global (FILE *, int);
246 static void ia64_sched_finish (FILE *, int);
247 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
248 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
249 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
250 static int ia64_variable_issue (FILE *, int, rtx, int);
252 static void ia64_asm_unwind_emit (FILE *, rtx);
253 static void ia64_asm_emit_except_personality (rtx);
254 static void ia64_asm_init_sections (void);
256 static enum unwind_info_type ia64_debug_unwind_info (void);
258 static struct bundle_state *get_free_bundle_state (void);
259 static void free_bundle_state (struct bundle_state *);
260 static void initiate_bundle_states (void);
261 static void finish_bundle_states (void);
262 static unsigned bundle_state_hash (const void *);
263 static int bundle_state_eq_p (const void *, const void *);
264 static int insert_bundle_state (struct bundle_state *);
265 static void initiate_bundle_state_table (void);
266 static void finish_bundle_state_table (void);
267 static int try_issue_nops (struct bundle_state *, int);
268 static int try_issue_insn (struct bundle_state *, rtx);
269 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
270 static int get_max_pos (state_t);
271 static int get_template (state_t, int);
273 static rtx get_next_important_insn (rtx, rtx);
274 static bool important_for_bundling_p (rtx);
275 static void bundling (FILE *, int, rtx, rtx);
277 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
278 HOST_WIDE_INT, tree);
279 static void ia64_file_start (void);
280 static void ia64_globalize_decl_name (FILE *, tree);
282 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
283 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
284 static section *ia64_select_rtx_section (enum machine_mode, rtx,
285 unsigned HOST_WIDE_INT);
286 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
288 static unsigned int ia64_section_type_flags (tree, const char *, int);
289 static void ia64_init_libfuncs (void)
291 static void ia64_hpux_init_libfuncs (void)
293 static void ia64_sysv4_init_libfuncs (void)
295 static void ia64_vms_init_libfuncs (void)
297 static void ia64_soft_fp_init_libfuncs (void)
299 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
301 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
304 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
305 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
306 static void ia64_encode_section_info (tree, rtx, int);
307 static rtx ia64_struct_value_rtx (tree, int);
308 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
309 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
310 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
311 static bool ia64_legitimate_constant_p (enum machine_mode, rtx);
312 static bool ia64_cannot_force_const_mem (enum machine_mode, rtx);
313 static const char *ia64_mangle_type (const_tree);
314 static const char *ia64_invalid_conversion (const_tree, const_tree);
315 static const char *ia64_invalid_unary_op (int, const_tree);
316 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
317 static enum machine_mode ia64_c_mode_for_suffix (char);
318 static enum machine_mode ia64_promote_function_mode (const_tree,
323 static void ia64_trampoline_init (rtx, tree, rtx);
324 static void ia64_override_options_after_change (void);
326 static tree ia64_builtin_decl (unsigned, bool);
328 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
329 static enum machine_mode ia64_get_reg_raw_mode (int regno);
330 static section * ia64_hpux_function_section (tree, enum node_frequency,
333 /* Table of valid machine attributes. */
334 static const struct attribute_spec ia64_attribute_table[] =
336 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
337 affects_type_identity } */
338 { "syscall_linkage", 0, 0, false, true, true, NULL, false },
339 { "model", 1, 1, true, false, false, ia64_handle_model_attribute,
341 #if TARGET_ABI_OPEN_VMS
342 { "common_object", 1, 1, true, false, false,
343 ia64_vms_common_object_attribute, false },
345 { "version_id", 1, 1, true, false, false,
346 ia64_handle_version_id_attribute, false },
347 { NULL, 0, 0, false, false, false, NULL, false }
350 /* Initialize the GCC target structure. */
351 #undef TARGET_ATTRIBUTE_TABLE
352 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
354 #undef TARGET_INIT_BUILTINS
355 #define TARGET_INIT_BUILTINS ia64_init_builtins
357 #undef TARGET_EXPAND_BUILTIN
358 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
360 #undef TARGET_BUILTIN_DECL
361 #define TARGET_BUILTIN_DECL ia64_builtin_decl
363 #undef TARGET_ASM_BYTE_OP
364 #define TARGET_ASM_BYTE_OP "\tdata1\t"
365 #undef TARGET_ASM_ALIGNED_HI_OP
366 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
367 #undef TARGET_ASM_ALIGNED_SI_OP
368 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
369 #undef TARGET_ASM_ALIGNED_DI_OP
370 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
371 #undef TARGET_ASM_UNALIGNED_HI_OP
372 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
373 #undef TARGET_ASM_UNALIGNED_SI_OP
374 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
375 #undef TARGET_ASM_UNALIGNED_DI_OP
376 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
377 #undef TARGET_ASM_INTEGER
378 #define TARGET_ASM_INTEGER ia64_assemble_integer
380 #undef TARGET_OPTION_OVERRIDE
381 #define TARGET_OPTION_OVERRIDE ia64_option_override
383 #undef TARGET_ASM_FUNCTION_PROLOGUE
384 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
385 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
386 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
387 #undef TARGET_ASM_FUNCTION_EPILOGUE
388 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
390 #undef TARGET_PRINT_OPERAND
391 #define TARGET_PRINT_OPERAND ia64_print_operand
392 #undef TARGET_PRINT_OPERAND_ADDRESS
393 #define TARGET_PRINT_OPERAND_ADDRESS ia64_print_operand_address
394 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
395 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P ia64_print_operand_punct_valid_p
397 #undef TARGET_IN_SMALL_DATA_P
398 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
400 #undef TARGET_SCHED_ADJUST_COST_2
401 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
402 #undef TARGET_SCHED_ISSUE_RATE
403 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
404 #undef TARGET_SCHED_VARIABLE_ISSUE
405 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
406 #undef TARGET_SCHED_INIT
407 #define TARGET_SCHED_INIT ia64_sched_init
408 #undef TARGET_SCHED_FINISH
409 #define TARGET_SCHED_FINISH ia64_sched_finish
410 #undef TARGET_SCHED_INIT_GLOBAL
411 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
412 #undef TARGET_SCHED_FINISH_GLOBAL
413 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
414 #undef TARGET_SCHED_REORDER
415 #define TARGET_SCHED_REORDER ia64_sched_reorder
416 #undef TARGET_SCHED_REORDER2
417 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
419 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
420 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
422 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
423 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
425 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
426 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
427 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
428 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
430 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
431 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
432 ia64_first_cycle_multipass_dfa_lookahead_guard
434 #undef TARGET_SCHED_DFA_NEW_CYCLE
435 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
437 #undef TARGET_SCHED_H_I_D_EXTENDED
438 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
440 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
441 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
443 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
444 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
446 #undef TARGET_SCHED_SET_SCHED_CONTEXT
447 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
449 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
450 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
452 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
453 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
455 #undef TARGET_SCHED_SET_SCHED_FLAGS
456 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
458 #undef TARGET_SCHED_GET_INSN_SPEC_DS
459 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
461 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
462 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
464 #undef TARGET_SCHED_SPECULATE_INSN
465 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
467 #undef TARGET_SCHED_NEEDS_BLOCK_P
468 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
470 #undef TARGET_SCHED_GEN_SPEC_CHECK
471 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
473 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
474 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
475 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
477 #undef TARGET_SCHED_SKIP_RTX_P
478 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
480 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
481 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
482 #undef TARGET_ARG_PARTIAL_BYTES
483 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
484 #undef TARGET_FUNCTION_ARG
485 #define TARGET_FUNCTION_ARG ia64_function_arg
486 #undef TARGET_FUNCTION_INCOMING_ARG
487 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
488 #undef TARGET_FUNCTION_ARG_ADVANCE
489 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
490 #undef TARGET_FUNCTION_ARG_BOUNDARY
491 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
493 #undef TARGET_ASM_OUTPUT_MI_THUNK
494 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
495 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
496 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
498 #undef TARGET_ASM_FILE_START
499 #define TARGET_ASM_FILE_START ia64_file_start
501 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
502 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
504 #undef TARGET_REGISTER_MOVE_COST
505 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
506 #undef TARGET_MEMORY_MOVE_COST
507 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
508 #undef TARGET_RTX_COSTS
509 #define TARGET_RTX_COSTS ia64_rtx_costs
510 #undef TARGET_ADDRESS_COST
511 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
513 #undef TARGET_UNSPEC_MAY_TRAP_P
514 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
516 #undef TARGET_MACHINE_DEPENDENT_REORG
517 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
519 #undef TARGET_ENCODE_SECTION_INFO
520 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
522 #undef TARGET_SECTION_TYPE_FLAGS
523 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
526 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
527 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
530 #undef TARGET_PROMOTE_FUNCTION_MODE
531 #define TARGET_PROMOTE_FUNCTION_MODE ia64_promote_function_mode
533 /* ??? Investigate. */
535 #undef TARGET_PROMOTE_PROTOTYPES
536 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
539 #undef TARGET_FUNCTION_VALUE
540 #define TARGET_FUNCTION_VALUE ia64_function_value
541 #undef TARGET_LIBCALL_VALUE
542 #define TARGET_LIBCALL_VALUE ia64_libcall_value
543 #undef TARGET_FUNCTION_VALUE_REGNO_P
544 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
546 #undef TARGET_STRUCT_VALUE_RTX
547 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
548 #undef TARGET_RETURN_IN_MEMORY
549 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
550 #undef TARGET_SETUP_INCOMING_VARARGS
551 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
552 #undef TARGET_STRICT_ARGUMENT_NAMING
553 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
554 #undef TARGET_MUST_PASS_IN_STACK
555 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
556 #undef TARGET_GET_RAW_RESULT_MODE
557 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
558 #undef TARGET_GET_RAW_ARG_MODE
559 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
561 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
562 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
564 #undef TARGET_ASM_UNWIND_EMIT
565 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
566 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
567 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
568 #undef TARGET_ASM_INIT_SECTIONS
569 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
571 #undef TARGET_DEBUG_UNWIND_INFO
572 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
574 #undef TARGET_SCALAR_MODE_SUPPORTED_P
575 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
576 #undef TARGET_VECTOR_MODE_SUPPORTED_P
577 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
579 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
580 in an order different from the specified program order. */
581 #undef TARGET_RELAXED_ORDERING
582 #define TARGET_RELAXED_ORDERING true
584 #undef TARGET_LEGITIMATE_CONSTANT_P
585 #define TARGET_LEGITIMATE_CONSTANT_P ia64_legitimate_constant_p
587 #undef TARGET_CANNOT_FORCE_CONST_MEM
588 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
590 #undef TARGET_MANGLE_TYPE
591 #define TARGET_MANGLE_TYPE ia64_mangle_type
593 #undef TARGET_INVALID_CONVERSION
594 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
595 #undef TARGET_INVALID_UNARY_OP
596 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
597 #undef TARGET_INVALID_BINARY_OP
598 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
600 #undef TARGET_C_MODE_FOR_SUFFIX
601 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
603 #undef TARGET_CAN_ELIMINATE
604 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
606 #undef TARGET_TRAMPOLINE_INIT
607 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
609 #undef TARGET_INVALID_WITHIN_DOLOOP
610 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
612 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
613 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
615 #undef TARGET_PREFERRED_RELOAD_CLASS
616 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
618 #undef TARGET_DELAY_SCHED2
619 #define TARGET_DELAY_SCHED2 true
/* Variable tracking should be run after all optimizations which
   change the order of insns.  It also needs a valid CFG.  */
623 #undef TARGET_DELAY_VARTRACK
624 #define TARGET_DELAY_VARTRACK true
626 struct gcc_target targetm = TARGET_INITIALIZER;
630 ADDR_AREA_NORMAL, /* normal address area */
631 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
635 static GTY(()) tree small_ident1;
636 static GTY(()) tree small_ident2;
641 if (small_ident1 == 0)
643 small_ident1 = get_identifier ("small");
644 small_ident2 = get_identifier ("__small__");
648 /* Retrieve the address area that has been chosen for the given decl. */
650 static ia64_addr_area
651 ia64_get_addr_area (tree decl)
655 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
661 id = TREE_VALUE (TREE_VALUE (model_attr));
662 if (id == small_ident1 || id == small_ident2)
663 return ADDR_AREA_SMALL;
665 return ADDR_AREA_NORMAL;
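/* Illustrative usage (not part of this file): user code opts into the
   small address area via the "model" attribute handled below, e.g.

     static int counter __attribute__ ((model ("small")));

   which lets the symbol be addressed with a single addl instruction.  */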
669 ia64_handle_model_attribute (tree *node, tree name, tree args,
670 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
672 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
674 tree arg, decl = *node;
677 arg = TREE_VALUE (args);
678 if (arg == small_ident1 || arg == small_ident2)
680 addr_area = ADDR_AREA_SMALL;
684 warning (OPT_Wattributes, "invalid argument of %qE attribute",
686 *no_add_attrs = true;
689 switch (TREE_CODE (decl))
692 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
694 && !TREE_STATIC (decl))
696 error_at (DECL_SOURCE_LOCATION (decl),
697 "an address area attribute cannot be specified for "
699 *no_add_attrs = true;
701 area = ia64_get_addr_area (decl);
702 if (area != ADDR_AREA_NORMAL && addr_area != area)
704 error ("address area of %q+D conflicts with previous "
705 "declaration", decl);
706 *no_add_attrs = true;
711 error_at (DECL_SOURCE_LOCATION (decl),
712 "address area attribute cannot be specified for "
714 *no_add_attrs = true;
718 warning (OPT_Wattributes, "%qE attribute ignored",
720 *no_add_attrs = true;
727 /* The section must have global and overlaid attributes. */
728 #define SECTION_VMS_OVERLAY SECTION_MACH_DEP
730 /* Part of the low level implementation of DEC Ada pragma Common_Object which
731 enables the shared use of variables stored in overlaid linker areas
732 corresponding to the use of Fortran COMMON. */
735 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
736 int flags ATTRIBUTE_UNUSED,
744 DECL_COMMON (decl) = 1;
745 id = TREE_VALUE (args);
746 if (TREE_CODE (id) == IDENTIFIER_NODE)
747 val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
748 else if (TREE_CODE (id) == STRING_CST)
752 warning (OPT_Wattributes,
753 "%qE attribute requires a string constant argument", name);
754 *no_add_attrs = true;
757 DECL_SECTION_NAME (decl) = val;
761 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
764 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
765 unsigned HOST_WIDE_INT size,
768 tree attr = DECL_ATTRIBUTES (decl);
/* Since the common_object attribute sets DECL_SECTION_NAME, check that
   before looking up the attribute.  */
772 if (DECL_SECTION_NAME (decl) && attr)
773 attr = lookup_attribute ("common_object", attr);
779 /* Code from elfos.h. */
780 fprintf (file, "%s", COMMON_ASM_OP);
781 assemble_name (file, name);
782 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
783 size, align / BITS_PER_UNIT);
787 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
788 ASM_OUTPUT_LABEL (file, name);
789 ASM_OUTPUT_SKIP (file, size ? size : 1);
793 /* Definition of TARGET_ASM_NAMED_SECTION for VMS. */
796 ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
799 if (!(flags & SECTION_VMS_OVERLAY))
801 default_elf_asm_named_section (name, flags, decl);
804 if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
807 if (flags & SECTION_DECLARED)
809 fprintf (asm_out_file, "\t.section\t%s\n", name);
813 fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
817 ia64_encode_addr_area (tree decl, rtx symbol)
821 flags = SYMBOL_REF_FLAGS (symbol);
822 switch (ia64_get_addr_area (decl))
824 case ADDR_AREA_NORMAL: break;
825 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
826 default: gcc_unreachable ();
828 SYMBOL_REF_FLAGS (symbol) = flags;
832 ia64_encode_section_info (tree decl, rtx rtl, int first)
834 default_encode_section_info (decl, rtl, first);
836 /* Careful not to prod global register variables. */
837 if (TREE_CODE (decl) == VAR_DECL
838 && GET_CODE (DECL_RTL (decl)) == MEM
839 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
840 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
841 ia64_encode_addr_area (decl, XEXP (rtl, 0));
844 /* Return 1 if the operands of a move are ok. */
847 ia64_move_ok (rtx dst, rtx src)
849 /* If we're under init_recog_no_volatile, we'll not be able to use
850 memory_operand. So check the code directly and don't worry about
851 the validity of the underlying address, which should have been
852 checked elsewhere anyway. */
853 if (GET_CODE (dst) != MEM)
855 if (GET_CODE (src) == MEM)
857 if (register_operand (src, VOIDmode))
/* Otherwise, this must be a constant, and it must be either 0 or 0.0 or 1.0.  */
861 if (INTEGRAL_MODE_P (GET_MODE (dst)))
862 return src == const0_rtx;
864 return satisfies_constraint_G (src);
867 /* Return 1 if the operands are ok for a floating point load pair. */
870 ia64_load_pair_ok (rtx dst, rtx src)
872 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
874 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
876 switch (GET_CODE (XEXP (src, 0)))
885 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
887 if (GET_CODE (adjust) != CONST_INT
888 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
899 addp4_optimize_ok (rtx op1, rtx op2)
901 return (basereg_operand (op1, GET_MODE(op1)) !=
902 basereg_operand (op2, GET_MODE(op2)));
905 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
906 Return the length of the field, or <= 0 on failure. */
909 ia64_depz_field_mask (rtx rop, rtx rshift)
911 unsigned HOST_WIDE_INT op = INTVAL (rop);
912 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
/* Get rid of the zero bits we're shifting in.  */
op >>= shift;

/* We must now have a solid block of 1's at bit 0.  */
918 return exact_log2 (op + 1);
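/* A worked example of the check above (values are illustrative only):
   for rop = 0x3f8 and rshift = 3, shifting gives op = 0x7f, and
   exact_log2 (0x7f + 1) = 7, i.e. a 7-bit field starting at bit 3.
   A mask with a hole, say rop = 0x2f8, shifts to 0x5f and fails
   because 0x60 is not a power of two, so exact_log2 returns -1.  */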
921 /* Return the TLS model to use for ADDR. */
923 static enum tls_model
924 tls_symbolic_operand_type (rtx addr)
926 enum tls_model tls_kind = TLS_MODEL_NONE;
928 if (GET_CODE (addr) == CONST)
930 if (GET_CODE (XEXP (addr, 0)) == PLUS
931 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
932 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
934 else if (GET_CODE (addr) == SYMBOL_REF)
935 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
940 /* Return true if X is a constant that is valid for some immediate
941 field in an instruction. */
944 ia64_legitimate_constant_p (enum machine_mode mode, rtx x)
946 switch (GET_CODE (x))
953 if (GET_MODE (x) == VOIDmode || mode == SFmode || mode == DFmode)
955 return satisfies_constraint_G (x);
959 /* ??? Short term workaround for PR 28490. We must make the code here
960 match the code in ia64_expand_move and move_operand, even though they
961 are both technically wrong. */
962 if (tls_symbolic_operand_type (x) == 0)
964 HOST_WIDE_INT addend = 0;
967 if (GET_CODE (op) == CONST
968 && GET_CODE (XEXP (op, 0)) == PLUS
969 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
971 addend = INTVAL (XEXP (XEXP (op, 0), 1));
972 op = XEXP (XEXP (op, 0), 0);
975 if (any_offset_symbol_operand (op, mode)
976 || function_operand (op, mode))
978 if (aligned_offset_symbol_operand (op, mode))
979 return (addend & 0x3fff) == 0;
985 if (mode == V2SFmode)
986 return satisfies_constraint_Y (x);
988 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
989 && GET_MODE_SIZE (mode) <= 8);
996 /* Don't allow TLS addresses to get spilled to memory. */
999 ia64_cannot_force_const_mem (enum machine_mode mode, rtx x)
1003 return tls_symbolic_operand_type (x) != 0;
1006 /* Expand a symbolic constant load. */
1009 ia64_expand_load_address (rtx dest, rtx src)
1011 gcc_assert (GET_CODE (dest) == REG);
1013 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1014 having to pointer-extend the value afterward. Other forms of address
1015 computation below are also more natural to compute as 64-bit quantities.
1016 If we've been given an SImode destination register, change it. */
1017 if (GET_MODE (dest) != Pmode)
1018 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1019 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1023 if (small_addr_symbolic_operand (src, VOIDmode))
1026 if (TARGET_AUTO_PIC)
1027 emit_insn (gen_load_gprel64 (dest, src));
1028 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1029 emit_insn (gen_load_fptr (dest, src));
1030 else if (sdata_symbolic_operand (src, VOIDmode))
1031 emit_insn (gen_load_gprel (dest, src));
1034 HOST_WIDE_INT addend = 0;
1037 /* We did split constant offsets in ia64_expand_move, and we did try
1038 to keep them split in move_operand, but we also allowed reload to
1039 rematerialize arbitrary constants rather than spill the value to
1040 the stack and reload it. So we have to be prepared here to split
1041 them apart again. */
1042 if (GET_CODE (src) == CONST)
1044 HOST_WIDE_INT hi, lo;
1046 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1047 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1053 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
1057 tmp = gen_rtx_HIGH (Pmode, src);
1058 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1059 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1061 tmp = gen_rtx_LO_SUM (Pmode, gen_const_mem (Pmode, dest), src);
1062 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1066 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1067 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1074 static GTY(()) rtx gen_tls_tga;
1076 gen_tls_get_addr (void)
1079 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1083 static GTY(()) rtx thread_pointer_rtx;
1085 gen_thread_pointer (void)
1087 if (!thread_pointer_rtx)
1088 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1089 return thread_pointer_rtx;
1093 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1094 rtx orig_op1, HOST_WIDE_INT addend)
1096 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1098 HOST_WIDE_INT addend_lo, addend_hi;
1102 case TLS_MODEL_GLOBAL_DYNAMIC:
1105 tga_op1 = gen_reg_rtx (Pmode);
1106 emit_insn (gen_load_dtpmod (tga_op1, op1));
1108 tga_op2 = gen_reg_rtx (Pmode);
1109 emit_insn (gen_load_dtprel (tga_op2, op1));
1111 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1112 LCT_CONST, Pmode, 2, tga_op1,
1113 Pmode, tga_op2, Pmode);
1115 insns = get_insns ();
1118 if (GET_MODE (op0) != Pmode)
1120 emit_libcall_block (insns, op0, tga_ret, op1);
1123 case TLS_MODEL_LOCAL_DYNAMIC:
/* ??? This isn't the completely proper way to do local-dynamic.
1125 If the call to __tls_get_addr is used only by a single symbol,
1126 then we should (somehow) move the dtprel to the second arg
1127 to avoid the extra add. */
1130 tga_op1 = gen_reg_rtx (Pmode);
1131 emit_insn (gen_load_dtpmod (tga_op1, op1));
1133 tga_op2 = const0_rtx;
1135 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1136 LCT_CONST, Pmode, 2, tga_op1,
1137 Pmode, tga_op2, Pmode);
1139 insns = get_insns ();
1142 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1144 tmp = gen_reg_rtx (Pmode);
1145 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1147 if (!register_operand (op0, Pmode))
1148 op0 = gen_reg_rtx (Pmode);
1151 emit_insn (gen_load_dtprel (op0, op1));
1152 emit_insn (gen_adddi3 (op0, tmp, op0));
1155 emit_insn (gen_add_dtprel (op0, op1, tmp));
1158 case TLS_MODEL_INITIAL_EXEC:
1159 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1160 addend_hi = addend - addend_lo;
1162 op1 = plus_constant (op1, addend_hi);
1165 tmp = gen_reg_rtx (Pmode);
1166 emit_insn (gen_load_tprel (tmp, op1));
1168 if (!register_operand (op0, Pmode))
1169 op0 = gen_reg_rtx (Pmode);
1170 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1173 case TLS_MODEL_LOCAL_EXEC:
1174 if (!register_operand (op0, Pmode))
1175 op0 = gen_reg_rtx (Pmode);
1181 emit_insn (gen_load_tprel (op0, op1));
1182 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1185 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1193 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1194 orig_op0, 1, OPTAB_DIRECT);
1195 if (orig_op0 == op0)
1197 if (GET_MODE (orig_op0) == Pmode)
1199 return gen_lowpart (GET_MODE (orig_op0), op0);
1203 ia64_expand_move (rtx op0, rtx op1)
1205 enum machine_mode mode = GET_MODE (op0);
1207 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1208 op1 = force_reg (mode, op1);
1210 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1212 HOST_WIDE_INT addend = 0;
1213 enum tls_model tls_kind;
1216 if (GET_CODE (op1) == CONST
1217 && GET_CODE (XEXP (op1, 0)) == PLUS
1218 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1220 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1221 sym = XEXP (XEXP (op1, 0), 0);
1224 tls_kind = tls_symbolic_operand_type (sym);
1226 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1228 if (any_offset_symbol_operand (sym, mode))
1230 else if (aligned_offset_symbol_operand (sym, mode))
1232 HOST_WIDE_INT addend_lo, addend_hi;
1234 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1235 addend_hi = addend - addend_lo;
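/* Illustrative note: the two assignments above split ADDEND into a
   sign-extended 14-bit low part and the remaining 16KB-aligned high
   part.  E.g. addend = 0x6000 gives addend_lo = -0x2000 and
   addend_hi = 0x8000; addend_hi + addend_lo == addend always holds.  */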
1239 op1 = plus_constant (sym, addend_hi);
1248 if (reload_completed)
1250 /* We really should have taken care of this offset earlier. */
1251 gcc_assert (addend == 0);
1252 if (ia64_expand_load_address (op0, op1))
1258 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1260 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1262 op1 = expand_simple_binop (mode, PLUS, subtarget,
1263 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1272 /* Split a move from OP1 to OP0 conditional on COND. */
1275 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1277 rtx insn, first = get_last_insn ();
1279 emit_move_insn (op0, op1);
1281 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1283 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1287 /* Split a post-reload TImode or TFmode reference into two DImode
1288 components. This is made extra difficult by the fact that we do
1289 not get any scratch registers to work with, because reload cannot
1290 be prevented from giving us a scratch that overlaps the register
1291 pair involved. So instead, when addressing memory, we tweak the
1292 pointer register up and back down with POST_INCs. Or up and not
1293 back down when we can get away with it.
1295 REVERSED is true when the loads must be done in reversed order
1296 (high word first) for correctness. DEAD is true when the pointer
1297 dies with the second insn we generate and therefore the second
1298 address must not carry a postmodify.
1300 May return an insn which is to be emitted after the moves. */
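/* For illustration only (register numbers are made up): a TImode load
   from (mem (reg r14)) is split roughly as

     ld8 rX = [r14], 8        ;; low word, post-increment by 8
     ld8 rY = [r14], -8       ;; high word, post-decrement back

   with the second post-modify omitted when DEAD says the pointer dies
   with the second insn.  */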
1303 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1307 switch (GET_CODE (in))
1310 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1311 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1316 /* Cannot occur reversed. */
1317 gcc_assert (!reversed);
1319 if (GET_MODE (in) != TFmode)
1320 split_double (in, &out[0], &out[1]);
1322 /* split_double does not understand how to split a TFmode
1323 quantity into a pair of DImode constants. */
1326 unsigned HOST_WIDE_INT p[2];
1327 long l[4]; /* TFmode is 128 bits */
1329 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1330 real_to_target (l, &r, TFmode);
1332 if (FLOAT_WORDS_BIG_ENDIAN)
1334 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1335 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1339 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1340 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1342 out[0] = GEN_INT (p[0]);
1343 out[1] = GEN_INT (p[1]);
1349 rtx base = XEXP (in, 0);
1352 switch (GET_CODE (base))
1357 out[0] = adjust_automodify_address
1358 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1359 out[1] = adjust_automodify_address
1360 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1364 /* Reversal requires a pre-increment, which can only
1365 be done as a separate insn. */
1366 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1367 out[0] = adjust_automodify_address
1368 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1369 out[1] = adjust_address (in, DImode, 0);
1374 gcc_assert (!reversed && !dead);
1376 /* Just do the increment in two steps. */
1377 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1378 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1382 gcc_assert (!reversed && !dead);
1384 /* Add 8, subtract 24. */
1385 base = XEXP (base, 0);
1386 out[0] = adjust_automodify_address
1387 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1388 out[1] = adjust_automodify_address
1390 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1395 gcc_assert (!reversed && !dead);
1397 /* Extract and adjust the modification. This case is
1398 trickier than the others, because we might have an
1399 index register, or we might have a combined offset that
1400 doesn't fit a signed 9-bit displacement field. We can
1401 assume the incoming expression is already legitimate. */
1402 offset = XEXP (base, 1);
1403 base = XEXP (base, 0);
1405 out[0] = adjust_automodify_address
1406 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1408 if (GET_CODE (XEXP (offset, 1)) == REG)
1410 /* Can't adjust the postmodify to match. Emit the
1411 original, then a separate addition insn. */
1412 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1413 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1417 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1418 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1420 /* Again the postmodify cannot be made to match,
1421 but in this case it's more efficient to get rid
of the postmodify entirely and fix up with an
add insn.  */
out[1] = adjust_automodify_address (in, DImode, base, 8);
fixup = gen_adddi3
(base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1430 /* Combined offset still fits in the displacement field.
1431 (We cannot overflow it at the high end.) */
1432 out[1] = adjust_automodify_address
1433 (in, DImode, gen_rtx_POST_MODIFY
1434 (Pmode, base, gen_rtx_PLUS
1436 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1455 /* Split a TImode or TFmode move instruction after reload.
1456 This is used by *movtf_internal and *movti_internal. */
1458 ia64_split_tmode_move (rtx operands[])
1460 rtx in[2], out[2], insn;
1463 bool reversed = false;
1465 /* It is possible for reload to decide to overwrite a pointer with
1466 the value it points to. In that case we have to do the loads in
1467 the appropriate order so that the pointer is not destroyed too
1468 early. Also we must not generate a postmodify for that second
1469 load, or rws_access_regno will die. */
1470 if (GET_CODE (operands[1]) == MEM
1471 && reg_overlap_mentioned_p (operands[0], operands[1]))
1473 rtx base = XEXP (operands[1], 0);
1474 while (GET_CODE (base) != REG)
1475 base = XEXP (base, 0);
1477 if (REGNO (base) == REGNO (operands[0]))
1481 /* Another reason to do the moves in reversed order is if the first
1482 element of the target register pair is also the second element of
1483 the source register pair. */
1484 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1485 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1488 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1489 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1491 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1492 if (GET_CODE (EXP) == MEM \
1493 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1494 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1495 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1496 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1498 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1499 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1500 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1502 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1503 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1504 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1507 emit_insn (fixup[0]);
1509 emit_insn (fixup[1]);
1511 #undef MAYBE_ADD_REG_INC_NOTE
1514 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1515 through memory plus an extra GR scratch register. Except that you can
1516 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1517 SECONDARY_RELOAD_CLASS, but not both.
1519 We got into problems in the first place by allowing a construct like
1520 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1521 This solution attempts to prevent this situation from occurring. When
1522 we see something like the above, we spill the inner register to memory. */
1525 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1527 if (GET_CODE (in) == SUBREG
1528 && GET_MODE (SUBREG_REG (in)) == TImode
1529 && GET_CODE (SUBREG_REG (in)) == REG)
1531 rtx memt = assign_stack_temp (TImode, 16, 0);
1532 emit_move_insn (memt, SUBREG_REG (in));
1533 return adjust_address (memt, mode, 0);
1535 else if (force && GET_CODE (in) == REG)
1537 rtx memx = assign_stack_temp (mode, 16, 0);
1538 emit_move_insn (memx, in);
1545 /* Expand the movxf or movrf pattern (MODE says which) with the given
OPERANDS, returning true if the pattern should then invoke
DONE.  */
1550 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1552 rtx op0 = operands[0];
1554 if (GET_CODE (op0) == SUBREG)
1555 op0 = SUBREG_REG (op0);
1557 /* We must support XFmode loads into general registers for stdarg/vararg,
1558 unprototyped calls, and a rare case where a long double is passed as
1559 an argument after a float HFA fills the FP registers. We split them into
1560 DImode loads for convenience. We also need to support XFmode stores
1561 for the last case. This case does not happen for stdarg/vararg routines,
1562 because we do a block store to memory of unnamed arguments. */
1564 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1568 /* We're hoping to transform everything that deals with XFmode
1569 quantities and GR registers early in the compiler. */
1570 gcc_assert (can_create_pseudo_p ());
1572 /* Struct to register can just use TImode instead. */
1573 if ((GET_CODE (operands[1]) == SUBREG
1574 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1575 || (GET_CODE (operands[1]) == REG
1576 && GR_REGNO_P (REGNO (operands[1]))))
1578 rtx op1 = operands[1];
1580 if (GET_CODE (op1) == SUBREG)
1581 op1 = SUBREG_REG (op1);
1583 op1 = gen_rtx_REG (TImode, REGNO (op1));
1585 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1589 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1591 /* Don't word-swap when reading in the constant. */
1592 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1593 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1595 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1596 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1601 /* If the quantity is in a register not known to be GR, spill it. */
1602 if (register_operand (operands[1], mode))
1603 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1605 gcc_assert (GET_CODE (operands[1]) == MEM);
1607 /* Don't word-swap when reading in the value. */
1608 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1609 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1611 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1612 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1616 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1618 /* We're hoping to transform everything that deals with XFmode
1619 quantities and GR registers early in the compiler. */
1620 gcc_assert (can_create_pseudo_p ());
1622 /* Op0 can't be a GR_REG here, as that case is handled above.
1623 If op0 is a register, then we spill op1, so that we now have a
1624 MEM operand. This requires creating an XFmode subreg of a TImode reg
1625 to force the spill. */
1626 if (register_operand (operands[0], mode))
1628 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1629 op1 = gen_rtx_SUBREG (mode, op1, 0);
1630 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1637 gcc_assert (GET_CODE (operands[0]) == MEM);
1639 /* Don't word-swap when writing out the value. */
1640 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1641 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1643 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1644 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1649 if (!reload_in_progress && !reload_completed)
1651 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1653 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1655 rtx memt, memx, in = operands[1];
1656 if (CONSTANT_P (in))
1657 in = validize_mem (force_const_mem (mode, in));
1658 if (GET_CODE (in) == MEM)
1659 memt = adjust_address (in, TImode, 0);
1662 memt = assign_stack_temp (TImode, 16, 0);
1663 memx = adjust_address (memt, mode, 0);
1664 emit_move_insn (memx, in);
1666 emit_move_insn (op0, memt);
1670 if (!ia64_move_ok (operands[0], operands[1]))
1671 operands[1] = force_reg (mode, operands[1]);
1677 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1678 with the expression that holds the compare result (in VOIDmode). */
1680 static GTY(()) rtx cmptf_libfunc;
1683 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1685 enum rtx_code code = GET_CODE (*expr);
1688 /* If we have a BImode input, then we already have a compare result, and
1689 do not need to emit another comparison. */
1690 if (GET_MODE (*op0) == BImode)
1692 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1695 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1696 magic number as its third argument, that indicates what to do.
1697 The return value is an integer to be compared against zero. */
1698 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1701 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1708 enum rtx_code ncode;
1711 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1714 /* 1 = equal, 0 = not equal. Equality operators do
1715 not raise FP_INVALID when given an SNaN operand. */
1716 case EQ: magic = QCMP_EQ; ncode = NE; break;
1717 case NE: magic = QCMP_EQ; ncode = EQ; break;
1718 /* isunordered() from C99. */
1719 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1720 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
/* Relational operators raise FP_INVALID when given
an SNaN operand.  */
1723 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1724 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1725 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1726 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1727 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
Expanders for buneq etc. would have to be added to ia64.md
1729 for this to be useful. */
1730 default: gcc_unreachable ();
1735 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1736 *op0, TFmode, *op1, TFmode,
1737 GEN_INT (magic), DImode);
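/* Illustrative only: for code == LE the call above amounts to
   ret = _U_Qfcmp (*op0, *op1, QCMP_LT | QCMP_EQ | QCMP_INV),
   and the comparison then reduces to testing ret != 0.  */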
1738 cmp = gen_reg_rtx (BImode);
1739 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1740 gen_rtx_fmt_ee (ncode, BImode,
1743 insns = get_insns ();
1746 emit_libcall_block (insns, cmp, cmp,
1747 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1752 cmp = gen_reg_rtx (BImode);
1753 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1754 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1758 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1763 /* Generate an integral vector comparison. Return true if the condition has
1764 been reversed, and so the sense of the comparison should be inverted. */
1767 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1768 rtx dest, rtx op0, rtx op1)
1770 bool negate = false;
1773 /* Canonicalize the comparison to EQ, GT, GTU. */
1784 code = reverse_condition (code);
1790 code = reverse_condition (code);
1796 code = swap_condition (code);
1797 x = op0, op0 = op1, op1 = x;
1804 /* Unsigned parallel compare is not supported by the hardware. Play some
1805 tricks to turn this into a signed comparison against 0. */
/* Subtract (-(INT MAX) - 1) from both operands to make
them signed.  */
1816 mask = GEN_INT (0x80000000);
1817 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1818 mask = force_reg (mode, mask);
1819 t1 = gen_reg_rtx (mode);
1820 emit_insn (gen_subv2si3 (t1, op0, mask));
1821 t2 = gen_reg_rtx (mode);
1822 emit_insn (gen_subv2si3 (t2, op1, mask));
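/* Illustrative only: biasing both operands by 0x80000000 maps the
   unsigned range onto the signed range while preserving order, so an
   unsigned compare of the originals equals a signed compare of the
   biased values.  E.g. comparing 0xffffffff with 1: after the bias
   they become 0x7fffffff and 0x80000001, and the signed compare
   0x7fffffff > (int) 0x80000001 gives the same (true) answer.  */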
1831 /* Perform a parallel unsigned saturating subtraction. */
1832 x = gen_reg_rtx (mode);
1833 emit_insn (gen_rtx_SET (VOIDmode, x,
1834 gen_rtx_US_MINUS (mode, op0, op1)));
1838 op1 = CONST0_RTX (mode);
1847 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1848 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1853 /* Emit an integral vector conditional move. */
1856 ia64_expand_vecint_cmov (rtx operands[])
1858 enum machine_mode mode = GET_MODE (operands[0]);
1859 enum rtx_code code = GET_CODE (operands[3]);
1863 cmp = gen_reg_rtx (mode);
1864 negate = ia64_expand_vecint_compare (code, mode, cmp,
1865 operands[4], operands[5]);
1867 ot = operands[1+negate];
1868 of = operands[2-negate];
1870 if (ot == CONST0_RTX (mode))
1872 if (of == CONST0_RTX (mode))
1874 emit_move_insn (operands[0], ot);
1878 x = gen_rtx_NOT (mode, cmp);
1879 x = gen_rtx_AND (mode, x, of);
1880 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1882 else if (of == CONST0_RTX (mode))
1884 x = gen_rtx_AND (mode, cmp, ot);
1885 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1891 t = gen_reg_rtx (mode);
1892 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1893 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1895 f = gen_reg_rtx (mode);
1896 x = gen_rtx_NOT (mode, cmp);
1897 x = gen_rtx_AND (mode, x, operands[2-negate]);
1898 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1900 x = gen_rtx_IOR (mode, t, f);
1901 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1905 /* Emit an integral vector min or max operation. Return true if all done. */
1908 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1913 /* These four combinations are supported directly. */
1914 if (mode == V8QImode && (code == UMIN || code == UMAX))
1916 if (mode == V4HImode && (code == SMIN || code == SMAX))
1919 /* This combination can be implemented with only saturating subtraction. */
1920 if (mode == V4HImode && code == UMAX)
1922 rtx x, tmp = gen_reg_rtx (mode);
1924 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1925 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1927 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
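/* Illustrative only: US_MINUS yields a - b when a > b and 0 otherwise,
   so (a -us b) + b equals a when a > b and b otherwise, i.e. umax (a, b).
   E.g. a = 3, b = 7: (3 -us 7) + 7 = 0 + 7 = 7.  */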
1931 /* Everything else implemented via vector comparisons. */
1932 xops[0] = operands[0];
1933 xops[4] = xops[1] = operands[1];
1934 xops[5] = xops[2] = operands[2];
1953 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1955 ia64_expand_vecint_cmov (xops);
1959 /* The vectors LO and HI each contain N halves of a double-wide vector.
1960 Reassemble either the first N/2 or the second N/2 elements. */
1963 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
1965 enum machine_mode mode = GET_MODE (lo);
1966 rtx (*gen) (rtx, rtx, rtx);
1972 gen = highp ? gen_vec_interleave_highv8qi : gen_vec_interleave_lowv8qi;
1975 gen = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
1981 x = gen_lowpart (mode, out);
1982 if (TARGET_BIG_ENDIAN)
1983 x = gen (x, hi, lo);
1985 x = gen (x, lo, hi);
1989 /* Return a vector of the sign-extension of VEC. */
1992 ia64_unpack_sign (rtx vec, bool unsignedp)
1994 enum machine_mode mode = GET_MODE (vec);
1995 rtx zero = CONST0_RTX (mode);
2001 rtx sign = gen_reg_rtx (mode);
2004 neg = ia64_expand_vecint_compare (LT, mode, sign, vec, zero);
2011 /* Emit an integral vector unpack operation. */
2014 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
2016 rtx sign = ia64_unpack_sign (operands[1], unsignedp);
2017 ia64_unpack_assemble (operands[0], operands[1], sign, highp);
/* Emit an integral vector widening sum operation.  */
2023 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2025 enum machine_mode wmode;
2028 sign = ia64_unpack_sign (operands[1], unsignedp);
2030 wmode = GET_MODE (operands[0]);
2031 l = gen_reg_rtx (wmode);
2032 h = gen_reg_rtx (wmode);
2034 ia64_unpack_assemble (l, operands[1], sign, false);
2035 ia64_unpack_assemble (h, operands[1], sign, true);
2037 t = expand_binop (wmode, add_optab, l, operands[2], NULL, 0, OPTAB_DIRECT);
2038 t = expand_binop (wmode, add_optab, h, t, operands[0], 0, OPTAB_DIRECT);
2039 if (t != operands[0])
2040 emit_move_insn (operands[0], t);
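/* In other words, the widening sum computed above is, as a sketch,

       operands[0] = hi_half (operands[1])
                     + (lo_half (operands[1]) + operands[2])

   where each half of operands[1] has already been sign- or zero-extended
   to the wide mode by ia64_unpack_assemble.  */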
2043 /* Emit a signed or unsigned V8QI dot product operation. */
2046 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
2048 rtx op1, op2, sn1, sn2, l1, l2, h1, h2;
2049 rtx p1, p2, p3, p4, s1, s2, s3;
2053 sn1 = ia64_unpack_sign (op1, unsignedp);
2054 sn2 = ia64_unpack_sign (op2, unsignedp);
2056 l1 = gen_reg_rtx (V4HImode);
2057 l2 = gen_reg_rtx (V4HImode);
2058 h1 = gen_reg_rtx (V4HImode);
2059 h2 = gen_reg_rtx (V4HImode);
2060 ia64_unpack_assemble (l1, op1, sn1, false);
2061 ia64_unpack_assemble (l2, op2, sn2, false);
2062 ia64_unpack_assemble (h1, op1, sn1, true);
2063 ia64_unpack_assemble (h2, op2, sn2, true);
2065 p1 = gen_reg_rtx (V2SImode);
2066 p2 = gen_reg_rtx (V2SImode);
2067 p3 = gen_reg_rtx (V2SImode);
2068 p4 = gen_reg_rtx (V2SImode);
2069 emit_insn (gen_pmpy2_even (p1, l1, l2));
2070 emit_insn (gen_pmpy2_even (p2, h1, h2));
2071 emit_insn (gen_pmpy2_odd (p3, l1, l2));
2072 emit_insn (gen_pmpy2_odd (p4, h1, h2));
2074 s1 = gen_reg_rtx (V2SImode);
2075 s2 = gen_reg_rtx (V2SImode);
2076 s3 = gen_reg_rtx (V2SImode);
2077 emit_insn (gen_addv2si3 (s1, p1, p2));
2078 emit_insn (gen_addv2si3 (s2, p3, p4));
2079 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
2080 emit_insn (gen_addv2si3 (operands[0], s2, s3));
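/* The reduction tree built above, sketched with even/odd denoting the
   even- and odd-numbered halfword lanes of the widened inputs:

       p1 = even (l1) * even (l2)       p3 = odd (l1) * odd (l2)
       p2 = even (h1) * even (h2)       p4 = odd (h1) * odd (h2)

       operands[0] = (p3 + p4) + ((p1 + p2) + operands[3])

   so each of the eight byte products lands in one of the two 32-bit
   accumulator lanes exactly once.  */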
2083 /* Emit the appropriate sequence for a call. */
2086 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2091 addr = XEXP (addr, 0);
2092 addr = convert_memory_address (DImode, addr);
2093 b0 = gen_rtx_REG (DImode, R_BR (0));
2095 /* ??? Should do this for functions known to bind local too. */
2096 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2099 insn = gen_sibcall_nogp (addr);
2101 insn = gen_call_nogp (addr, b0);
2103 insn = gen_call_value_nogp (retval, addr, b0);
2104 insn = emit_call_insn (insn);
2109 insn = gen_sibcall_gp (addr);
2111 insn = gen_call_gp (addr, b0);
2113 insn = gen_call_value_gp (retval, addr, b0);
2114 insn = emit_call_insn (insn);
2116 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2120 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2122 if (TARGET_ABI_OPEN_VMS)
2123 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2124 gen_rtx_REG (DImode, GR_REG (25)));
2128 reg_emitted (enum ia64_frame_regs r)
2130 if (emitted_frame_related_regs[r] == 0)
2131 emitted_frame_related_regs[r] = current_frame_info.r[r];
2133 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2137 get_reg (enum ia64_frame_regs r)
2140 return current_frame_info.r[r];
2144 is_emitted (int regno)
2148 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2149 if (emitted_frame_related_regs[r] == regno)
2155 ia64_reload_gp (void)
2159 if (current_frame_info.r[reg_save_gp])
2161 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2165 HOST_WIDE_INT offset;
2168 offset = (current_frame_info.spill_cfa_off
2169 + current_frame_info.spill_size);
2170 if (frame_pointer_needed)
2172 tmp = hard_frame_pointer_rtx;
2177 tmp = stack_pointer_rtx;
2178 offset = current_frame_info.total_size - offset;
2181 offset_r = GEN_INT (offset);
2182 if (satisfies_constraint_I (offset_r))
2183 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2186 emit_move_insn (pic_offset_table_rtx, offset_r);
2187 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2188 pic_offset_table_rtx, tmp));
2191 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2194 emit_move_insn (pic_offset_table_rtx, tmp);
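/* When the GP was saved to a stack slot, the code produced by
   ia64_reload_gp above amounts to roughly (hypothetical register and
   offset, assuming the displacement fits the add immediate):

       adds r1 = 160, r12        // r12 is sp; or via the frame pointer
       ld8  r1 = [r1]            // reload the global pointer

   When reg_save_gp names a stacked local register, a single register
   move suffices instead.  */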
2198 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2199 rtx scratch_b, int noreturn_p, int sibcall_p)
2202 bool is_desc = false;
2204 /* If we find we're calling through a register, then we're actually
2205 calling through a descriptor, so load up the values. */
2206 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2211 /* ??? We are currently constrained to *not* use peep2, because
2212 we can legitimately change the global lifetime of the GP
2213 (in the form of killing where previously live). This is
2214 because a call through a descriptor doesn't use the previous
2215 value of the GP, while a direct call does, and we do not
2216 commit to either form until the split here.
2218 That said, this means that we lack precise life info for
2219 whether ADDR is dead after this call. This is not terribly
2220 important, since we can fix things up essentially for free
2221 with the POST_DEC below, but it's nice to not use it when we
2222 can immediately tell it's not necessary. */
2223 addr_dead_p = ((noreturn_p || sibcall_p
2224 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2226 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2228 /* Load the code address into scratch_b. */
2229 tmp = gen_rtx_POST_INC (Pmode, addr);
2230 tmp = gen_rtx_MEM (Pmode, tmp);
2231 emit_move_insn (scratch_r, tmp);
2232 emit_move_insn (scratch_b, scratch_r);
2234 /* Load the GP address. If ADDR is not dead here, then we must
2235 revert the change made above via the POST_INCREMENT. */
2237 tmp = gen_rtx_POST_DEC (Pmode, addr);
2240 tmp = gen_rtx_MEM (Pmode, tmp);
2241 emit_move_insn (pic_offset_table_rtx, tmp);
2248 insn = gen_sibcall_nogp (addr);
2250 insn = gen_call_value_nogp (retval, addr, retaddr);
2252 insn = gen_call_nogp (addr, retaddr);
2253 emit_call_insn (insn);
2255 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2259 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2261 This differs from the generic code in that we know about the zero-extending
2262 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2263 also know that ld.acq+cmpxchg.rel equals a full barrier.
2265 The loop we want to generate looks like
2270 new_reg = cmp_reg op val;
2271 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2272 if (cmp_reg != old_reg)
2275 Note that we only do the plain load from memory once. Subsequent
2276 iterations use the value loaded by the compare-and-swap pattern. */
2279 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2280 rtx old_dst, rtx new_dst, enum memmodel model)
2282 enum machine_mode mode = GET_MODE (mem);
2283 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2284 enum insn_code icode;
2286 /* Special case for using fetchadd. */
2287 if ((mode == SImode || mode == DImode)
2288 && (code == PLUS || code == MINUS)
2289 && fetchadd_operand (val, mode))
2292 val = GEN_INT (-INTVAL (val));
2295 old_dst = gen_reg_rtx (mode);
2299 case MEMMODEL_ACQ_REL:
2300 case MEMMODEL_SEQ_CST:
2301 emit_insn (gen_memory_barrier ());
2303 case MEMMODEL_RELAXED:
2304 case MEMMODEL_ACQUIRE:
2305 case MEMMODEL_CONSUME:
2307 icode = CODE_FOR_fetchadd_acq_si;
2309 icode = CODE_FOR_fetchadd_acq_di;
2311 case MEMMODEL_RELEASE:
2313 icode = CODE_FOR_fetchadd_rel_si;
2315 icode = CODE_FOR_fetchadd_rel_di;
2322 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2326 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2328 if (new_reg != new_dst)
2329 emit_move_insn (new_dst, new_reg);
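/* For reference, the fetchadd special case above is what keeps simple
   atomic counters cheap: something like

       __atomic_fetch_add (&counter, 1, __ATOMIC_ACQUIRE)

   on an aligned int can be expanded to a single

       fetchadd4.acq r8 = [r32], 1

   (registers hypothetical) with no compare-and-swap retry loop, because
   the addend 1 is among the few immediates fetchadd accepts.  */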
2334 /* Because of the volatile mem read, we get an ld.acq, which is the
2335 front half of the full barrier. The end half is the cmpxchg.rel.
2336 For relaxed and release memory models, we don't need this. But we
2337 also don't bother trying to prevent it either. */
2338 gcc_assert (model == MEMMODEL_RELAXED
2339 || model == MEMMODEL_RELEASE
2340 || MEM_VOLATILE_P (mem));
2342 old_reg = gen_reg_rtx (DImode);
2343 cmp_reg = gen_reg_rtx (DImode);
2344 label = gen_label_rtx ();
2348 val = simplify_gen_subreg (DImode, val, mode, 0);
2349 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2352 emit_move_insn (cmp_reg, mem);
2356 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2357 emit_move_insn (old_reg, cmp_reg);
2358 emit_move_insn (ar_ccv, cmp_reg);
2361 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2366 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2367 true, OPTAB_DIRECT);
2368 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2371 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2372 true, OPTAB_DIRECT);
2375 new_reg = gen_lowpart (mode, new_reg);
2377 emit_move_insn (new_dst, new_reg);
2381 case MEMMODEL_RELAXED:
2382 case MEMMODEL_ACQUIRE:
2383 case MEMMODEL_CONSUME:
2386 case QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
2387 case HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
2388 case SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
2389 case DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
2395 case MEMMODEL_RELEASE:
2396 case MEMMODEL_ACQ_REL:
2397 case MEMMODEL_SEQ_CST:
2400 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2401 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2402 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2403 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2413 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2415 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2418 /* Begin the assembly file. */
2421 ia64_file_start (void)
2423 default_file_start ();
2424 emit_safe_across_calls ();
2428 emit_safe_across_calls (void)
2430 unsigned int rs, re;
2437 while (rs < 64 && call_used_regs[PR_REG (rs)])
2441 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2445 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2449 fputc (',', asm_out_file);
2451 fprintf (asm_out_file, "p%u", rs);
2453 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2457 fputc ('\n', asm_out_file);
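/* The directive assembled above typically comes out as

       .pred.safe_across_calls p1-p5,p16-p63

   listing the runs of predicate registers that the ABI treats as
   preserved across calls.  */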
2460 /* Globalize a declaration. */
2463 ia64_globalize_decl_name (FILE * stream, tree decl)
2465 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2466 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2469 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2470 const char *p = TREE_STRING_POINTER (v);
2471 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2473 targetm.asm_out.globalize_label (stream, name);
2474 if (TREE_CODE (decl) == FUNCTION_DECL)
2475 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
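/* As an example of the version_id handling above, a function foo declared
   with __attribute__ ((version_id ("1.2"))) yields an alias directive of
   the form

       .alias foo#, "foo{1.2}"

   ahead of the usual globalization and .type output for the symbol.  */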
2478 /* Helper function for ia64_compute_frame_size: find an appropriate general
2479 register to spill some special register to. SPECIAL_SPILL_MASK contains
2480 bits in GR0 to GR31 that have already been allocated by this routine.
2481 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2484 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2488 if (emitted_frame_related_regs[r] != 0)
2490 regno = emitted_frame_related_regs[r];
2491 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2492 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2493 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2494 else if (current_function_is_leaf
2495 && regno >= GR_REG (1) && regno <= GR_REG (31))
2496 current_frame_info.gr_used_mask |= 1 << regno;
2501 /* If this is a leaf function, first try an otherwise unused
2502 call-clobbered register. */
2503 if (current_function_is_leaf)
2505 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2506 if (! df_regs_ever_live_p (regno)
2507 && call_used_regs[regno]
2508 && ! fixed_regs[regno]
2509 && ! global_regs[regno]
2510 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2511 && ! is_emitted (regno))
2513 current_frame_info.gr_used_mask |= 1 << regno;
2520 regno = current_frame_info.n_local_regs;
2521 /* If there is a frame pointer, then we can't use loc79, because
2522 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2523 reg_name switching code in ia64_expand_prologue. */
2524 while (regno < (80 - frame_pointer_needed))
2525 if (! is_emitted (LOC_REG (regno++)))
2527 current_frame_info.n_local_regs = regno;
2528 return LOC_REG (regno - 1);
2532 /* Failed to find a general register to spill to. Must use stack. */
2536 /* In order to make for nice schedules, we try to allocate every temporary
2537 to a different register. We must of course stay away from call-saved,
2538 fixed, and global registers. We must also stay away from registers
2539 allocated in current_frame_info.gr_used_mask, since those include regs
2540 used all through the prologue.
2542 Any register allocated here must be used immediately. The idea is to
2543 aid scheduling, not to solve data flow problems. */
2545 static int last_scratch_gr_reg;
2548 next_scratch_gr_reg (void)
2552 for (i = 0; i < 32; ++i)
2554 regno = (last_scratch_gr_reg + i + 1) & 31;
2555 if (call_used_regs[regno]
2556 && ! fixed_regs[regno]
2557 && ! global_regs[regno]
2558 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2560 last_scratch_gr_reg = regno;
2565 /* There must be _something_ available. */
2569 /* Helper function for ia64_compute_frame_size, called through
2570 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2573 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2575 unsigned int regno = REGNO (reg);
2578 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2579 for (i = 0; i < n; ++i)
2580 current_frame_info.gr_used_mask |= 1 << (regno + i);
2585 /* Returns the number of bytes offset between the frame pointer and the stack
2586 pointer for the current function. SIZE is the number of bytes of space
2587 needed for local variables. */
2590 ia64_compute_frame_size (HOST_WIDE_INT size)
2592 HOST_WIDE_INT total_size;
2593 HOST_WIDE_INT spill_size = 0;
2594 HOST_WIDE_INT extra_spill_size = 0;
2595 HOST_WIDE_INT pretend_args_size;
2598 int spilled_gr_p = 0;
2599 int spilled_fr_p = 0;
2605 if (current_frame_info.initialized)
2608 memset (&current_frame_info, 0, sizeof current_frame_info);
2609 CLEAR_HARD_REG_SET (mask);
2611 /* Don't allocate scratches to the return register. */
2612 diddle_return_value (mark_reg_gr_used_mask, NULL);
2614 /* Don't allocate scratches to the EH scratch registers. */
2615 if (cfun->machine->ia64_eh_epilogue_sp)
2616 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2617 if (cfun->machine->ia64_eh_epilogue_bsp)
2618 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2620 /* Find the size of the register stack frame. We have only 80 local
2621 registers, because we reserve 8 for the inputs and 8 for the outputs. */
2624 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2625 since we'll be adjusting that down later. */
2626 regno = LOC_REG (78) + ! frame_pointer_needed;
2627 for (; regno >= LOC_REG (0); regno--)
2628 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2630 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2632 /* For functions marked with the syscall_linkage attribute, we must mark
2633 all eight input registers as in use, so that locals aren't visible to the caller. */
2636 if (cfun->machine->n_varargs > 0
2637 || lookup_attribute ("syscall_linkage",
2638 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2639 current_frame_info.n_input_regs = 8;
2642 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2643 if (df_regs_ever_live_p (regno))
2645 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2648 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2649 if (df_regs_ever_live_p (regno))
2651 i = regno - OUT_REG (0) + 1;
2653 #ifndef PROFILE_HOOK
2654 /* When -p profiling, we need one output register for the mcount argument.
2655 Likewise for -a profiling for the bb_init_func argument. For -ax
2656 profiling, we need two output registers for the two bb_init_trace_func arguments. */
2661 current_frame_info.n_output_regs = i;
2663 /* ??? No rotating register support yet. */
2664 current_frame_info.n_rotate_regs = 0;
2666 /* Discover which registers need spilling, and how much room that
2667 will take. Begin with floating point and general registers,
2668 which will always wind up on the stack. */
2670 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2671 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2673 SET_HARD_REG_BIT (mask, regno);
2679 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2680 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2682 SET_HARD_REG_BIT (mask, regno);
2688 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2689 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2691 SET_HARD_REG_BIT (mask, regno);
2696 /* Now come all special registers that might get saved in other
2697 general registers. */
2699 if (frame_pointer_needed)
2701 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2702 /* If we did not get a register, then we take LOC79. This is guaranteed
2703 to be free, even if regs_ever_live is already set, because this is
2704 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2705 as we don't count loc79 above. */
2706 if (current_frame_info.r[reg_fp] == 0)
2708 current_frame_info.r[reg_fp] = LOC_REG (79);
2709 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2713 if (! current_function_is_leaf)
2715 /* Emit a save of BR0 if we call other functions. Do this even
2716 if this function doesn't return, as EH depends on this to be
2717 able to unwind the stack. */
2718 SET_HARD_REG_BIT (mask, BR_REG (0));
2720 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2721 if (current_frame_info.r[reg_save_b0] == 0)
2723 extra_spill_size += 8;
2727 /* Similarly for ar.pfs. */
2728 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2729 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2730 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2732 extra_spill_size += 8;
2736 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2737 registers are clobbered, so we fall back to the stack. */
2738 current_frame_info.r[reg_save_gp]
2739 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2740 if (current_frame_info.r[reg_save_gp] == 0)
2742 SET_HARD_REG_BIT (mask, GR_REG (1));
2749 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2751 SET_HARD_REG_BIT (mask, BR_REG (0));
2752 extra_spill_size += 8;
2756 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2758 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2759 current_frame_info.r[reg_save_ar_pfs]
2760 = find_gr_spill (reg_save_ar_pfs, 1);
2761 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2763 extra_spill_size += 8;
2769 /* Unwind descriptor hackery: things are most efficient if we allocate
2770 consecutive GR save registers for RP, PFS, FP in that order. However,
2771 it is absolutely critical that FP get the only hard register that's
2772 guaranteed to be free, so we allocated it first. If all three did
2773 happen to be allocated hard regs, and are consecutive, rearrange them
2774 into the preferred order now.
2776 If we have already emitted code for any of those registers,
2777 then it's already too late to change. */
2778 min_regno = MIN (current_frame_info.r[reg_fp],
2779 MIN (current_frame_info.r[reg_save_b0],
2780 current_frame_info.r[reg_save_ar_pfs]));
2781 max_regno = MAX (current_frame_info.r[reg_fp],
2782 MAX (current_frame_info.r[reg_save_b0],
2783 current_frame_info.r[reg_save_ar_pfs]));
2785 && min_regno + 2 == max_regno
2786 && (current_frame_info.r[reg_fp] == min_regno + 1
2787 || current_frame_info.r[reg_save_b0] == min_regno + 1
2788 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2789 && (emitted_frame_related_regs[reg_save_b0] == 0
2790 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2791 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2792 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2793 && (emitted_frame_related_regs[reg_fp] == 0
2794 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2796 current_frame_info.r[reg_save_b0] = min_regno;
2797 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2798 current_frame_info.r[reg_fp] = min_regno + 2;
2801 /* See if we need to store the predicate register block. */
2802 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2803 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2805 if (regno <= PR_REG (63))
2807 SET_HARD_REG_BIT (mask, PR_REG (0));
2808 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2809 if (current_frame_info.r[reg_save_pr] == 0)
2811 extra_spill_size += 8;
2815 /* ??? Mark them all as used so that register renaming and such
2816 are free to use them. */
2817 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2818 df_set_regs_ever_live (regno, true);
2821 /* If we're forced to use st8.spill, we're forced to save and restore
2822 ar.unat as well. The check for existing liveness allows inline asm
2823 to touch ar.unat. */
2824 if (spilled_gr_p || cfun->machine->n_varargs
2825 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2827 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2828 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2829 current_frame_info.r[reg_save_ar_unat]
2830 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2831 if (current_frame_info.r[reg_save_ar_unat] == 0)
2833 extra_spill_size += 8;
2838 if (df_regs_ever_live_p (AR_LC_REGNUM))
2840 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2841 current_frame_info.r[reg_save_ar_lc]
2842 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2843 if (current_frame_info.r[reg_save_ar_lc] == 0)
2845 extra_spill_size += 8;
2850 /* If we have an odd number of words of pretend arguments written to
2851 the stack, then the FR save area will be unaligned. We round the
2852 size of this area up to keep things 16 byte aligned. */
2854 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2856 pretend_args_size = crtl->args.pretend_args_size;
2858 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2859 + crtl->outgoing_args_size);
2860 total_size = IA64_STACK_ALIGN (total_size);
2862 /* We always use the 16-byte scratch area provided by the caller, but
2863 if we are a leaf function, there's no one to which we need to provide a scratch area. */
2865 if (current_function_is_leaf)
2866 total_size = MAX (0, total_size - 16);
2868 current_frame_info.total_size = total_size;
2869 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2870 current_frame_info.spill_size = spill_size;
2871 current_frame_info.extra_spill_size = extra_spill_size;
2872 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2873 current_frame_info.n_spilled = n_spilled;
2874 current_frame_info.initialized = reload_completed;
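/* A worked example with purely hypothetical numbers: a function with 40
   bytes of locals, 32 bytes of register spills, one extra 8-byte spill,
   and no pretend or outgoing argument space gets

       total_size = IA64_STACK_ALIGN (32 + 8 + 40 + 0 + 0) = 80

   and, if it is a leaf, the caller-provided 16-byte scratch area is
   reused, leaving total_size = 80 - 16 = 64.  */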
2877 /* Worker function for TARGET_CAN_ELIMINATE. */
2880 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2882 return (to == BR_REG (0) ? current_function_is_leaf : true);
2885 /* Compute the initial difference between the specified pair of registers. */
2888 ia64_initial_elimination_offset (int from, int to)
2890 HOST_WIDE_INT offset;
2892 ia64_compute_frame_size (get_frame_size ());
2895 case FRAME_POINTER_REGNUM:
2898 case HARD_FRAME_POINTER_REGNUM:
2899 if (current_function_is_leaf)
2900 offset = -current_frame_info.total_size;
2902 offset = -(current_frame_info.total_size
2903 - crtl->outgoing_args_size - 16);
2906 case STACK_POINTER_REGNUM:
2907 if (current_function_is_leaf)
2910 offset = 16 + crtl->outgoing_args_size;
2918 case ARG_POINTER_REGNUM:
2919 /* Arguments start above the 16 byte save area, unless stdarg
2920 in which case we store through the 16 byte save area. */
2923 case HARD_FRAME_POINTER_REGNUM:
2924 offset = 16 - crtl->args.pretend_args_size;
2927 case STACK_POINTER_REGNUM:
2928 offset = (current_frame_info.total_size
2929 + 16 - crtl->args.pretend_args_size);
2944 /* If there are more than a trivial number of register spills, we use
2945 two interleaved iterators so that we can get two memory references
2948 In order to simplify things in the prologue and epilogue expanders,
2949 we use helper functions to fix up the memory references after the
2950 fact with the appropriate offsets to a POST_MODIFY memory mode.
2951 The following data structure tracks the state of the two iterators
2952 while insns are being emitted. */
2954 struct spill_fill_data
2956 rtx init_after; /* point at which to emit initializations */
2957 rtx init_reg[2]; /* initial base register */
2958 rtx iter_reg[2]; /* the iterator registers */
2959 rtx *prev_addr[2]; /* address of last memory use */
2960 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2961 HOST_WIDE_INT prev_off[2]; /* last offset */
2962 int n_iter; /* number of iterators in use */
2963 int next_iter; /* next iterator to use */
2964 unsigned int save_gr_used_mask;
2967 static struct spill_fill_data spill_fill_data;
2970 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2974 spill_fill_data.init_after = get_last_insn ();
2975 spill_fill_data.init_reg[0] = init_reg;
2976 spill_fill_data.init_reg[1] = init_reg;
2977 spill_fill_data.prev_addr[0] = NULL;
2978 spill_fill_data.prev_addr[1] = NULL;
2979 spill_fill_data.prev_insn[0] = NULL;
2980 spill_fill_data.prev_insn[1] = NULL;
2981 spill_fill_data.prev_off[0] = cfa_off;
2982 spill_fill_data.prev_off[1] = cfa_off;
2983 spill_fill_data.next_iter = 0;
2984 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2986 spill_fill_data.n_iter = 1 + (n_spills > 2);
2987 for (i = 0; i < spill_fill_data.n_iter; ++i)
2989 int regno = next_scratch_gr_reg ();
2990 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2991 current_frame_info.gr_used_mask |= 1 << regno;
2996 finish_spill_pointers (void)
2998 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3002 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3004 int iter = spill_fill_data.next_iter;
3005 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3006 rtx disp_rtx = GEN_INT (disp);
3009 if (spill_fill_data.prev_addr[iter])
3011 if (satisfies_constraint_N (disp_rtx))
3013 *spill_fill_data.prev_addr[iter]
3014 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3015 gen_rtx_PLUS (DImode,
3016 spill_fill_data.iter_reg[iter],
3018 add_reg_note (spill_fill_data.prev_insn[iter],
3019 REG_INC, spill_fill_data.iter_reg[iter]);
3023 /* ??? Could use register post_modify for loads. */
3024 if (!satisfies_constraint_I (disp_rtx))
3026 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3027 emit_move_insn (tmp, disp_rtx);
3030 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3031 spill_fill_data.iter_reg[iter], disp_rtx));
3034 /* Micro-optimization: if we've created a frame pointer, it's at
3035 CFA 0, which may allow the real iterator to be initialized lower,
3036 slightly increasing parallelism. Also, if there are few saves
3037 it may eliminate the iterator entirely. */
3039 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3040 && frame_pointer_needed)
3042 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3043 set_mem_alias_set (mem, get_varargs_alias_set ());
3051 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3052 spill_fill_data.init_reg[iter]);
3057 if (!satisfies_constraint_I (disp_rtx))
3059 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3060 emit_move_insn (tmp, disp_rtx);
3064 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3065 spill_fill_data.init_reg[iter],
3072 /* Careful for being the first insn in a sequence. */
3073 if (spill_fill_data.init_after)
3074 insn = emit_insn_after (seq, spill_fill_data.init_after);
3077 rtx first = get_insns ();
3079 insn = emit_insn_before (seq, first);
3081 insn = emit_insn (seq);
3083 spill_fill_data.init_after = insn;
3086 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3088 /* ??? Not all of the spills are for varargs, but some of them are.
3089 The rest of the spills belong in an alias set of their own. But
3090 it doesn't actually hurt to include them here. */
3091 set_mem_alias_set (mem, get_varargs_alias_set ());
3093 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3094 spill_fill_data.prev_off[iter] = cfa_off;
3096 if (++iter >= spill_fill_data.n_iter)
3098 spill_fill_data.next_iter = iter;
3104 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3107 int iter = spill_fill_data.next_iter;
3110 mem = spill_restore_mem (reg, cfa_off);
3111 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3112 spill_fill_data.prev_insn[iter] = insn;
3119 RTX_FRAME_RELATED_P (insn) = 1;
3121 /* Don't even pretend that the unwind code can intuit its way
3122 through a pair of interleaved post_modify iterators. Just
3123 provide the correct answer. */
3125 if (frame_pointer_needed)
3127 base = hard_frame_pointer_rtx;
3132 base = stack_pointer_rtx;
3133 off = current_frame_info.total_size - cfa_off;
3136 add_reg_note (insn, REG_CFA_OFFSET,
3137 gen_rtx_SET (VOIDmode,
3138 gen_rtx_MEM (GET_MODE (reg),
3139 plus_constant (base, off)),
3145 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3147 int iter = spill_fill_data.next_iter;
3150 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3151 GEN_INT (cfa_off)));
3152 spill_fill_data.prev_insn[iter] = insn;
3155 /* Wrapper functions that discard the CONST_INT spill offset. These
3156 exist so that we can give gr_spill/gr_fill the offset they need and
3157 use a consistent function interface. */
3160 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3162 return gen_movdi (dest, src);
3166 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3168 return gen_fr_spill (dest, src);
3172 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3174 return gen_fr_restore (dest, src);
3177 /* Called after register allocation to add any instructions needed for the
3178 prologue. Using a prologue insn is favored compared to putting all of the
3179 instructions in output_function_prologue(), since it allows the scheduler
3180 to intermix instructions with the saves of the caller saved registers. In
3181 some cases, it might be necessary to emit a barrier instruction as the last
3182 insn to prevent such scheduling.
3184 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3185 so that the debug info generation code can handle them properly.
3187 The register save area is laid out like so:
3189 [ varargs spill area ]
3190 [ fr register spill area ]
3191 [ br register spill area ]
3192 [ ar register spill area ]
3193 [ pr register spill area ]
3194 [ gr register spill area ] */
3196 /* ??? Get inefficient code when the frame size is larger than can fit in an
3197 adds instruction. */
3200 ia64_expand_prologue (void)
3202 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
3203 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3206 ia64_compute_frame_size (get_frame_size ());
3207 last_scratch_gr_reg = 15;
3209 if (flag_stack_usage_info)
3210 current_function_static_stack_size = current_frame_info.total_size;
3214 fprintf (dump_file, "ia64 frame related registers "
3215 "recorded in current_frame_info.r[]:\n");
3216 #define PRINTREG(a) if (current_frame_info.r[a]) \
3217 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3219 PRINTREG(reg_save_b0);
3220 PRINTREG(reg_save_pr);
3221 PRINTREG(reg_save_ar_pfs);
3222 PRINTREG(reg_save_ar_unat);
3223 PRINTREG(reg_save_ar_lc);
3224 PRINTREG(reg_save_gp);
3228 /* If there is no epilogue, then we don't need some prologue insns.
3229 We need to avoid emitting the dead prologue insns, because flow
3230 will complain about them. */
3236 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
3237 if ((e->flags & EDGE_FAKE) == 0
3238 && (e->flags & EDGE_FALLTHRU) != 0)
3240 epilogue_p = (e != NULL);
3245 /* Set the local, input, and output register names. We need to do this
3246 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3247 half. If we use in/loc/out register names, then we get assembler errors
3248 in crtn.S because there is no alloc insn or regstk directive in there. */
3249 if (! TARGET_REG_NAMES)
3251 int inputs = current_frame_info.n_input_regs;
3252 int locals = current_frame_info.n_local_regs;
3253 int outputs = current_frame_info.n_output_regs;
3255 for (i = 0; i < inputs; i++)
3256 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3257 for (i = 0; i < locals; i++)
3258 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3259 for (i = 0; i < outputs; i++)
3260 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3263 /* Set the frame pointer register name. The regnum is logically loc79,
3264 but of course we'll not have allocated that many locals. Rather than
3265 worrying about renumbering the existing rtxs, we adjust the name. */
3266 /* ??? This code means that we can never use one local register when
3267 there is a frame pointer. loc79 gets wasted in this case, as it is
3268 renamed to a register that will never be used. See also the try_locals
3269 code in find_gr_spill. */
3270 if (current_frame_info.r[reg_fp])
3272 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3273 reg_names[HARD_FRAME_POINTER_REGNUM]
3274 = reg_names[current_frame_info.r[reg_fp]];
3275 reg_names[current_frame_info.r[reg_fp]] = tmp;
3278 /* We don't need an alloc instruction if we've used no outputs or locals. */
3279 if (current_frame_info.n_local_regs == 0
3280 && current_frame_info.n_output_regs == 0
3281 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3282 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3284 /* If there is no alloc, but there are input registers used, then we
3285 need a .regstk directive. */
3286 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3287 ar_pfs_save_reg = NULL_RTX;
3291 current_frame_info.need_regstk = 0;
3293 if (current_frame_info.r[reg_save_ar_pfs])
3295 regno = current_frame_info.r[reg_save_ar_pfs];
3296 reg_emitted (reg_save_ar_pfs);
3299 regno = next_scratch_gr_reg ();
3300 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3302 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3303 GEN_INT (current_frame_info.n_input_regs),
3304 GEN_INT (current_frame_info.n_local_regs),
3305 GEN_INT (current_frame_info.n_output_regs),
3306 GEN_INT (current_frame_info.n_rotate_regs)));
3307 if (current_frame_info.r[reg_save_ar_pfs])
3309 RTX_FRAME_RELATED_P (insn) = 1;
3310 add_reg_note (insn, REG_CFA_REGISTER,
3311 gen_rtx_SET (VOIDmode,
3313 gen_rtx_REG (DImode, AR_PFS_REGNUM)));
3317 /* Set up frame pointer, stack pointer, and spill iterators. */
3319 n_varargs = cfun->machine->n_varargs;
3320 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3321 stack_pointer_rtx, 0);
3323 if (frame_pointer_needed)
3325 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3326 RTX_FRAME_RELATED_P (insn) = 1;
3328 /* Force the unwind info to recognize this as defining a new CFA,
3329 rather than some temp register setup. */
3330 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3333 if (current_frame_info.total_size != 0)
3335 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3338 if (satisfies_constraint_I (frame_size_rtx))
3339 offset = frame_size_rtx;
3342 regno = next_scratch_gr_reg ();
3343 offset = gen_rtx_REG (DImode, regno);
3344 emit_move_insn (offset, frame_size_rtx);
3347 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3348 stack_pointer_rtx, offset));
3350 if (! frame_pointer_needed)
3352 RTX_FRAME_RELATED_P (insn) = 1;
3353 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3354 gen_rtx_SET (VOIDmode,
3356 gen_rtx_PLUS (DImode,
3361 /* ??? At this point we must generate a magic insn that appears to
3362 modify the stack pointer, the frame pointer, and all spill
3363 iterators. This would allow the most scheduling freedom. For
3364 now, just hard stop. */
3365 emit_insn (gen_blockage ());
3368 /* Must copy out ar.unat before doing any integer spills. */
3369 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3371 if (current_frame_info.r[reg_save_ar_unat])
3374 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3375 reg_emitted (reg_save_ar_unat);
3379 alt_regno = next_scratch_gr_reg ();
3380 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3381 current_frame_info.gr_used_mask |= 1 << alt_regno;
3384 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3385 insn = emit_move_insn (ar_unat_save_reg, reg);
3386 if (current_frame_info.r[reg_save_ar_unat])
3388 RTX_FRAME_RELATED_P (insn) = 1;
3389 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3392 /* Even if we're not going to generate an epilogue, we still
3393 need to save the register so that EH works. */
3394 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3395 emit_insn (gen_prologue_use (ar_unat_save_reg));
3398 ar_unat_save_reg = NULL_RTX;
3400 /* Spill all varargs registers. Do this before spilling any GR registers,
3401 since we want the UNAT bits for the GR registers to override the UNAT
3402 bits from varargs, which we don't care about. */
3405 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3407 reg = gen_rtx_REG (DImode, regno);
3408 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3411 /* Locate the bottom of the register save area. */
3412 cfa_off = (current_frame_info.spill_cfa_off
3413 + current_frame_info.spill_size
3414 + current_frame_info.extra_spill_size);
3416 /* Save the predicate register block either in a register or in memory. */
3417 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3419 reg = gen_rtx_REG (DImode, PR_REG (0));
3420 if (current_frame_info.r[reg_save_pr] != 0)
3422 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3423 reg_emitted (reg_save_pr);
3424 insn = emit_move_insn (alt_reg, reg);
3426 /* ??? Denote pr spill/fill by a DImode move that modifies all
3427 64 hard registers. */
3428 RTX_FRAME_RELATED_P (insn) = 1;
3429 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3431 /* Even if we're not going to generate an epilogue, we still
3432 need to save the register so that EH works. */
3434 emit_insn (gen_prologue_use (alt_reg));
3438 alt_regno = next_scratch_gr_reg ();
3439 alt_reg = gen_rtx_REG (DImode, alt_regno);
3440 insn = emit_move_insn (alt_reg, reg);
3441 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3446 /* Handle AR regs in numerical order. All of them get special handling. */
3447 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3448 && current_frame_info.r[reg_save_ar_unat] == 0)
3450 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3451 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3455 /* The alloc insn already copied ar.pfs into a general register. The
3456 only thing we have to do now is copy that register to a stack slot
3457 if we'd not allocated a local register for the job. */
3458 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3459 && current_frame_info.r[reg_save_ar_pfs] == 0)
3461 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3462 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3466 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3468 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3469 if (current_frame_info.r[reg_save_ar_lc] != 0)
3471 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3472 reg_emitted (reg_save_ar_lc);
3473 insn = emit_move_insn (alt_reg, reg);
3474 RTX_FRAME_RELATED_P (insn) = 1;
3475 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3477 /* Even if we're not going to generate an epilogue, we still
3478 need to save the register so that EH works. */
3480 emit_insn (gen_prologue_use (alt_reg));
3484 alt_regno = next_scratch_gr_reg ();
3485 alt_reg = gen_rtx_REG (DImode, alt_regno);
3486 emit_move_insn (alt_reg, reg);
3487 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3492 /* Save the return pointer. */
3493 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3495 reg = gen_rtx_REG (DImode, BR_REG (0));
3496 if (current_frame_info.r[reg_save_b0] != 0)
3498 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3499 reg_emitted (reg_save_b0);
3500 insn = emit_move_insn (alt_reg, reg);
3501 RTX_FRAME_RELATED_P (insn) = 1;
3502 add_reg_note (insn, REG_CFA_REGISTER,
3503 gen_rtx_SET (VOIDmode, alt_reg, pc_rtx));
3505 /* Even if we're not going to generate an epilogue, we still
3506 need to save the register so that EH works. */
3508 emit_insn (gen_prologue_use (alt_reg));
3512 alt_regno = next_scratch_gr_reg ();
3513 alt_reg = gen_rtx_REG (DImode, alt_regno);
3514 emit_move_insn (alt_reg, reg);
3515 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3520 if (current_frame_info.r[reg_save_gp])
3522 reg_emitted (reg_save_gp);
3523 insn = emit_move_insn (gen_rtx_REG (DImode,
3524 current_frame_info.r[reg_save_gp]),
3525 pic_offset_table_rtx);
3528 /* We should now be at the base of the gr/br/fr spill area. */
3529 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3530 + current_frame_info.spill_size));
3532 /* Spill all general registers. */
3533 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3534 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3536 reg = gen_rtx_REG (DImode, regno);
3537 do_spill (gen_gr_spill, reg, cfa_off, reg);
3541 /* Spill the rest of the BR registers. */
3542 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3543 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3545 alt_regno = next_scratch_gr_reg ();
3546 alt_reg = gen_rtx_REG (DImode, alt_regno);
3547 reg = gen_rtx_REG (DImode, regno);
3548 emit_move_insn (alt_reg, reg);
3549 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3553 /* Align the frame and spill all FR registers. */
3554 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3555 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3557 gcc_assert (!(cfa_off & 15));
3558 reg = gen_rtx_REG (XFmode, regno);
3559 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3563 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3565 finish_spill_pointers ();
3568 /* Output the textual info surrounding the prologue. */
3571 ia64_start_function (FILE *file, const char *fnname,
3572 tree decl ATTRIBUTE_UNUSED)
3574 #if VMS_DEBUGGING_INFO
3576 && debug_info_level > DINFO_LEVEL_NONE
3577 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
3579 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
3580 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
3581 dwarf2out_vms_debug_main_pointer ();
3586 fputs ("\t.proc ", file);
3587 assemble_name (file, fnname);
3589 ASM_OUTPUT_LABEL (file, fnname);
3592 /* Called after register allocation to add any instructions needed for the
3593 epilogue. Using an epilogue insn is favored compared to putting all of the
3594 instructions in output_function_epilogue(), since it allows the scheduler
3595 to intermix instructions with the saves of the caller saved registers. In
3596 some cases, it might be necessary to emit a barrier instruction as the last
3597 insn to prevent such scheduling. */
3600 ia64_expand_epilogue (int sibcall_p)
3602 rtx insn, reg, alt_reg, ar_unat_save_reg;
3603 int regno, alt_regno, cfa_off;
3605 ia64_compute_frame_size (get_frame_size ());
3607 /* If there is a frame pointer, then we use it instead of the stack
3608 pointer, so that the stack pointer does not need to be valid when
3609 the epilogue starts. See EXIT_IGNORE_STACK. */
3610 if (frame_pointer_needed)
3611 setup_spill_pointers (current_frame_info.n_spilled,
3612 hard_frame_pointer_rtx, 0);
3614 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3615 current_frame_info.total_size);
3617 if (current_frame_info.total_size != 0)
3619 /* ??? At this point we must generate a magic insn that appears to
3620 modify the spill iterators and the frame pointer. This would
3621 allow the most scheduling freedom. For now, just hard stop. */
3622 emit_insn (gen_blockage ());
3625 /* Locate the bottom of the register save area. */
3626 cfa_off = (current_frame_info.spill_cfa_off
3627 + current_frame_info.spill_size
3628 + current_frame_info.extra_spill_size);
3630 /* Restore the predicate registers. */
3631 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3633 if (current_frame_info.r[reg_save_pr] != 0)
3635 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3636 reg_emitted (reg_save_pr);
3640 alt_regno = next_scratch_gr_reg ();
3641 alt_reg = gen_rtx_REG (DImode, alt_regno);
3642 do_restore (gen_movdi_x, alt_reg, cfa_off);
3645 reg = gen_rtx_REG (DImode, PR_REG (0));
3646 emit_move_insn (reg, alt_reg);
3649 /* Restore the application registers. */
3651 /* Load the saved unat from the stack, but do not restore it until
3652 after the GRs have been restored. */
3653 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3655 if (current_frame_info.r[reg_save_ar_unat] != 0)
3658 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3659 reg_emitted (reg_save_ar_unat);
3663 alt_regno = next_scratch_gr_reg ();
3664 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3665 current_frame_info.gr_used_mask |= 1 << alt_regno;
3666 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3671 ar_unat_save_reg = NULL_RTX;
3673 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3675 reg_emitted (reg_save_ar_pfs);
3676 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3677 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3678 emit_move_insn (reg, alt_reg);
3680 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3682 alt_regno = next_scratch_gr_reg ();
3683 alt_reg = gen_rtx_REG (DImode, alt_regno);
3684 do_restore (gen_movdi_x, alt_reg, cfa_off);
3686 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3687 emit_move_insn (reg, alt_reg);
3690 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3692 if (current_frame_info.r[reg_save_ar_lc] != 0)
3694 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3695 reg_emitted (reg_save_ar_lc);
3699 alt_regno = next_scratch_gr_reg ();
3700 alt_reg = gen_rtx_REG (DImode, alt_regno);
3701 do_restore (gen_movdi_x, alt_reg, cfa_off);
3704 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3705 emit_move_insn (reg, alt_reg);
3708 /* Restore the return pointer. */
3709 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3711 if (current_frame_info.r[reg_save_b0] != 0)
3713 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3714 reg_emitted (reg_save_b0);
3718 alt_regno = next_scratch_gr_reg ();
3719 alt_reg = gen_rtx_REG (DImode, alt_regno);
3720 do_restore (gen_movdi_x, alt_reg, cfa_off);
3723 reg = gen_rtx_REG (DImode, BR_REG (0));
3724 emit_move_insn (reg, alt_reg);
3727 /* We should now be at the base of the gr/br/fr spill area. */
3728 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3729 + current_frame_info.spill_size));
3731 /* The GP may be stored on the stack in the prologue, but it's
3732 never restored in the epilogue. Skip the stack slot. */
3733 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3736 /* Restore all general registers. */
3737 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3738 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3740 reg = gen_rtx_REG (DImode, regno);
3741 do_restore (gen_gr_restore, reg, cfa_off);
3745 /* Restore the branch registers. */
3746 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3747 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3749 alt_regno = next_scratch_gr_reg ();
3750 alt_reg = gen_rtx_REG (DImode, alt_regno);
3751 do_restore (gen_movdi_x, alt_reg, cfa_off);
3753 reg = gen_rtx_REG (DImode, regno);
3754 emit_move_insn (reg, alt_reg);
3757 /* Restore floating point registers. */
3758 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3759 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3761 gcc_assert (!(cfa_off & 15));
3762 reg = gen_rtx_REG (XFmode, regno);
3763 do_restore (gen_fr_restore_x, reg, cfa_off);
3767 /* Restore ar.unat for real. */
3768 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3770 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3771 emit_move_insn (reg, ar_unat_save_reg);
3774 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3776 finish_spill_pointers ();
3778 if (current_frame_info.total_size
3779 || cfun->machine->ia64_eh_epilogue_sp
3780 || frame_pointer_needed)
3782 /* ??? At this point we must generate a magic insn that appears to
3783 modify the spill iterators, the stack pointer, and the frame
3784 pointer. This would allow the most scheduling freedom. For now, just hard stop. */
3786 emit_insn (gen_blockage ());
3789 if (cfun->machine->ia64_eh_epilogue_sp)
3790 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3791 else if (frame_pointer_needed)
3793 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3794 RTX_FRAME_RELATED_P (insn) = 1;
3795 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
3797 else if (current_frame_info.total_size)
3799 rtx offset, frame_size_rtx;
3801 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3802 if (satisfies_constraint_I (frame_size_rtx))
3803 offset = frame_size_rtx;
3806 regno = next_scratch_gr_reg ();
3807 offset = gen_rtx_REG (DImode, regno);
3808 emit_move_insn (offset, frame_size_rtx);
3811 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3814 RTX_FRAME_RELATED_P (insn) = 1;
3815 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3816 gen_rtx_SET (VOIDmode,
3818 gen_rtx_PLUS (DImode,
3823 if (cfun->machine->ia64_eh_epilogue_bsp)
3824 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3827 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3830 int fp = GR_REG (2);
3831 /* We need a throw away register here, r0 and r1 are reserved,
3832 so r2 is the first available call clobbered register. If
3833 there was a frame_pointer register, we may have swapped the
3834 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
3835 sure we're using the string "r2" when emitting the register
3836 name for the assembler. */
3837 if (current_frame_info.r[reg_fp]
3838 && current_frame_info.r[reg_fp] == GR_REG (2))
3839 fp = HARD_FRAME_POINTER_REGNUM;
3841 /* We must emit an alloc to force the input registers to become output
3842 registers. Otherwise, if the callee tries to pass its parameters
3843 through to another call without an intervening alloc, then these
3845 /* ??? We don't need to preserve all input registers. We only need to
3846 preserve those input registers used as arguments to the sibling call.
3847 It is unclear how to compute that number here. */
3848 if (current_frame_info.n_input_regs != 0)
3850 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3852 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3853 const0_rtx, const0_rtx,
3854 n_inputs, const0_rtx));
3855 RTX_FRAME_RELATED_P (insn) = 1;
3857 /* ??? We need to mark the alloc as frame-related so that it gets
3858 passed into ia64_asm_unwind_emit for ia64-specific unwinding.
3859 But there's nothing dwarf2 related to be done wrt the register
3860 windows. If we do nothing, dwarf2out will abort on the UNSPEC;
3861 the empty parallel means dwarf2out will not see anything. */
3862 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3863 gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0)));
3868 /* Return 1 if br.ret can do all the work required to return from a function. */
3872 ia64_direct_return (void)
3874 if (reload_completed && ! frame_pointer_needed)
3876 ia64_compute_frame_size (get_frame_size ());
3878 return (current_frame_info.total_size == 0
3879 && current_frame_info.n_spilled == 0
3880 && current_frame_info.r[reg_save_b0] == 0
3881 && current_frame_info.r[reg_save_pr] == 0
3882 && current_frame_info.r[reg_save_ar_pfs] == 0
3883 && current_frame_info.r[reg_save_ar_unat] == 0
3884 && current_frame_info.r[reg_save_ar_lc] == 0);
3889 /* Return the magic cookie that we use to hold the return address
3890 during early compilation. */
3893 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3897 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3900 /* Split this value after reload, now that we know where the return
3901 address is saved. */
3904 ia64_split_return_addr_rtx (rtx dest)
3908 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3910 if (current_frame_info.r[reg_save_b0] != 0)
3912 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3913 reg_emitted (reg_save_b0);
3921 /* Compute offset from CFA for BR0. */
3922 /* ??? Must be kept in sync with ia64_expand_prologue. */
3923 off = (current_frame_info.spill_cfa_off
3924 + current_frame_info.spill_size);
3925 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3926 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3929 /* Convert CFA offset to a register based offset. */
3930 if (frame_pointer_needed)
3931 src = hard_frame_pointer_rtx;
3934 src = stack_pointer_rtx;
3935 off += current_frame_info.total_size;
3938 /* Load address into scratch register. */
3939 off_r = GEN_INT (off);
3940 if (satisfies_constraint_I (off_r))
3941 emit_insn (gen_adddi3 (dest, src, off_r));
3944 emit_move_insn (dest, off_r);
3945 emit_insn (gen_adddi3 (dest, src, dest));
3948 src = gen_rtx_MEM (Pmode, dest);
3952 src = gen_rtx_REG (DImode, BR_REG (0));
3954 emit_move_insn (dest, src);
3958 ia64_hard_regno_rename_ok (int from, int to)
3960 /* Don't clobber any of the registers we reserved for the prologue. */
3963 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3964 if (to == current_frame_info.r[r]
3965 || from == current_frame_info.r[r]
3966 || to == emitted_frame_related_regs[r]
3967 || from == emitted_frame_related_regs[r])
3970 /* Don't use output registers outside the register frame. */
3971 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3974 /* Retain even/oddness on predicate register pairs. */
3975 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3976 return (from & 1) == (to & 1);
3981 /* Target hook for assembling integer objects. Handle word-sized
3982 aligned objects and detect the cases when @fptr is needed. */
3985 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3987 if (size == POINTER_SIZE / BITS_PER_UNIT
3988 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3989 && GET_CODE (x) == SYMBOL_REF
3990 && SYMBOL_REF_FUNCTION_P (x))
3992 static const char * const directive[2][2] = {
3993 /* 64-bit pointer */ /* 32-bit pointer */
3994 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3995 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3997 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3998 output_addr_const (asm_out_file, x);
3999 fputs (")\n", asm_out_file);
4002 return default_assemble_integer (x, size, aligned_p);
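/* For instance, a 64-bit static initializer holding a function address,
   such as void (*fp) (void) = foo;, goes through the @fptr path above and
   is emitted as roughly

       data8 @fptr(foo#)

   so that the linker materializes an official function descriptor instead
   of using the raw code address.  */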
4005 /* Emit the function prologue. */
4008 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4010 int mask, grsave, grsave_prev;
4012 if (current_frame_info.need_regstk)
4013 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4014 current_frame_info.n_input_regs,
4015 current_frame_info.n_local_regs,
4016 current_frame_info.n_output_regs,
4017 current_frame_info.n_rotate_regs);
4019 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4022 /* Emit the .prologue directive. */
4025 grsave = grsave_prev = 0;
4026 if (current_frame_info.r[reg_save_b0] != 0)
4029 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4031 if (current_frame_info.r[reg_save_ar_pfs] != 0
4032 && (grsave_prev == 0
4033 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4036 if (grsave_prev == 0)
4037 grsave = current_frame_info.r[reg_save_ar_pfs];
4038 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4040 if (current_frame_info.r[reg_fp] != 0
4041 && (grsave_prev == 0
4042 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4045 if (grsave_prev == 0)
4046 grsave = HARD_FRAME_POINTER_REGNUM;
4047 grsave_prev = current_frame_info.r[reg_fp];
4049 if (current_frame_info.r[reg_save_pr] != 0
4050 && (grsave_prev == 0
4051 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4054 if (grsave_prev == 0)
4055 grsave = current_frame_info.r[reg_save_pr];
4058 if (mask && TARGET_GNU_AS)
4059 fprintf (file, "\t.prologue %d, %d\n", mask,
4060 ia64_dbx_register_number (grsave));
4062 fputs ("\t.prologue\n", file);
4064 /* Emit a .spill directive, if necessary, to relocate the base of
4065 the register spill area. */
4066 if (current_frame_info.spill_cfa_off != -16)
4067 fprintf (file, "\t.spill %ld\n",
4068 (long) (current_frame_info.spill_cfa_off
4069 + current_frame_info.spill_size));
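/* For a typical frame the output of this hook is a short run of unwind
   directives, for example (operand values illustrative only):

       .prologue 12, 34

   where the mask and GR operand tell the unwinder which of the special
   registers handled above were saved into consecutive general registers
   starting at that GR; .regstk and .spill lines are added only when the
   register stack partition or the spill base needs to be described.  */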
4072 /* Emit the .body directive at the scheduled end of the prologue. */
4075 ia64_output_function_end_prologue (FILE *file)
4077 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4080 fputs ("\t.body\n", file);
4083 /* Emit the function epilogue. */
4086 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4087 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4091 if (current_frame_info.r[reg_fp])
4093 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4094 reg_names[HARD_FRAME_POINTER_REGNUM]
4095 = reg_names[current_frame_info.r[reg_fp]];
4096 reg_names[current_frame_info.r[reg_fp]] = tmp;
4097 reg_emitted (reg_fp);
4099 if (! TARGET_REG_NAMES)
4101 for (i = 0; i < current_frame_info.n_input_regs; i++)
4102 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4103 for (i = 0; i < current_frame_info.n_local_regs; i++)
4104 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4105 for (i = 0; i < current_frame_info.n_output_regs; i++)
4106 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4109 current_frame_info.initialized = 0;
4113 ia64_dbx_register_number (int regno)
4115 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4116 from its home at loc79 to something inside the register frame. We
4117 must perform the same renumbering here for the debug info. */
4118 if (current_frame_info.r[reg_fp])
4120 if (regno == HARD_FRAME_POINTER_REGNUM)
4121 regno = current_frame_info.r[reg_fp];
4122 else if (regno == current_frame_info.r[reg_fp])
4123 regno = HARD_FRAME_POINTER_REGNUM;
4126 if (IN_REGNO_P (regno))
4127 return 32 + regno - IN_REG (0);
4128 else if (LOC_REGNO_P (regno))
4129 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4130 else if (OUT_REGNO_P (regno))
4131 return (32 + current_frame_info.n_input_regs
4132 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4137 /* Implement TARGET_TRAMPOLINE_INIT.
4139 The trampoline should set the static chain pointer to the value placed
4140 into the trampoline and should branch to the specified routine.
4141 To make the normal indirect-subroutine calling convention work,
4142 the trampoline must look like a function descriptor; the first
4143 word being the target address and the second being the target's
4146 We abuse the concept of a global pointer by arranging for it
4147 to point to the data we need to load. The complete trampoline
4148 has the following form:
4150 +-------------------+ \
4151 TRAMP: | __ia64_trampoline | |
4152 +-------------------+ > fake function descriptor
4154 +-------------------+ /
4155 | target descriptor |
4156 +-------------------+
4158 +-------------------+
4162 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4164 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4165 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4167 /* The Intel assembler requires that the global __ia64_trampoline symbol
4168 be declared explicitly. */
4171 static bool declared_ia64_trampoline = false;
4173 if (!declared_ia64_trampoline)
4175 declared_ia64_trampoline = true;
4176 (*targetm.asm_out.globalize_label) (asm_out_file,
4177 "__ia64_trampoline");
4181 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4182 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4183 fnaddr = convert_memory_address (Pmode, fnaddr);
4184 static_chain = convert_memory_address (Pmode, static_chain);
4186 /* Load up our iterator. */
4187 addr_reg = copy_to_reg (addr);
4188 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4190 /* The first two words are the fake descriptor:
4191 __ia64_trampoline, ADDR+16. */
4192 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4193 if (TARGET_ABI_OPEN_VMS)
4195 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4196 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4197 relocation against function symbols to make it identical to the
4198 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4199 strict ELF and dereference to get the bare code address. */
4200 rtx reg = gen_reg_rtx (Pmode);
4201 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4202 emit_move_insn (reg, tramp);
4203 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4206 emit_move_insn (m_tramp, tramp);
4207 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4208 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4210 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
4211 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4212 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4214 /* The third word is the target descriptor. */
4215 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4216 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4217 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4219 /* The fourth word is the static chain. */
4220 emit_move_insn (m_tramp, static_chain);
4223 /* Do any needed setup for a variadic function. CUM has not been updated
4224 for the last named argument, which has type TYPE and mode MODE.
4226 We generate the actual spill instructions during prologue generation. */
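/* Illustrative note (editor's addition, not part of the original source):
   assuming MAX_ARGUMENT_SLOTS is 8 and UNITS_PER_WORD is 8, a variadic
   function such as

       int f (const char *fmt, ...);

   has one named argument occupying one slot, so after advancing past it
   next_cum.words == 1, n == 7, *pretend_size == 56 bytes, and seven incoming
   argument registers are left to be spilled by the prologue code.  */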
4229 ia64_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
4230 tree type, int * pretend_size,
4231 int second_time ATTRIBUTE_UNUSED)
4233 CUMULATIVE_ARGS next_cum = *get_cumulative_args (cum);
4235 /* Skip the current argument. */
4236 ia64_function_arg_advance (pack_cumulative_args (&next_cum), mode, type, 1);
4238 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4240 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4241 *pretend_size = n * UNITS_PER_WORD;
4242 cfun->machine->n_varargs = n;
4246 /* Check whether TYPE is a homogeneous floating point aggregate. If
4247 it is, return the mode of the floating point type that appears
4248 in all leaves. If it is not, return VOIDmode.
4250 An aggregate is a homogeneous floating point aggregate if all
4251 fields/elements in it have the same floating point type (e.g.,
4252 SFmode). 128-bit quad-precision floats are excluded.
4254 Variable sized aggregates should never arrive here, since we should
4255 have already decided to pass them by reference. Top-level zero-sized
4256 aggregates are excluded because our parallels crash the middle-end. */
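/* Illustrative note (editor's addition, not part of the original source):
   under the definition above,

       struct hfa { double a, b, c; };   -- every leaf is DFmode, so an HFA
       struct mix { double a; int b; };  -- mixed leaf types, so not an HFA

   hfa_element_mode would return DFmode for the first struct and VOIDmode for
   the second; aggregates built from 128-bit quad-precision floats are also
   rejected, per the exclusion noted above.  */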
4258 static enum machine_mode
4259 hfa_element_mode (const_tree type, bool nested)
4261 enum machine_mode element_mode = VOIDmode;
4262 enum machine_mode mode;
4263 enum tree_code code = TREE_CODE (type);
4264 int know_element_mode = 0;
4267 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4272 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4273 case BOOLEAN_TYPE: case POINTER_TYPE:
4274 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4275 case LANG_TYPE: case FUNCTION_TYPE:
4278 /* Fortran complex types are supposed to be HFAs, so we need to handle
4279 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4282 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4283 && TYPE_MODE (type) != TCmode)
4284 return GET_MODE_INNER (TYPE_MODE (type));
4289 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4290 mode if this is contained within an aggregate. */
4291 if (nested && TYPE_MODE (type) != TFmode)
4292 return TYPE_MODE (type);
4297 return hfa_element_mode (TREE_TYPE (type), 1);
4301 case QUAL_UNION_TYPE:
4302 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4304 if (TREE_CODE (t) != FIELD_DECL)
4307 mode = hfa_element_mode (TREE_TYPE (t), 1);
4308 if (know_element_mode)
4310 if (mode != element_mode)
4313 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4317 know_element_mode = 1;
4318 element_mode = mode;
4321 return element_mode;
4324 /* If we reach here, we probably have some front-end specific type
4325 that the backend doesn't know about. This can happen via the
4326 aggregate_value_p call in init_function_start. All we can do is
4327 ignore unknown tree types. */
4334 /* Return the number of words required to hold a quantity of TYPE and MODE
4335 when passed as an argument. */
4337 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4341 if (mode == BLKmode)
4342 words = int_size_in_bytes (type);
4344 words = GET_MODE_SIZE (mode);
4346 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
4349 /* Return the number of registers that should be skipped so the current
4350 argument (described by TYPE and WORDS) will be properly aligned.
4352 Integer and float arguments larger than 8 bytes start at the next
4353 even boundary. Aggregates larger than 8 bytes start at the next
4354 even boundary if the aggregate has 16 byte alignment. Note that
4355 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4356 but are still to be aligned in registers.
4358 ??? The ABI does not specify how to handle aggregates with
4359 alignment from 9 to 15 bytes, or greater than 16. We handle them
4360 all as if they had 16 byte alignment. Such aggregates can occur
4361 only if gcc extensions are used. */
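/* Illustrative note (editor's addition, not part of the original source):
   as a sketch of the rule above, if cum->words is odd (say 3) and the next
   argument is a 16-byte-aligned aggregate larger than 8 bytes, one register
   slot is skipped so the argument starts at the even slot 4; on VMS, or when
   cum->words is already even, no slot is skipped.  */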
4363 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4364 const_tree type, int words)
4366 /* No registers are skipped on VMS. */
4367 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4371 && TREE_CODE (type) != INTEGER_TYPE
4372 && TREE_CODE (type) != REAL_TYPE)
4373 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4378 /* Return rtx for register where argument is passed, or zero if it is passed
4380 /* ??? 128-bit quad-precision floats are always passed in general
4384 ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
4385 const_tree type, bool named, bool incoming)
4387 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4389 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4390 int words = ia64_function_arg_words (type, mode);
4391 int offset = ia64_function_arg_offset (cum, type, words);
4392 enum machine_mode hfa_mode = VOIDmode;
4394 /* For OPEN VMS, emit the instruction setting up the argument register here,
4395 so that it is emitted together with the other argument setup insns.
4396 This is not the conceptually best place to do this, but it is the
4397 easiest, as we have convenient access to the cumulative args info. */
4399 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4402 unsigned HOST_WIDE_INT regval = cum->words;
4405 for (i = 0; i < 8; i++)
4406 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4408 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4412 /* If all argument slots are used, then it must go on the stack. */
4413 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4416 /* Check for and handle homogeneous FP aggregates. */
4418 hfa_mode = hfa_element_mode (type, 0);
4420 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4421 and unprototyped hfas are passed specially. */
4422 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4426 int fp_regs = cum->fp_regs;
4427 int int_regs = cum->words + offset;
4428 int hfa_size = GET_MODE_SIZE (hfa_mode);
4432 /* If prototyped, pass it in FR regs then GR regs.
4433 If not prototyped, pass it in both FR and GR regs.
4435 If this is an SFmode aggregate, then it is possible to run out of
4436 FR regs while GR regs are still left. In that case, we pass the
4437 remaining part in the GR regs. */
4439 /* Fill the FP regs. We do this always. We stop if we reach the end
4440 of the argument, the last FP register, or the last argument slot. */
4442 byte_size = ((mode == BLKmode)
4443 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4444 args_byte_size = int_regs * UNITS_PER_WORD;
4446 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4447 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4449 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4450 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4454 args_byte_size += hfa_size;
4458 /* If no prototype, then the whole thing must go in GR regs. */
4459 if (! cum->prototype)
4461 /* If this is an SFmode aggregate, then we might have some left over
4462 that needs to go in GR regs. */
4463 else if (byte_size != offset)
4464 int_regs += offset / UNITS_PER_WORD;
4466 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4468 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4470 enum machine_mode gr_mode = DImode;
4471 unsigned int gr_size;
4473 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4474 then this goes in a GR reg left adjusted/little endian, right
4475 adjusted/big endian. */
4476 /* ??? Currently this is handled wrong, because 4-byte hunks are
4477 always right adjusted/little endian. */
4480 /* If we have an even 4 byte hunk because the aggregate is a
4481 multiple of 4 bytes in size, then this goes in a GR reg right
4482 adjusted/little endian. */
4483 else if (byte_size - offset == 4)
4486 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4487 gen_rtx_REG (gr_mode, (basereg
4491 gr_size = GET_MODE_SIZE (gr_mode);
4493 if (gr_size == UNITS_PER_WORD
4494 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4496 else if (gr_size > UNITS_PER_WORD)
4497 int_regs += gr_size / UNITS_PER_WORD;
4499 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4502 /* On OpenVMS a variable argument is passed either in Rn or Fn. */
4503 else if (TARGET_ABI_OPEN_VMS && named == 0)
4505 if (FLOAT_MODE_P (mode))
4506 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4508 return gen_rtx_REG (mode, basereg + cum->words);
4511 /* Integral types and aggregates go in general registers. If we have run
4512 out of FR registers, then FP values must also go in general registers.
4513 This can happen when we have an SFmode HFA. */
4514 else if (mode == TFmode || mode == TCmode
4515 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4517 int byte_size = ((mode == BLKmode)
4518 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4519 if (BYTES_BIG_ENDIAN
4520 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4521 && byte_size < UNITS_PER_WORD
4524 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4525 gen_rtx_REG (DImode,
4526 (basereg + cum->words
4529 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4532 return gen_rtx_REG (mode, basereg + cum->words + offset);
4536 /* If there is a prototype, then FP values go in an FR register when
4537 named, and in a GR register when unnamed. */
4538 else if (cum->prototype)
4541 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4542 /* In big-endian mode, an anonymous SFmode value must be represented
4543 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4544 the value into the high half of the general register. */
4545 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4546 return gen_rtx_PARALLEL (mode,
4548 gen_rtx_EXPR_LIST (VOIDmode,
4549 gen_rtx_REG (DImode, basereg + cum->words + offset),
4552 return gen_rtx_REG (mode, basereg + cum->words + offset);
4554 /* If there is no prototype, then FP values go in both FR and GR
4558 /* See comment above. */
4559 enum machine_mode inner_mode =
4560 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4562 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4563 gen_rtx_REG (mode, (FR_ARG_FIRST
4566 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4567 gen_rtx_REG (inner_mode,
4568 (basereg + cum->words
4572 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4576 /* Implement TARGET_FUNCTION_ARG target hook. */
4579 ia64_function_arg (cumulative_args_t cum, enum machine_mode mode,
4580 const_tree type, bool named)
4582 return ia64_function_arg_1 (cum, mode, type, named, false);
4585 /* Implement TARGET_FUNCTION_INCOMING_ARG target hook. */
4588 ia64_function_incoming_arg (cumulative_args_t cum,
4589 enum machine_mode mode,
4590 const_tree type, bool named)
4592 return ia64_function_arg_1 (cum, mode, type, named, true);
4595 /* Return number of bytes, at the beginning of the argument, that must be
4596 put in registers. 0 if the argument is entirely in registers or entirely
4600 ia64_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
4601 tree type, bool named ATTRIBUTE_UNUSED)
4603 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4605 int words = ia64_function_arg_words (type, mode);
4606 int offset = ia64_function_arg_offset (cum, type, words);
4608 /* If all argument slots are used, then it must go on the stack. */
4609 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4612 /* It doesn't matter whether the argument goes in FR or GR regs. If
4613 it fits within the 8 argument slots, then it goes entirely in
4614 registers. If it extends past the last argument slot, then the rest
4615 goes on the stack. */
4617 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4620 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
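/* Illustrative note (editor's addition, not part of the original source):
   with 8 argument slots and 8-byte words, an argument needing 4 words that
   starts with cum->words == 6 and offset == 0 extends past the last slot, so
   (8 - 6 - 0) * 8 == 16 bytes are passed in registers and the remaining
   16 bytes of the argument go on the stack.  */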
4623 /* Return ivms_arg_type based on machine_mode. */
4625 static enum ivms_arg_type
4626 ia64_arg_type (enum machine_mode mode)
4639 /* Update CUM to point after this argument. This is patterned after
4640 ia64_function_arg. */
4643 ia64_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
4644 const_tree type, bool named)
4646 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
4647 int words = ia64_function_arg_words (type, mode);
4648 int offset = ia64_function_arg_offset (cum, type, words);
4649 enum machine_mode hfa_mode = VOIDmode;
4651 /* If all arg slots are already full, then there is nothing to do. */
4652 if (cum->words >= MAX_ARGUMENT_SLOTS)
4654 cum->words += words + offset;
4658 cum->atypes[cum->words] = ia64_arg_type (mode);
4659 cum->words += words + offset;
4661 /* Check for and handle homogeneous FP aggregates. */
4663 hfa_mode = hfa_element_mode (type, 0);
4665 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4666 and unprototyped hfas are passed specially. */
4667 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4669 int fp_regs = cum->fp_regs;
4670 /* This is the original value of cum->words + offset. */
4671 int int_regs = cum->words - words;
4672 int hfa_size = GET_MODE_SIZE (hfa_mode);
4676 /* If prototyped, pass it in FR regs then GR regs.
4677 If not prototyped, pass it in both FR and GR regs.
4679 If this is an SFmode aggregate, then it is possible to run out of
4680 FR regs while GR regs are still left. In that case, we pass the
4681 remaining part in the GR regs. */
4683 /* Fill the FP regs. We do this always. We stop if we reach the end
4684 of the argument, the last FP register, or the last argument slot. */
4686 byte_size = ((mode == BLKmode)
4687 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4688 args_byte_size = int_regs * UNITS_PER_WORD;
4690 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4691 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4694 args_byte_size += hfa_size;
4698 cum->fp_regs = fp_regs;
4701 /* On OpenVMS a variable argument is passed either in Rn or Fn. */
4702 else if (TARGET_ABI_OPEN_VMS && named == 0)
4704 cum->int_regs = cum->words;
4705 cum->fp_regs = cum->words;
4708 /* Integral types and aggregates go in general registers. So do TFmode FP
4709 values. If we have run out of FR registers, then other FP values must
4710 also go in general registers. This can happen when we have an SFmode HFA. */
4711 else if (mode == TFmode || mode == TCmode
4712 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4713 cum->int_regs = cum->words;
4715 /* If there is a prototype, then FP values go in an FR register when
4716 named, and in a GR register when unnamed. */
4717 else if (cum->prototype)
4720 cum->int_regs = cum->words;
4722 /* ??? Complex types should not reach here. */
4723 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4725 /* If there is no prototype, then FP values go in both FR and GR
4729 /* ??? Complex types should not reach here. */
4730 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4731 cum->int_regs = cum->words;
4735 /* Arguments with alignment larger than 8 bytes start at the next even
4736 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4737 even though their normal alignment is 8 bytes. See ia64_function_arg. */
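/* Illustrative note (editor's addition, not part of the original source):
   for example, a 16-byte-aligned aggregate, or TFmode on ILP32 HPUX, gets a
   boundary of PARM_BOUNDARY * 2 from the hook below, while an ordinary 8-byte
   integer argument keeps the default PARM_BOUNDARY.  */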
4740 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
4742 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4743 return PARM_BOUNDARY * 2;
4747 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4748 return PARM_BOUNDARY * 2;
4750 return PARM_BOUNDARY;
4753 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4754 return PARM_BOUNDARY * 2;
4756 return PARM_BOUNDARY;
4759 /* True if it is OK to do sibling call optimization for the specified
4760 call expression EXP. DECL will be the called function, or NULL if
4761 this is an indirect call. */
4763 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4765 /* We can't perform a sibcall if the current function has the syscall_linkage
4767 if (lookup_attribute ("syscall_linkage",
4768 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4771 /* We must always return with our current GP. This means we can
4772 only sibcall to functions defined in the current module unless
4773 TARGET_CONST_GP is set to true. */
4774 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
4778 /* Implement va_arg. */
4781 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4784 /* Variable sized types are passed by reference. */
4785 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4787 tree ptrtype = build_pointer_type (type);
4788 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4789 return build_va_arg_indirect_ref (addr);
4792 /* Aggregate arguments with alignment larger than 8 bytes start at
4793 the next even boundary. Integer and floating point arguments
4794 do so if they are larger than 8 bytes, whether or not they are
4795 also aligned larger than 8 bytes. */
4796 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4797 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4799 tree t = fold_build_pointer_plus_hwi (valist, 2 * UNITS_PER_WORD - 1);
4800 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4801 build_int_cst (TREE_TYPE (t), -2 * UNITS_PER_WORD));
4802 gimplify_assign (unshare_expr (valist), t, pre_p);
4805 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4808 /* Return 1 if the function return value is returned in memory. Return 0 if it is
4812 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4814 enum machine_mode mode;
4815 enum machine_mode hfa_mode;
4816 HOST_WIDE_INT byte_size;
4818 mode = TYPE_MODE (valtype);
4819 byte_size = GET_MODE_SIZE (mode);
4820 if (mode == BLKmode)
4822 byte_size = int_size_in_bytes (valtype);
4827 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4829 hfa_mode = hfa_element_mode (valtype, 0);
4830 if (hfa_mode != VOIDmode)
4832 int hfa_size = GET_MODE_SIZE (hfa_mode);
4834 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4839 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4845 /* Return rtx for register that holds the function return value. */
4848 ia64_function_value (const_tree valtype,
4849 const_tree fn_decl_or_type,
4850 bool outgoing ATTRIBUTE_UNUSED)
4852 enum machine_mode mode;
4853 enum machine_mode hfa_mode;
4855 const_tree func = fn_decl_or_type;
4858 && !DECL_P (fn_decl_or_type))
4861 mode = TYPE_MODE (valtype);
4862 hfa_mode = hfa_element_mode (valtype, 0);
4864 if (hfa_mode != VOIDmode)
4872 hfa_size = GET_MODE_SIZE (hfa_mode);
4873 byte_size = ((mode == BLKmode)
4874 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4876 for (i = 0; offset < byte_size; i++)
4878 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4879 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4883 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4885 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4886 return gen_rtx_REG (mode, FR_ARG_FIRST);
4889 bool need_parallel = false;
4891 /* In big-endian mode, we need to manage the layout of aggregates
4892 in the registers so that we get the bits properly aligned in
4893 the highpart of the registers. */
4894 if (BYTES_BIG_ENDIAN
4895 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4896 need_parallel = true;
4898 /* Something like struct S { long double x; char a[0] } is not an
4899 HFA structure, and therefore doesn't go in fp registers. But
4900 the middle-end will give it XFmode anyway, and XFmode values
4901 don't normally fit in integer registers. So we need to smuggle
4902 the value inside a parallel. */
4903 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4904 need_parallel = true;
4914 bytesize = int_size_in_bytes (valtype);
4915 /* An empty PARALLEL is invalid here, but the return value
4916 doesn't matter for empty structs. */
4918 return gen_rtx_REG (mode, GR_RET_FIRST);
4919 for (i = 0; offset < bytesize; i++)
4921 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4922 gen_rtx_REG (DImode,
4925 offset += UNITS_PER_WORD;
4927 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4930 mode = ia64_promote_function_mode (valtype, mode, &unsignedp,
4931 func ? TREE_TYPE (func) : NULL_TREE,
4934 return gen_rtx_REG (mode, GR_RET_FIRST);
4938 /* Worker function for TARGET_LIBCALL_VALUE. */
4941 ia64_libcall_value (enum machine_mode mode,
4942 const_rtx fun ATTRIBUTE_UNUSED)
4944 return gen_rtx_REG (mode,
4945 (((GET_MODE_CLASS (mode) == MODE_FLOAT
4946 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4947 && (mode) != TFmode)
4948 ? FR_RET_FIRST : GR_RET_FIRST));
4951 /* Worker function for FUNCTION_VALUE_REGNO_P. */
4954 ia64_function_value_regno_p (const unsigned int regno)
4956 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
4957 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
4960 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4961 We need to emit DTP-relative relocations. */
4964 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4966 gcc_assert (size == 4 || size == 8);
4968 fputs ("\tdata4.ua\t@dtprel(", file);
4970 fputs ("\tdata8.ua\t@dtprel(", file);
4971 output_addr_const (file, x);
4975 /* Print a memory address as an operand to reference that memory location. */
4977 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4978 also call this from ia64_print_operand for memory addresses. */
4981 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4982 rtx address ATTRIBUTE_UNUSED)
4986 /* Print an operand to an assembler instruction.
4987 C Swap and print a comparison operator.
4988 D Print an FP comparison operator.
4989 E Print 32 - constant, for SImode shifts as extract.
4990 e Print 64 - constant, for DImode rotates.
4991 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4992 a floating point register emitted normally.
4993 G A floating point constant.
4994 I Invert a predicate register by adding 1.
4995 J Select the proper predicate register for a condition.
4996 j Select the inverse predicate register for a condition.
4997 O Append .acq for volatile load.
4998 P Postincrement of a MEM.
4999 Q Append .rel for volatile store.
5000 R Print .s .d or nothing for a single, double or no truncation.
5001 S Shift amount for shladd instruction.
5002 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5003 for Intel assembler.
5004 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5005 for Intel assembler.
5006 X A pair of floating point registers.
5007 r Print register name, or constant 0 as r0. HP compatibility for
5009 v Print vector constant value as an 8-byte integer value. */
5012 ia64_print_operand (FILE * file, rtx x, int code)
5019 /* Handled below. */
5024 enum rtx_code c = swap_condition (GET_CODE (x));
5025 fputs (GET_RTX_NAME (c), file);
5030 switch (GET_CODE (x))
5054 str = GET_RTX_NAME (GET_CODE (x));
5061 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5065 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5069 if (x == CONST0_RTX (GET_MODE (x)))
5070 str = reg_names [FR_REG (0)];
5071 else if (x == CONST1_RTX (GET_MODE (x)))
5072 str = reg_names [FR_REG (1)];
5075 gcc_assert (GET_CODE (x) == REG);
5076 str = reg_names [REGNO (x)];
5085 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5086 real_to_target (val, &rv, GET_MODE (x));
5087 if (GET_MODE (x) == SFmode)
5088 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5089 else if (GET_MODE (x) == DFmode)
5090 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5092 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5095 output_operand_lossage ("invalid %%G mode");
5100 fputs (reg_names [REGNO (x) + 1], file);
5106 unsigned int regno = REGNO (XEXP (x, 0));
5107 if (GET_CODE (x) == EQ)
5111 fputs (reg_names [regno], file);
5116 if (MEM_VOLATILE_P (x))
5117 fputs(".acq", file);
5122 HOST_WIDE_INT value;
5124 switch (GET_CODE (XEXP (x, 0)))
5130 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5131 if (GET_CODE (x) == CONST_INT)
5135 gcc_assert (GET_CODE (x) == REG);
5136 fprintf (file, ", %s", reg_names[REGNO (x)]);
5142 value = GET_MODE_SIZE (GET_MODE (x));
5146 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5150 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5155 if (MEM_VOLATILE_P (x))
5156 fputs(".rel", file);
5160 if (x == CONST0_RTX (GET_MODE (x)))
5162 else if (x == CONST1_RTX (GET_MODE (x)))
5164 else if (x == CONST2_RTX (GET_MODE (x)))
5167 output_operand_lossage ("invalid %%R value");
5171 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5175 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5177 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5183 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5185 const char *prefix = "0x";
5186 if (INTVAL (x) & 0x80000000)
5188 fprintf (file, "0xffffffff");
5191 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5198 unsigned int regno = REGNO (x);
5199 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5204 /* If this operand is the constant zero, write it as register zero.
5205 Any register, zero, or CONST_INT value is OK here. */
5206 if (GET_CODE (x) == REG)
5207 fputs (reg_names[REGNO (x)], file);
5208 else if (x == CONST0_RTX (GET_MODE (x)))
5210 else if (GET_CODE (x) == CONST_INT)
5211 output_addr_const (file, x);
5213 output_operand_lossage ("invalid %%r value");
5217 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5218 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5225 /* For conditional branches, returns or calls, substitute
5226 sptk, dptk, dpnt, or spnt for %s. */
5227 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5230 int pred_val = INTVAL (XEXP (x, 0));
5232 /* Guess top and bottom 10% statically predicted. */
5233 if (pred_val < REG_BR_PROB_BASE / 50
5234 && br_prob_note_reliable_p (x))
5236 else if (pred_val < REG_BR_PROB_BASE / 2)
5238 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5239 || !br_prob_note_reliable_p (x))
5244 else if (GET_CODE (current_output_insn) == CALL_INSN)
5249 fputs (which, file);
5254 x = current_insn_predicate;
5257 unsigned int regno = REGNO (XEXP (x, 0));
5258 if (GET_CODE (x) == EQ)
5260 fprintf (file, "(%s) ", reg_names [regno]);
5265 output_operand_lossage ("ia64_print_operand: unknown code");
5269 switch (GET_CODE (x))
5271 /* This happens for the spill/restore instructions. */
5276 /* ... fall through ... */
5279 fputs (reg_names [REGNO (x)], file);
5284 rtx addr = XEXP (x, 0);
5285 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5286 addr = XEXP (addr, 0);
5287 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5292 output_addr_const (file, x);
5299 /* Worker function for TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5302 ia64_print_operand_punct_valid_p (unsigned char code)
5304 return (code == '+' || code == ',');
5307 /* Compute a (partial) cost for rtx X. Return true if the complete
5308 cost has been computed, and false if subexpressions should be
5309 scanned. In either case, *TOTAL contains the cost result. */
5310 /* ??? This is incomplete. */
5313 ia64_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
5314 int *total, bool speed ATTRIBUTE_UNUSED)
5322 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5325 if (satisfies_constraint_I (x))
5327 else if (satisfies_constraint_J (x))
5330 *total = COSTS_N_INSNS (1);
5333 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5336 *total = COSTS_N_INSNS (1);
5341 *total = COSTS_N_INSNS (1);
5347 *total = COSTS_N_INSNS (3);
5351 *total = COSTS_N_INSNS (4);
5355 /* For multiplies wider than HImode, we have to go to the FPU,
5356 which normally involves copies. Plus there's the latency
5357 of the multiply itself, and the latency of the instructions to
5358 transfer integer regs to FP regs. */
5359 if (FLOAT_MODE_P (GET_MODE (x)))
5360 *total = COSTS_N_INSNS (4);
5361 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5362 *total = COSTS_N_INSNS (10);
5364 *total = COSTS_N_INSNS (2);
5369 if (FLOAT_MODE_P (GET_MODE (x)))
5371 *total = COSTS_N_INSNS (4);
5379 *total = COSTS_N_INSNS (1);
5386 /* We make divide expensive, so that divide-by-constant will be
5387 optimized to a multiply. */
5388 *total = COSTS_N_INSNS (60);
5396 /* Calculate the cost of moving data from a register in class FROM to
5397 one in class TO, using MODE. */
5400 ia64_register_move_cost (enum machine_mode mode, reg_class_t from,
5403 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5404 if (to == ADDL_REGS)
5406 if (from == ADDL_REGS)
5409 /* All costs are symmetric, so reduce cases by putting the
5410 lower-numbered class as the destination. */
5413 reg_class_t tmp = to;
5414 to = from, from = tmp;
5417 /* Moving between FR and GR in XFmode must be more expensive than 2,
5418 so that we get secondary memory reloads. Between FR_REGS,
5419 we have to make this at least as expensive as memory_move_cost
5420 to avoid spectacularly poor register class preferencing. */
5421 if (mode == XFmode || mode == RFmode)
5423 if (to != GR_REGS || from != GR_REGS)
5424 return memory_move_cost (mode, to, false);
5432 /* Moving between PR registers takes two insns. */
5433 if (from == PR_REGS)
5435 /* Moving between PR and anything but GR is impossible. */
5436 if (from != GR_REGS)
5437 return memory_move_cost (mode, to, false);
5441 /* Moving between BR and anything but GR is impossible. */
5442 if (from != GR_REGS && from != GR_AND_BR_REGS)
5443 return memory_move_cost (mode, to, false);
5448 /* Moving between AR and anything but GR is impossible. */
5449 if (from != GR_REGS)
5450 return memory_move_cost (mode, to, false);
5456 case GR_AND_FR_REGS:
5457 case GR_AND_BR_REGS:
5468 /* Calculate the cost of moving data of MODE from a register to or from
5472 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5474 bool in ATTRIBUTE_UNUSED)
5476 if (rclass == GENERAL_REGS
5477 || rclass == FR_REGS
5478 || rclass == FP_REGS
5479 || rclass == GR_AND_FR_REGS)
5485 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5486 on RCLASS to use when copying X into that class. */
5489 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5495 /* Don't allow volatile mem reloads into floating point registers.
5496 This is defined to force reload to choose the r/m case instead
5497 of the f/f case when reloading (set (reg fX) (mem/v)). */
5498 if (MEM_P (x) && MEM_VOLATILE_P (x))
5501 /* Force all unrecognized constants into the constant pool. */
5519 /* This function returns the register class required for a secondary
5520 register when copying between one of the registers in RCLASS, and X,
5521 using MODE. A return value of NO_REGS means that no secondary register
5525 ia64_secondary_reload_class (enum reg_class rclass,
5526 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5530 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5531 regno = true_regnum (x);
5538 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5539 interaction. We end up with two pseudos with overlapping lifetimes
5540 both of which are equiv to the same constant, and both which need
5541 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5542 changes depending on the path length, which means the qty_first_reg
5543 check in make_regs_eqv can give different answers at different times.
5544 At some point I'll probably need a reload_indi pattern to handle
5547 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5548 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5549 non-general registers for good measure. */
5550 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5553 /* This is needed if a pseudo used as a call_operand gets spilled to a
5555 if (GET_CODE (x) == MEM)
5561 /* Need to go through general registers to get to other class regs. */
5562 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5565 /* This can happen when a paradoxical subreg is an operand to the
5567 /* ??? This shouldn't be necessary after instruction scheduling is
5568 enabled, because paradoxical subregs are not accepted by
5569 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5570 stop the paradoxical subreg stupidity in the *_operand functions
5572 if (GET_CODE (x) == MEM
5573 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5574 || GET_MODE (x) == QImode))
5577 /* This can happen because of the ior/and/etc patterns that accept FP
5578 registers as operands. If the third operand is a constant, then it
5579 needs to be reloaded into a FP register. */
5580 if (GET_CODE (x) == CONST_INT)
5583 /* This can happen because of register elimination in a muldi3 insn.
5584 E.g. `26107 * (unsigned long)&u'. */
5585 if (GET_CODE (x) == PLUS)
5590 /* ??? This happens if we cse/gcse a BImode value across a call,
5591 and the function has a nonlocal goto. This is because global
5592 does not allocate call crossing pseudos to hard registers when
5593 crtl->has_nonlocal_goto is true. This is relatively
5594 common for C++ programs that use exceptions. To reproduce,
5595 return NO_REGS and compile libstdc++. */
5596 if (GET_CODE (x) == MEM)
5599 /* This can happen when we take a BImode subreg of a DImode value,
5600 and that DImode value winds up in some non-GR register. */
5601 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5613 /* Implement targetm.unspec_may_trap_p hook. */
5615 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5617 if (GET_CODE (x) == UNSPEC)
5619 switch (XINT (x, 1))
5625 case UNSPEC_CHKACLR:
5627 /* These unspecs are just wrappers. */
5628 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5632 return default_unspec_may_trap_p (x, flags);
5636 /* Parse the -mfixed-range= option string. */
5639 fix_range (const char *const_str)
5642 char *str, *dash, *comma;
5644 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5645 REG2 are either register names or register numbers. The effect
5646 of this option is to mark the registers in the range from REG1 to
5647 REG2 as ``fixed'' so they won't be used by the compiler. This is
5648 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
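/* Illustrative note (editor's addition, not part of the original source):
   an option string such as "f12-f15,f32-f127" would be parsed below as two
   ranges, marking f12 through f15 and f32 through f127 as fixed so the
   compiler will not allocate them.  */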
5650 i = strlen (const_str);
5651 str = (char *) alloca (i + 1);
5652 memcpy (str, const_str, i + 1);
5656 dash = strchr (str, '-');
5659 warning (0, "value of -mfixed-range must have form REG1-REG2");
5664 comma = strchr (dash + 1, ',');
5668 first = decode_reg_name (str);
5671 warning (0, "unknown register name: %s", str);
5675 last = decode_reg_name (dash + 1);
5678 warning (0, "unknown register name: %s", dash + 1);
5686 warning (0, "%s-%s is an empty range", str, dash + 1);
5690 for (i = first; i <= last; ++i)
5691 fixed_regs[i] = call_used_regs[i] = 1;
5701 /* Implement TARGET_OPTION_OVERRIDE. */
5704 ia64_option_override (void)
5707 cl_deferred_option *opt;
5708 VEC(cl_deferred_option,heap) *vec
5709 = (VEC(cl_deferred_option,heap) *) ia64_deferred_options;
5711 FOR_EACH_VEC_ELT (cl_deferred_option, vec, i, opt)
5713 switch (opt->opt_index)
5715 case OPT_mfixed_range_:
5716 fix_range (opt->arg);
5724 if (TARGET_AUTO_PIC)
5725 target_flags |= MASK_CONST_GP;
5727 /* Numerous experiments show that IRA-based loop pressure
5728 calculation works better for RTL loop invariant motion on targets
5729 with enough (>= 32) registers. It is an expensive optimization.
5730 So it is on only for peak performance. */
5732 flag_ira_loop_pressure = 1;
5735 ia64_section_threshold = (global_options_set.x_g_switch_value
5737 : IA64_DEFAULT_GVALUE);
5739 init_machine_status = ia64_init_machine_status;
5741 if (align_functions <= 0)
5742 align_functions = 64;
5743 if (align_loops <= 0)
5745 if (TARGET_ABI_OPEN_VMS)
5748 ia64_override_options_after_change();
5751 /* Implement targetm.override_options_after_change. */
5754 ia64_override_options_after_change (void)
5757 && !global_options_set.x_flag_selective_scheduling
5758 && !global_options_set.x_flag_selective_scheduling2)
5760 flag_selective_scheduling2 = 1;
5761 flag_sel_sched_pipelining = 1;
5763 if (mflag_sched_control_spec == 2)
5765 /* Control speculation is on by default for the selective scheduler,
5766 but not for the Haifa scheduler. */
5767 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
5769 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
5771 /* FIXME: remove this once we implement breaking autoinsns as
5772 a transformation. */
5773 flag_auto_inc_dec = 0;
5777 /* Initialize the record of emitted frame related registers. */
5779 void ia64_init_expanders (void)
5781 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5784 static struct machine_function *
5785 ia64_init_machine_status (void)
5787 return ggc_alloc_cleared_machine_function ();
5790 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5791 static enum attr_type ia64_safe_type (rtx);
5793 static enum attr_itanium_class
5794 ia64_safe_itanium_class (rtx insn)
5796 if (recog_memoized (insn) >= 0)
5797 return get_attr_itanium_class (insn);
5798 else if (DEBUG_INSN_P (insn))
5799 return ITANIUM_CLASS_IGNORE;
5801 return ITANIUM_CLASS_UNKNOWN;
5804 static enum attr_type
5805 ia64_safe_type (rtx insn)
5807 if (recog_memoized (insn) >= 0)
5808 return get_attr_type (insn);
5810 return TYPE_UNKNOWN;
5813 /* The following collection of routines emit instruction group stop bits as
5814 necessary to avoid dependencies. */
5816 /* Need to track some additional registers as far as serialization is
5817 concerned so we can properly handle br.call and br.ret. We could
5818 make these registers visible to gcc, but since these registers are
5819 never explicitly used in gcc generated code, it seems wasteful to
5820 do so (plus it would make the call and return patterns needlessly
5822 #define REG_RP (BR_REG (0))
5823 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5824 /* This is used for volatile asms which may require a stop bit immediately
5825 before and after them. */
5826 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5827 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5828 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5830 /* For each register, we keep track of how it has been written in the
5831 current instruction group.
5833 If a register is written unconditionally (no qualifying predicate),
5834 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5836 If a register is written if its qualifying predicate P is true, we
5837 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5838 may be written again by the complement of P (P^1) and when this happens,
5839 WRITE_COUNT gets set to 2.
5841 The result of this is that whenever an insn attempts to write a register
5842 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5844 If a predicate register is written by a floating-point insn, we set
5845 WRITTEN_BY_FP to true.
5847 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5848 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
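/* Illustrative note (editor's addition, not part of the original source):
   as a concrete walk-through of the scheme above, a first predicated write
   such as "(p6) mov r14 = r15" sets WRITE_COUNT to 1 and FIRST_PRED to p6;
   a later write under the complementary predicate, "(p7) mov r14 = r16",
   bumps WRITE_COUNT to 2; any further write to r14 in the same instruction
   group then requires an insn group barrier first.  */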
5850 #if GCC_VERSION >= 4000
5851 #define RWS_FIELD_TYPE __extension__ unsigned short
5853 #define RWS_FIELD_TYPE unsigned int
5855 struct reg_write_state
5857 RWS_FIELD_TYPE write_count : 2;
5858 RWS_FIELD_TYPE first_pred : 10;
5859 RWS_FIELD_TYPE written_by_fp : 1;
5860 RWS_FIELD_TYPE written_by_and : 1;
5861 RWS_FIELD_TYPE written_by_or : 1;
5864 /* Cumulative info for the current instruction group. */
5865 struct reg_write_state rws_sum[NUM_REGS];
5866 #ifdef ENABLE_CHECKING
5867 /* Bitmap whether a register has been written in the current insn. */
5868 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5869 / HOST_BITS_PER_WIDEST_FAST_INT];
5872 rws_insn_set (int regno)
5874 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5875 SET_HARD_REG_BIT (rws_insn, regno);
5879 rws_insn_test (int regno)
5881 return TEST_HARD_REG_BIT (rws_insn, regno);
5884 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5885 unsigned char rws_insn[2];
5888 rws_insn_set (int regno)
5890 if (regno == REG_AR_CFM)
5892 else if (regno == REG_VOLATILE)
5897 rws_insn_test (int regno)
5899 if (regno == REG_AR_CFM)
5901 if (regno == REG_VOLATILE)
5907 /* Indicates whether this is the first instruction after a stop bit,
5908 in which case we don't need another stop bit. Without this,
5909 ia64_variable_issue will die when scheduling an alloc. */
5910 static int first_instruction;
5912 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5913 RTL for one instruction. */
5916 unsigned int is_write : 1; /* Is register being written? */
5917 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5918 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5919 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5920 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5921 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5924 static void rws_update (int, struct reg_flags, int);
5925 static int rws_access_regno (int, struct reg_flags, int);
5926 static int rws_access_reg (rtx, struct reg_flags, int);
5927 static void update_set_flags (rtx, struct reg_flags *);
5928 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5929 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5930 static void init_insn_group_barriers (void);
5931 static int group_barrier_needed (rtx);
5932 static int safe_group_barrier_needed (rtx);
5933 static int in_safe_group_barrier;
5935 /* Update *RWS for REGNO, which is being written by the current instruction,
5936 with predicate PRED, and associated register flags in FLAGS. */
5939 rws_update (int regno, struct reg_flags flags, int pred)
5942 rws_sum[regno].write_count++;
5944 rws_sum[regno].write_count = 2;
5945 rws_sum[regno].written_by_fp |= flags.is_fp;
5946 /* ??? Not tracking and/or across differing predicates. */
5947 rws_sum[regno].written_by_and = flags.is_and;
5948 rws_sum[regno].written_by_or = flags.is_or;
5949 rws_sum[regno].first_pred = pred;
5952 /* Handle an access to register REGNO of type FLAGS using predicate register
5953 PRED. Update rws_sum array. Return 1 if this access creates
5954 a dependency with an earlier instruction in the same group. */
5957 rws_access_regno (int regno, struct reg_flags flags, int pred)
5959 int need_barrier = 0;
5961 gcc_assert (regno < NUM_REGS);
5963 if (! PR_REGNO_P (regno))
5964 flags.is_and = flags.is_or = 0;
5970 rws_insn_set (regno);
5971 write_count = rws_sum[regno].write_count;
5973 switch (write_count)
5976 /* The register has not been written yet. */
5977 if (!in_safe_group_barrier)
5978 rws_update (regno, flags, pred);
5982 /* The register has been written via a predicate. Treat
5983 it like an unconditional write and do not try to check
5984 for a complementary pred reg in an earlier write. */
5985 if (flags.is_and && rws_sum[regno].written_by_and)
5987 else if (flags.is_or && rws_sum[regno].written_by_or)
5991 if (!in_safe_group_barrier)
5992 rws_update (regno, flags, pred);
5996 /* The register has been unconditionally written already. We
5998 if (flags.is_and && rws_sum[regno].written_by_and)
6000 else if (flags.is_or && rws_sum[regno].written_by_or)
6004 if (!in_safe_group_barrier)
6006 rws_sum[regno].written_by_and = flags.is_and;
6007 rws_sum[regno].written_by_or = flags.is_or;
6017 if (flags.is_branch)
6019 /* Branches have several RAW exceptions that allow us to avoid
6022 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6023 /* RAW dependencies on branch regs are permissible as long
6024 as the writer is a non-branch instruction. Since we
6025 never generate code that uses a branch register written
6026 by a branch instruction, handling this case is
6030 if (REGNO_REG_CLASS (regno) == PR_REGS
6031 && ! rws_sum[regno].written_by_fp)
6032 /* The predicates of a branch are available within the
6033 same insn group as long as the predicate was written by
6034 something other than a floating-point instruction. */
6038 if (flags.is_and && rws_sum[regno].written_by_and)
6040 if (flags.is_or && rws_sum[regno].written_by_or)
6043 switch (rws_sum[regno].write_count)
6046 /* The register has not been written yet. */
6050 /* The register has been written via a predicate, assume we
6051 need a barrier (don't check for complementary regs). */
6056 /* The register has been unconditionally written already. We
6066 return need_barrier;
6070 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6072 int regno = REGNO (reg);
6073 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6076 return rws_access_regno (regno, flags, pred);
6079 int need_barrier = 0;
6081 need_barrier |= rws_access_regno (regno + n, flags, pred);
6082 return need_barrier;
6086 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
6087 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
6090 update_set_flags (rtx x, struct reg_flags *pflags)
6092 rtx src = SET_SRC (x);
6094 switch (GET_CODE (src))
6100 /* There are four cases here:
6101 (1) The destination is (pc), in which case this is a branch,
6102 nothing here applies.
6103 (2) The destination is ar.lc, in which case this is a
6104 doloop_end_internal,
6105 (3) The destination is an fp register, in which case this is
6106 an fselect instruction.
6107 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6108 this is a check load.
6109 In all cases, nothing we do in this function applies. */
6113 if (COMPARISON_P (src)
6114 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6115 /* Set pflags->is_fp to 1 so that we know we're dealing
6116 with a floating point comparison when processing the
6117 destination of the SET. */
6120 /* Discover if this is a parallel comparison. We only handle
6121 and.orcm and or.andcm at present, since we must retain a
6122 strict inverse on the predicate pair. */
6123 else if (GET_CODE (src) == AND)
6125 else if (GET_CODE (src) == IOR)
6132 /* Subroutine of rtx_needs_barrier; this function determines whether the
6133 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6134 are as in rtx_needs_barrier. COND is an rtx that holds the condition
6138 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6140 int need_barrier = 0;
6142 rtx src = SET_SRC (x);
6144 if (GET_CODE (src) == CALL)
6145 /* We don't need to worry about the result registers that
6146 get written by a subroutine call. */
6147 return rtx_needs_barrier (src, flags, pred);
6148 else if (SET_DEST (x) == pc_rtx)
6150 /* X is a conditional branch. */
6151 /* ??? This seems redundant, as the caller sets this bit for
6153 if (!ia64_spec_check_src_p (src))
6154 flags.is_branch = 1;
6155 return rtx_needs_barrier (src, flags, pred);
6158 if (ia64_spec_check_src_p (src))
6159 /* Avoid checking one register twice (in the condition
6160 and in the 'then' section) for the ldc pattern. */
6162 gcc_assert (REG_P (XEXP (src, 2)));
6163 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6165 /* We process MEM below. */
6166 src = XEXP (src, 1);
6169 need_barrier |= rtx_needs_barrier (src, flags, pred);
6172 if (GET_CODE (dst) == ZERO_EXTRACT)
6174 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6175 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6177 return need_barrier;
6180 /* Handle an access to rtx X of type FLAGS using predicate register
6181 PRED. Return 1 if this access creates a dependency with an earlier
6182 instruction in the same group. */
6185 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6188 int is_complemented = 0;
6189 int need_barrier = 0;
6190 const char *format_ptr;
6191 struct reg_flags new_flags;
6199 switch (GET_CODE (x))
6202 update_set_flags (x, &new_flags);
6203 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6204 if (GET_CODE (SET_SRC (x)) != CALL)
6206 new_flags.is_write = 1;
6207 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6212 new_flags.is_write = 0;
6213 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6215 /* Avoid multiple register writes, in case this is a pattern with
6216 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6217 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6219 new_flags.is_write = 1;
6220 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6221 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6222 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6227 /* X is a predicated instruction. */
6229 cond = COND_EXEC_TEST (x);
6231 need_barrier = rtx_needs_barrier (cond, flags, 0);
6233 if (GET_CODE (cond) == EQ)
6234 is_complemented = 1;
6235 cond = XEXP (cond, 0);
6236 gcc_assert (GET_CODE (cond) == REG
6237 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6238 pred = REGNO (cond);
6239 if (is_complemented)
6242 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6243 return need_barrier;
6247 /* Clobber & use are for earlier compiler phases only. */
6252 /* We always emit stop bits for traditional asms. We emit stop bits
6253 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6254 if (GET_CODE (x) != ASM_OPERANDS
6255 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6257 /* Avoid writing the register multiple times if we have multiple
6258 asm outputs. This avoids a failure in rws_access_reg. */
6259 if (! rws_insn_test (REG_VOLATILE))
6261 new_flags.is_write = 1;
6262 rws_access_regno (REG_VOLATILE, new_flags, pred);
6267 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6268 We cannot just fall through here since then we would be confused
6269 by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal usage,
6270 does not indicate a traditional asm here. */
6272 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6273 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6278 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6280 rtx pat = XVECEXP (x, 0, i);
6281 switch (GET_CODE (pat))
6284 update_set_flags (pat, &new_flags);
6285 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6291 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6295 if (REG_P (XEXP (pat, 0))
6296 && extract_asm_operands (x) != NULL_RTX
6297 && REGNO (XEXP (pat, 0)) != AR_UNAT_REGNUM)
6299 new_flags.is_write = 1;
6300 need_barrier |= rtx_needs_barrier (XEXP (pat, 0),
6313 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6315 rtx pat = XVECEXP (x, 0, i);
6316 if (GET_CODE (pat) == SET)
6318 if (GET_CODE (SET_SRC (pat)) != CALL)
6320 new_flags.is_write = 1;
6321 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6325 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6326 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6331 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6334 if (REGNO (x) == AR_UNAT_REGNUM)
6336 for (i = 0; i < 64; ++i)
6337 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6340 need_barrier = rws_access_reg (x, flags, pred);
6344 /* Find the regs used in memory address computation. */
6345 new_flags.is_write = 0;
6346 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6349 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6350 case SYMBOL_REF: case LABEL_REF: case CONST:
6353 /* Operators with side-effects. */
6354 case POST_INC: case POST_DEC:
6355 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6357 new_flags.is_write = 0;
6358 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6359 new_flags.is_write = 1;
6360 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6364 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6366 new_flags.is_write = 0;
6367 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6368 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6369 new_flags.is_write = 1;
6370 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6373 /* Handle common unary and binary ops for efficiency. */
6374 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6375 case MOD: case UDIV: case UMOD: case AND: case IOR:
6376 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6377 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6378 case NE: case EQ: case GE: case GT: case LE:
6379 case LT: case GEU: case GTU: case LEU: case LTU:
6380 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6381 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6384 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6385 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6386 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6387 case SQRT: case FFS: case POPCOUNT:
6388 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6392 /* VEC_SELECT's second argument is a PARALLEL with integers that
6393 describe the elements selected. On ia64, those integers are
6394 always constants. Avoid walking the PARALLEL so that we don't
6395 get confused with "normal" parallels and then die. */
6396 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6400 switch (XINT (x, 1))
6402 case UNSPEC_LTOFF_DTPMOD:
6403 case UNSPEC_LTOFF_DTPREL:
6405 case UNSPEC_LTOFF_TPREL:
6407 case UNSPEC_PRED_REL_MUTEX:
6408 case UNSPEC_PIC_CALL:
6410 case UNSPEC_FETCHADD_ACQ:
6411 case UNSPEC_FETCHADD_REL:
6412 case UNSPEC_BSP_VALUE:
6413 case UNSPEC_FLUSHRS:
6414 case UNSPEC_BUNDLE_SELECTOR:
6417 case UNSPEC_GR_SPILL:
6418 case UNSPEC_GR_RESTORE:
6420 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6421 HOST_WIDE_INT bit = (offset >> 3) & 63;
6423 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6424 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6425 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6430 case UNSPEC_FR_SPILL:
6431 case UNSPEC_FR_RESTORE:
6432 case UNSPEC_GETF_EXP:
6433 case UNSPEC_SETF_EXP:
6435 case UNSPEC_FR_SQRT_RECIP_APPROX:
6436 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6441 case UNSPEC_CHKACLR:
6443 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6446 case UNSPEC_FR_RECIP_APPROX:
6448 case UNSPEC_COPYSIGN:
6449 case UNSPEC_FR_RECIP_APPROX_RES:
6450 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6451 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6454 case UNSPEC_CMPXCHG_ACQ:
6455 case UNSPEC_CMPXCHG_REL:
6456 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6457 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6465 case UNSPEC_VOLATILE:
6466 switch (XINT (x, 1))
6469 /* Alloc must always be the first instruction of a group.
6470 We force this by always returning true. */
6471 /* ??? We might get better scheduling if we explicitly check for
6472 input/local/output register dependencies, and modify the
6473 scheduler so that alloc is always reordered to the start of
6474 the current group. We could then eliminate all of the
6475 first_instruction code. */
6476 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6478 new_flags.is_write = 1;
6479 rws_access_regno (REG_AR_CFM, new_flags, pred);
6482 case UNSPECV_SET_BSP:
6486 case UNSPECV_BLOCKAGE:
6487 case UNSPECV_INSN_GROUP_BARRIER:
6489 case UNSPECV_PSAC_ALL:
6490 case UNSPECV_PSAC_NORMAL:
6499 new_flags.is_write = 0;
6500 need_barrier = rws_access_regno (REG_RP, flags, pred);
6501 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6503 new_flags.is_write = 1;
6504 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6505 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6509 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6510 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6511 switch (format_ptr[i])
6513 case '0': /* unused field */
6514 case 'i': /* integer */
6515 case 'n': /* note */
6516 case 'w': /* wide integer */
6517 case 's': /* pointer to string */
6518 case 'S': /* optional pointer to string */
6522 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6527 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6528 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6537 return need_barrier;
6540 /* Clear out the state for group_barrier_needed at the start of a
6541 sequence of insns. */
6544 init_insn_group_barriers (void)
6546 memset (rws_sum, 0, sizeof (rws_sum));
6547 first_instruction = 1;
6550 /* Given the current state, determine whether a group barrier (a stop bit) is
6551 necessary before INSN. Return nonzero if so. This modifies the state to
6552 include the effects of INSN as a side-effect. */
6555 group_barrier_needed (rtx insn)
6558 int need_barrier = 0;
6559 struct reg_flags flags;
6561 memset (&flags, 0, sizeof (flags));
6562 switch (GET_CODE (insn))
6569 /* A barrier doesn't imply an instruction group boundary. */
6573 memset (rws_insn, 0, sizeof (rws_insn));
6577 flags.is_branch = 1;
6578 flags.is_sibcall = SIBLING_CALL_P (insn);
6579 memset (rws_insn, 0, sizeof (rws_insn));
6581 /* Don't bundle a call following another call. */
6582 if ((pat = prev_active_insn (insn))
6583 && GET_CODE (pat) == CALL_INSN)
6589 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6593 if (!ia64_spec_check_p (insn))
6594 flags.is_branch = 1;
6596 /* Don't bundle a jump following a call. */
6597 if ((pat = prev_active_insn (insn))
6598 && GET_CODE (pat) == CALL_INSN)
6606 if (GET_CODE (PATTERN (insn)) == USE
6607 || GET_CODE (PATTERN (insn)) == CLOBBER)
6608 /* Don't care about USE and CLOBBER "insns"---those are used to
6609 indicate to the optimizer that it shouldn't get rid of
6610 certain operations. */
6613 pat = PATTERN (insn);
6615 /* Ug. Hack hacks hacked elsewhere. */
6616 switch (recog_memoized (insn))
6618 /* We play dependency tricks with the epilogue in order
6619 to get proper schedules. Undo this for dv analysis. */
6620 case CODE_FOR_epilogue_deallocate_stack:
6621 case CODE_FOR_prologue_allocate_stack:
6622 pat = XVECEXP (pat, 0, 0);
6625 /* The pattern we use for br.cloop confuses the code above.
6626 The second element of the vector is representative. */
6627 case CODE_FOR_doloop_end_internal:
6628 pat = XVECEXP (pat, 0, 1);
6631 /* Doesn't generate code. */
6632 case CODE_FOR_pred_rel_mutex:
6633 case CODE_FOR_prologue_use:
6640 memset (rws_insn, 0, sizeof (rws_insn));
6641 need_barrier = rtx_needs_barrier (pat, flags, 0);
6643 /* Check to see if the previous instruction was a volatile
6646 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6654 if (first_instruction && INSN_P (insn)
6655 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6656 && GET_CODE (PATTERN (insn)) != USE
6657 && GET_CODE (PATTERN (insn)) != CLOBBER)
6660 first_instruction = 0;
6663 return need_barrier;
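/* A minimal standalone sketch (not GCC code, hypothetical names) of the
   rule the read/write tracking above enforces: within one instruction
   group a register must not be written twice and must not be read after
   being written, otherwise a stop bit is required.  The real
   rws_access_regno machinery also records the qualifying predicate of
   each access, which this sketch ignores.  */
#if 0
#include <string.h>

#define N_REGS 128

static unsigned char written_in_group[N_REGS];

static void
start_new_group (void)
{
  memset (written_in_group, 0, sizeof (written_in_group));
}

/* Record one register access of the current insn; return 1 if a stop
   bit (group barrier) is needed before that insn.  */
static int
access_needs_stop (int regno, int is_write)
{
  int need_stop = written_in_group[regno] != 0;  /* RAW or WAW.  */

  if (is_write)
    written_in_group[regno] = 1;
  return need_stop;
}
#endif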
6666 /* Like group_barrier_needed, but do not clobber the current state. */
6669 safe_group_barrier_needed (rtx insn)
6671 int saved_first_instruction;
6674 saved_first_instruction = first_instruction;
6675 in_safe_group_barrier = 1;
6677 t = group_barrier_needed (insn);
6679 first_instruction = saved_first_instruction;
6680 in_safe_group_barrier = 0;
6685 /* Scan the current function and insert stop bits as necessary to
6686 eliminate dependencies. This function assumes that a final
6687 instruction scheduling pass has been run which has already
6688 inserted most of the necessary stop bits. This function only
6689 inserts new ones at basic block boundaries, since these are
6690 invisible to the scheduler. */
6693 emit_insn_group_barriers (FILE *dump)
6697 int insns_since_last_label = 0;
6699 init_insn_group_barriers ();
6701 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6703 if (GET_CODE (insn) == CODE_LABEL)
6705 if (insns_since_last_label)
6707 insns_since_last_label = 0;
6709 else if (GET_CODE (insn) == NOTE
6710 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6712 if (insns_since_last_label)
6714 insns_since_last_label = 0;
6716 else if (GET_CODE (insn) == INSN
6717 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6718 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6720 init_insn_group_barriers ();
6723 else if (NONDEBUG_INSN_P (insn))
6725 insns_since_last_label = 1;
6727 if (group_barrier_needed (insn))
6732 fprintf (dump, "Emitting stop before label %d\n",
6733 INSN_UID (last_label));
6734 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6737 init_insn_group_barriers ();
6745 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6746 This function has to emit all necessary group barriers. */
6749 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6753 init_insn_group_barriers ();
6755 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6757 if (GET_CODE (insn) == BARRIER)
6759 rtx last = prev_active_insn (insn);
6763 if (GET_CODE (last) == JUMP_INSN
6764 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6765 last = prev_active_insn (last);
6766 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6767 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6769 init_insn_group_barriers ();
6771 else if (NONDEBUG_INSN_P (insn))
6773 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6774 init_insn_group_barriers ();
6775 else if (group_barrier_needed (insn))
6777 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6778 init_insn_group_barriers ();
6779 group_barrier_needed (insn);
6787 /* Instruction scheduling support. */
6789 #define NR_BUNDLES 10
6791 /* A list of names of all available bundles. */
6793 static const char *bundle_name [NR_BUNDLES] =
6799 #if NR_BUNDLES == 10
6809 /* Nonzero if we should insert stop bits into the schedule. */
6811 int ia64_final_schedule = 0;
6813 /* Codes of the corresponding queried units: */
6815 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6816 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6818 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6819 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6821 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6823 /* The following variable value is an insn group barrier. */
6825 static rtx dfa_stop_insn;
6827 /* The following variable value is the last issued insn. */
6829 static rtx last_scheduled_insn;
6831 /* The following variable value is a pointer to a DFA state used as a
6832 temporary variable. */
6834 static state_t temp_dfa_state = NULL;
6836 /* The following variable value is the DFA state after issuing the last
6839 static state_t prev_cycle_state = NULL;
6841 /* The following array element values are TRUE if the corresponding
6842 insn requires stop bits to be added before it. */
6844 static char *stops_p = NULL;
6846 /* The following variable is used to set up the array mentioned above. */
6848 static int stop_before_p = 0;
6850 /* The following variable value is the length of the arrays `clocks' and
6853 static int clocks_length;
6855 /* The following variable value is number of data speculations in progress. */
6856 static int pending_data_specs = 0;
6858 /* Number of memory references on current and three future processor cycles. */
6859 static char mem_ops_in_group[4];
6861 /* Number of the current processor cycle (from the scheduler's point of view). */
6862 static int current_cycle;
6864 static rtx ia64_single_set (rtx);
6865 static void ia64_emit_insn_before (rtx, rtx);
6867 /* Map a bundle number to its pseudo-op. */
6870 get_bundle_name (int b)
6872 return bundle_name[b];
6876 /* Return the maximum number of instructions a cpu can issue. */
6879 ia64_issue_rate (void)
6884 /* Helper function - like single_set, but look inside COND_EXEC. */
6887 ia64_single_set (rtx insn)
6889 rtx x = PATTERN (insn), ret;
6890 if (GET_CODE (x) == COND_EXEC)
6891 x = COND_EXEC_CODE (x);
6892 if (GET_CODE (x) == SET)
6895 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6896 Although they are not classical single set, the second set is there just
6897 to protect it from moving past FP-relative stack accesses. */
6898 switch (recog_memoized (insn))
6900 case CODE_FOR_prologue_allocate_stack:
6901 case CODE_FOR_epilogue_deallocate_stack:
6902 ret = XVECEXP (x, 0, 0);
6906 ret = single_set_2 (insn, x);
6913 /* Adjust the cost of a scheduling dependency.
6914 Return the new cost of a dependency of type DEP_TYPE of INSN on DEP_INSN.
6915 COST is the current cost, DW is dependency weakness. */
6917 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
6919 enum reg_note dep_type = (enum reg_note) dep_type1;
6920 enum attr_itanium_class dep_class;
6921 enum attr_itanium_class insn_class;
6923 insn_class = ia64_safe_itanium_class (insn);
6924 dep_class = ia64_safe_itanium_class (dep_insn);
6926 /* Treat true memory dependencies separately. Ignore apparent true
6927 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
6928 if (dep_type == REG_DEP_TRUE
6929 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
6930 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
6933 if (dw == MIN_DEP_WEAK)
6934 /* Store and load are likely to alias, use higher cost to avoid stall. */
6935 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
6936 else if (dw > MIN_DEP_WEAK)
6938 /* Store and load are less likely to alias. */
6939 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
6940 /* Assume there will be no cache conflict for floating-point data.
6941 For integer data, L1 conflict penalty is huge (17 cycles), so we
6942 never assume it will not cause a conflict. */
6948 if (dep_type != REG_DEP_OUTPUT)
6951 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6952 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6958 /* Like emit_insn_before, but skip cycle_display notes.
6959 ??? When cycle display notes are implemented, update this. */
6962 ia64_emit_insn_before (rtx insn, rtx before)
6964 emit_insn_before (insn, before);
6967 /* The following function marks insns that produce addresses for load
6968 and store insns. Such insns will be placed into M slots because this
6969 decreases latency time for Itanium1 (see function
6970 `ia64_produce_address_p' and the DFA descriptions). */
6973 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6975 rtx insn, next, next_tail;
6977 /* Before reload, which_alternative is not set, which means that
6978 ia64_safe_itanium_class will produce wrong results for (at least)
6979 move instructions. */
6980 if (!reload_completed)
6983 next_tail = NEXT_INSN (tail);
6984 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6987 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6989 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6991 sd_iterator_def sd_it;
6993 bool has_mem_op_consumer_p = false;
6995 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6997 enum attr_itanium_class c;
6999 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7002 next = DEP_CON (dep);
7003 c = ia64_safe_itanium_class (next);
7004 if ((c == ITANIUM_CLASS_ST
7005 || c == ITANIUM_CLASS_STF)
7006 && ia64_st_address_bypass_p (insn, next))
7008 has_mem_op_consumer_p = true;
7011 else if ((c == ITANIUM_CLASS_LD
7012 || c == ITANIUM_CLASS_FLD
7013 || c == ITANIUM_CLASS_FLDP)
7014 && ia64_ld_address_bypass_p (insn, next))
7016 has_mem_op_consumer_p = true;
7021 insn->call = has_mem_op_consumer_p;
7025 /* We're beginning a new block. Initialize data structures as necessary. */
7028 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7029 int sched_verbose ATTRIBUTE_UNUSED,
7030 int max_ready ATTRIBUTE_UNUSED)
7032 #ifdef ENABLE_CHECKING
7035 if (!sel_sched_p () && reload_completed)
7036 for (insn = NEXT_INSN (current_sched_info->prev_head);
7037 insn != current_sched_info->next_tail;
7038 insn = NEXT_INSN (insn))
7039 gcc_assert (!SCHED_GROUP_P (insn));
7041 last_scheduled_insn = NULL_RTX;
7042 init_insn_group_barriers ();
7045 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7048 /* We're beginning a scheduling pass. Check assertion. */
7051 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7052 int sched_verbose ATTRIBUTE_UNUSED,
7053 int max_ready ATTRIBUTE_UNUSED)
7055 gcc_assert (pending_data_specs == 0);
7058 /* The scheduling pass is now finished. Free/reset the static variable. */
7060 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7061 int sched_verbose ATTRIBUTE_UNUSED)
7063 gcc_assert (pending_data_specs == 0);
7066 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7067 speculation check), FALSE otherwise. */
7069 is_load_p (rtx insn)
7071 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7074 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7075 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7078 /* If INSN is a memory reference, memoize it in the MEM_OPS_IN_GROUP global array
7079 (taking into account the 3-cycle cache reference postponing for stores: Intel
7080 Itanium 2 Reference Manual for Software Development and Optimization,
7083 record_memory_reference (rtx insn)
7085 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7087 switch (insn_class) {
7088 case ITANIUM_CLASS_FLD:
7089 case ITANIUM_CLASS_LD:
7090 mem_ops_in_group[current_cycle % 4]++;
7092 case ITANIUM_CLASS_STF:
7093 case ITANIUM_CLASS_ST:
7094 mem_ops_in_group[(current_cycle + 3) % 4]++;
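/* Standalone sketch (hypothetical names, not GCC code) of the rolling
   four-slot window used above: loads are charged to the issue cycle,
   stores to the cycle three ticks later, and the count for the current
   cycle is compared against a limit before another memory insn is
   allowed to issue (see ia64_first_cycle_multipass_dfa_lookahead_guard).  */
#if 0
static char mem_ops[4];
static int cycle;

static void
charge_load (void)
{
  mem_ops[cycle % 4]++;
}

static void
charge_store (void)
{
  mem_ops[(cycle + 3) % 4]++;
}

static void
advance_cycle (void)
{
  cycle++;
  mem_ops[cycle % 4] = 0;  /* Reuse the slot for the new cycle.  */
}

static int
may_issue_mem_op (int limit)
{
  return mem_ops[cycle % 4] < limit;
}
#endif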
7100 /* We are about to begin issuing insns for this clock cycle.
7101 Override the default sort algorithm to better slot instructions. */
7104 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
7105 int *pn_ready, int clock_var,
7109 int n_ready = *pn_ready;
7110 rtx *e_ready = ready + n_ready;
7114 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7116 if (reorder_type == 0)
7118 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7120 for (insnp = ready; insnp < e_ready; insnp++)
7121 if (insnp < e_ready)
7124 enum attr_type t = ia64_safe_type (insn);
7125 if (t == TYPE_UNKNOWN)
7127 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7128 || asm_noperands (PATTERN (insn)) >= 0)
7130 rtx lowest = ready[n_asms];
7131 ready[n_asms] = insn;
7137 rtx highest = ready[n_ready - 1];
7138 ready[n_ready - 1] = insn;
7145 if (n_asms < n_ready)
7147 /* Some normal insns to process. Skip the asms. */
7151 else if (n_ready > 0)
7155 if (ia64_final_schedule)
7158 int nr_need_stop = 0;
7160 for (insnp = ready; insnp < e_ready; insnp++)
7161 if (safe_group_barrier_needed (*insnp))
7164 if (reorder_type == 1 && n_ready == nr_need_stop)
7166 if (reorder_type == 0)
7169 /* Move down everything that needs a stop bit, preserving
7171 while (insnp-- > ready + deleted)
7172 while (insnp >= ready + deleted)
7175 if (! safe_group_barrier_needed (insn))
7177 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7185 current_cycle = clock_var;
7186 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7191 /* Move down loads/stores, preserving relative order. */
7192 while (insnp-- > ready + moved)
7193 while (insnp >= ready + moved)
7196 if (! is_load_p (insn))
7198 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7209 /* We are about to begin issuing insns for this clock cycle. Override
7210 the default sort algorithm to better slot instructions. */
7213 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
7216 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7217 pn_ready, clock_var, 0);
7220 /* Like ia64_sched_reorder, but called after issuing each insn.
7221 Override the default sort algorithm to better slot instructions. */
7224 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7225 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
7226 int *pn_ready, int clock_var)
7228 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7232 /* We are about to issue INSN. Return the number of insns left on the
7233 ready queue that can be issued this cycle. */
7236 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7237 int sched_verbose ATTRIBUTE_UNUSED,
7238 rtx insn ATTRIBUTE_UNUSED,
7239 int can_issue_more ATTRIBUTE_UNUSED)
7241 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7242 /* Modulo scheduling does not extend h_i_d when emitting
7243 new instructions. Don't use h_i_d if we don't have to. */
7245 if (DONE_SPEC (insn) & BEGIN_DATA)
7246 pending_data_specs++;
7247 if (CHECK_SPEC (insn) & BEGIN_DATA)
7248 pending_data_specs--;
7251 if (DEBUG_INSN_P (insn))
7254 last_scheduled_insn = insn;
7255 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7256 if (reload_completed)
7258 int needed = group_barrier_needed (insn);
7260 gcc_assert (!needed);
7261 if (GET_CODE (insn) == CALL_INSN)
7262 init_insn_group_barriers ();
7263 stops_p [INSN_UID (insn)] = stop_before_p;
7266 record_memory_reference (insn);
7271 /* We are choosing an insn from the ready queue. Return nonzero if INSN
7275 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
7277 gcc_assert (insn && INSN_P (insn));
7278 return ((!reload_completed
7279 || !safe_group_barrier_needed (insn))
7280 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
7281 && (!mflag_sched_mem_insns_hard_limit
7282 || !is_load_p (insn)
7283 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
7286 /* We are choosing an insn from the ready queue. Return nonzero if INSN
7290 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
7292 gcc_assert (insn && INSN_P (insn));
7293 /* The size of the ALAT is 32. Since we perform conservative data speculation,
7294 we keep the ALAT half-empty. */
7295 return (pending_data_specs < 16
7296 || !(TODO_SPEC (insn) & BEGIN_DATA));
7299 /* The following variable value is a pseudo-insn used by the DFA insn
7300 scheduler to change the DFA state when the simulated clock is
7303 static rtx dfa_pre_cycle_insn;
7305 /* Returns 1 when a meaningful insn was scheduled between the last group
7306 barrier and LAST. */
7308 scheduled_good_insn (rtx last)
7310 if (last && recog_memoized (last) >= 0)
7314 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7315 && !stops_p[INSN_UID (last)];
7316 last = PREV_INSN (last))
7317 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7318 the ebb we're scheduling. */
7319 if (INSN_P (last) && recog_memoized (last) >= 0)
7325 /* We are about to begin issuing INSN. Return nonzero if we cannot
7326 issue it on given cycle CLOCK and return zero if we should not sort
7327 the ready queue on the next clock start. */
7330 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
7331 int clock, int *sort_p)
7333 gcc_assert (insn && INSN_P (insn));
7335 if (DEBUG_INSN_P (insn))
7338 /* When a group barrier is needed for insn, last_scheduled_insn
7340 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7341 || last_scheduled_insn);
7343 if ((reload_completed
7344 && (safe_group_barrier_needed (insn)
7345 || (mflag_sched_stop_bits_after_every_cycle
7346 && last_clock != clock
7347 && last_scheduled_insn
7348 && scheduled_good_insn (last_scheduled_insn))))
7349 || (last_scheduled_insn
7350 && (GET_CODE (last_scheduled_insn) == CALL_INSN
7351 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7352 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
7354 init_insn_group_barriers ();
7356 if (verbose && dump)
7357 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7358 last_clock == clock ? " + cycle advance" : "");
7361 current_cycle = clock;
7362 mem_ops_in_group[current_cycle % 4] = 0;
7364 if (last_clock == clock)
7366 state_transition (curr_state, dfa_stop_insn);
7367 if (TARGET_EARLY_STOP_BITS)
7368 *sort_p = (last_scheduled_insn == NULL_RTX
7369 || GET_CODE (last_scheduled_insn) != CALL_INSN);
7375 if (last_scheduled_insn)
7377 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7378 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
7379 state_reset (curr_state);
7382 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7383 state_transition (curr_state, dfa_stop_insn);
7384 state_transition (curr_state, dfa_pre_cycle_insn);
7385 state_transition (curr_state, NULL);
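/* Reading aid: the else branch above rebuilds the DFA state at the start
   of the new cycle from prev_cycle_state: it issues the stop-bit pseudo
   insn (closing the previous instruction group), then the pre-cycle
   pseudo insn (see dfa_pre_cycle_insn above), and finally makes a NULL
   transition, which we understand the DFA interface to treat as a cycle
   advance.  */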
7392 /* Implement targetm.sched.h_i_d_extended hook.
7393 Extend internal data structures. */
7395 ia64_h_i_d_extended (void)
7397 if (stops_p != NULL)
7399 int new_clocks_length = get_max_uid () * 3 / 2;
7400 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7401 clocks_length = new_clocks_length;
7406 /* This structure describes the data used by the backend to guide scheduling.
7407 When the current scheduling point is switched, this data should be saved
7408 and restored later, if the scheduler returns to this point. */
7409 struct _ia64_sched_context
7411 state_t prev_cycle_state;
7412 rtx last_scheduled_insn;
7413 struct reg_write_state rws_sum[NUM_REGS];
7414 struct reg_write_state rws_insn[NUM_REGS];
7415 int first_instruction;
7416 int pending_data_specs;
7418 char mem_ops_in_group[4];
7420 typedef struct _ia64_sched_context *ia64_sched_context_t;
7422 /* Allocates a scheduling context. */
7424 ia64_alloc_sched_context (void)
7426 return xmalloc (sizeof (struct _ia64_sched_context));
7429 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7430 the global context otherwise. */
7432 ia64_init_sched_context (void *_sc, bool clean_p)
7434 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7436 sc->prev_cycle_state = xmalloc (dfa_state_size);
7439 state_reset (sc->prev_cycle_state);
7440 sc->last_scheduled_insn = NULL_RTX;
7441 memset (sc->rws_sum, 0, sizeof (rws_sum));
7442 memset (sc->rws_insn, 0, sizeof (rws_insn));
7443 sc->first_instruction = 1;
7444 sc->pending_data_specs = 0;
7445 sc->current_cycle = 0;
7446 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7450 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7451 sc->last_scheduled_insn = last_scheduled_insn;
7452 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7453 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7454 sc->first_instruction = first_instruction;
7455 sc->pending_data_specs = pending_data_specs;
7456 sc->current_cycle = current_cycle;
7457 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7461 /* Sets the global scheduling context to the one pointed to by _SC. */
7463 ia64_set_sched_context (void *_sc)
7465 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7467 gcc_assert (sc != NULL);
7469 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7470 last_scheduled_insn = sc->last_scheduled_insn;
7471 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7472 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7473 first_instruction = sc->first_instruction;
7474 pending_data_specs = sc->pending_data_specs;
7475 current_cycle = sc->current_cycle;
7476 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7479 /* Clears the data in the _SC scheduling context. */
7481 ia64_clear_sched_context (void *_sc)
7483 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7485 free (sc->prev_cycle_state);
7486 sc->prev_cycle_state = NULL;
7489 /* Frees the _SC scheduling context. */
7491 ia64_free_sched_context (void *_sc)
7493 gcc_assert (_sc != NULL);
7498 typedef rtx (* gen_func_t) (rtx, rtx);
7500 /* Return a function that will generate a load of mode MODE_NO
7501 with speculation types TS. */
7503 get_spec_load_gen_function (ds_t ts, int mode_no)
7505 static gen_func_t gen_ld_[] = {
7515 gen_zero_extendqidi2,
7516 gen_zero_extendhidi2,
7517 gen_zero_extendsidi2,
7520 static gen_func_t gen_ld_a[] = {
7530 gen_zero_extendqidi2_advanced,
7531 gen_zero_extendhidi2_advanced,
7532 gen_zero_extendsidi2_advanced,
7534 static gen_func_t gen_ld_s[] = {
7535 gen_movbi_speculative,
7536 gen_movqi_speculative,
7537 gen_movhi_speculative,
7538 gen_movsi_speculative,
7539 gen_movdi_speculative,
7540 gen_movsf_speculative,
7541 gen_movdf_speculative,
7542 gen_movxf_speculative,
7543 gen_movti_speculative,
7544 gen_zero_extendqidi2_speculative,
7545 gen_zero_extendhidi2_speculative,
7546 gen_zero_extendsidi2_speculative,
7548 static gen_func_t gen_ld_sa[] = {
7549 gen_movbi_speculative_advanced,
7550 gen_movqi_speculative_advanced,
7551 gen_movhi_speculative_advanced,
7552 gen_movsi_speculative_advanced,
7553 gen_movdi_speculative_advanced,
7554 gen_movsf_speculative_advanced,
7555 gen_movdf_speculative_advanced,
7556 gen_movxf_speculative_advanced,
7557 gen_movti_speculative_advanced,
7558 gen_zero_extendqidi2_speculative_advanced,
7559 gen_zero_extendhidi2_speculative_advanced,
7560 gen_zero_extendsidi2_speculative_advanced,
7562 static gen_func_t gen_ld_s_a[] = {
7563 gen_movbi_speculative_a,
7564 gen_movqi_speculative_a,
7565 gen_movhi_speculative_a,
7566 gen_movsi_speculative_a,
7567 gen_movdi_speculative_a,
7568 gen_movsf_speculative_a,
7569 gen_movdf_speculative_a,
7570 gen_movxf_speculative_a,
7571 gen_movti_speculative_a,
7572 gen_zero_extendqidi2_speculative_a,
7573 gen_zero_extendhidi2_speculative_a,
7574 gen_zero_extendsidi2_speculative_a,
7579 if (ts & BEGIN_DATA)
7581 if (ts & BEGIN_CONTROL)
7586 else if (ts & BEGIN_CONTROL)
7588 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7589 || ia64_needs_block_p (ts))
7592 gen_ld = gen_ld_s_a;
7599 return gen_ld[mode_no];
7602 /* Constants that help map 'enum machine_mode' to int. */
7605 SPEC_MODE_INVALID = -1,
7606 SPEC_MODE_FIRST = 0,
7607 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7608 SPEC_MODE_FOR_EXTEND_LAST = 3,
7614 /* Offset to reach ZERO_EXTEND patterns. */
7615 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7618 /* Return the index of MODE. */
7620 ia64_mode_to_int (enum machine_mode mode)
7624 case BImode: return 0; /* SPEC_MODE_FIRST */
7625 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7626 case HImode: return 2;
7627 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7628 case DImode: return 4;
7629 case SFmode: return 5;
7630 case DFmode: return 6;
7631 case XFmode: return 7;
7633 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7634 mentioned in itanium[12].md. Predicate fp_register_operand also
7635 needs to be defined. Bottom line: better disable for now. */
7636 return SPEC_MODE_INVALID;
7637 default: return SPEC_MODE_INVALID;
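/* Worked example (illustrative): in get_mode_no_for_insn below, a
   zero-extending QImode load starts at index 1 (SPEC_MODE_FOR_EXTEND_FIRST)
   and is then bumped by SPEC_GEN_EXTEND_OFFSET, so it selects the
   gen_zero_extendqidi2* entry at the tail of the gen_ld_* tables above,
   while a plain DImode load keeps index 4 and selects the gen_movdi*
   entry.  This assumes SPEC_MODE_LAST is 8 (TImode), matching the nine
   mov* entries that precede the three zero-extend entries in each table.  */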
7641 /* Provide information about speculation capabilities. */
7643 ia64_set_sched_flags (spec_info_t spec_info)
7645 unsigned int *flags = &(current_sched_info->flags);
7647 if (*flags & SCHED_RGN
7648 || *flags & SCHED_EBB
7649 || *flags & SEL_SCHED)
7653 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7654 || (mflag_sched_ar_data_spec && reload_completed))
7659 && ((mflag_sched_br_in_data_spec && !reload_completed)
7660 || (mflag_sched_ar_in_data_spec && reload_completed)))
7664 if (mflag_sched_control_spec
7666 || reload_completed))
7668 mask |= BEGIN_CONTROL;
7670 if (!sel_sched_p () && mflag_sched_in_control_spec)
7671 mask |= BE_IN_CONTROL;
7674 spec_info->mask = mask;
7678 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7680 if (mask & BE_IN_SPEC)
7683 spec_info->flags = 0;
7685 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7686 spec_info->flags |= PREFER_NON_DATA_SPEC;
7688 if (mask & CONTROL_SPEC)
7690 if (mflag_sched_prefer_non_control_spec_insns)
7691 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7693 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7694 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7697 if (sched_verbose >= 1)
7698 spec_info->dump = sched_dump;
7700 spec_info->dump = 0;
7702 if (mflag_sched_count_spec_in_critical_path)
7703 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7707 spec_info->mask = 0;
7710 /* If INSN is an appropriate load, return its mode.
7711 Return -1 otherwise. */
7713 get_mode_no_for_insn (rtx insn)
7715 rtx reg, mem, mode_rtx;
7719 extract_insn_cached (insn);
7721 /* We use WHICH_ALTERNATIVE only after reload. This will
7722 guarantee that reload won't touch a speculative insn. */
7724 if (recog_data.n_operands != 2)
7727 reg = recog_data.operand[0];
7728 mem = recog_data.operand[1];
7730 /* We should use MEM's mode since REG's mode in presence of
7731 ZERO_EXTEND will always be DImode. */
7732 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7733 /* Process non-speculative ld. */
7735 if (!reload_completed)
7737 /* Do not speculate into regs like ar.lc. */
7738 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7745 rtx mem_reg = XEXP (mem, 0);
7747 if (!REG_P (mem_reg))
7753 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
7755 gcc_assert (REG_P (reg) && MEM_P (mem));
7761 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
7762 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
7763 || get_attr_check_load (insn) == CHECK_LOAD_YES)
7764 /* Process speculative ld or ld.c. */
7766 gcc_assert (REG_P (reg) && MEM_P (mem));
7771 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
7773 if (attr_class == ITANIUM_CLASS_CHK_A
7774 || attr_class == ITANIUM_CLASS_CHK_S_I
7775 || attr_class == ITANIUM_CLASS_CHK_S_F)
7782 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
7784 if (mode_no == SPEC_MODE_INVALID)
7787 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
7791 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
7792 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
7795 mode_no += SPEC_GEN_EXTEND_OFFSET;
7801 /* If X is an unspec part of a speculative load, return its code.
7802 Return -1 otherwise. */
7804 get_spec_unspec_code (const_rtx x)
7806 if (GET_CODE (x) != UNSPEC)
7828 /* Implement skip_rtx_p hook. */
7830 ia64_skip_rtx_p (const_rtx x)
7832 return get_spec_unspec_code (x) != -1;
7835 /* If INSN is a speculative load, return its UNSPEC code.
7836 Return -1 otherwise. */
7838 get_insn_spec_code (const_rtx insn)
7842 pat = PATTERN (insn);
7844 if (GET_CODE (pat) == COND_EXEC)
7845 pat = COND_EXEC_CODE (pat);
7847 if (GET_CODE (pat) != SET)
7850 reg = SET_DEST (pat);
7854 mem = SET_SRC (pat);
7855 if (GET_CODE (mem) == ZERO_EXTEND)
7856 mem = XEXP (mem, 0);
7858 return get_spec_unspec_code (mem);
7861 /* If INSN is a speculative load, return a ds with the speculation types.
7862 Otherwise [if INSN is a normal instruction] return 0. */
7864 ia64_get_insn_spec_ds (rtx insn)
7866 int code = get_insn_spec_code (insn);
7875 return BEGIN_CONTROL;
7878 return BEGIN_DATA | BEGIN_CONTROL;
7885 /* If INSN is a speculative load return a ds with the speculation types that
7887 Otherwise [if INSN is a normal instruction] return 0. */
7889 ia64_get_insn_checked_ds (rtx insn)
7891 int code = get_insn_spec_code (insn);
7896 return BEGIN_DATA | BEGIN_CONTROL;
7899 return BEGIN_CONTROL;
7903 return BEGIN_DATA | BEGIN_CONTROL;
7910 /* If GEN_P is true, calculate the index of needed speculation check and return
7911 speculative pattern for INSN with speculative mode TS, machine mode
7912 MODE_NO and with ZERO_EXTEND (if EXTEND_P is true).
7913 If GEN_P is false, just calculate the index of needed speculation check. */
7915 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
7918 gen_func_t gen_load;
7920 gen_load = get_spec_load_gen_function (ts, mode_no);
7922 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
7923 copy_rtx (recog_data.operand[1]));
7925 pat = PATTERN (insn);
7926 if (GET_CODE (pat) == COND_EXEC)
7927 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7934 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
7935 ds_t ds ATTRIBUTE_UNUSED)
7940 /* Implement targetm.sched.speculate_insn hook.
7941 Check if the INSN can be TS speculative.
7942 If 'no' - return -1.
7943 If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
7944 If current pattern of the INSN already provides TS speculation,
7947 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
7952 gcc_assert (!(ts & ~SPECULATIVE));
7954 if (ia64_spec_check_p (insn))
7957 if ((ts & BE_IN_SPEC)
7958 && !insn_can_be_in_speculative_p (insn, ts))
7961 mode_no = get_mode_no_for_insn (insn);
7963 if (mode_no != SPEC_MODE_INVALID)
7965 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
7970 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
7979 /* Return a function that will generate a check for speculation TS with mode
7981 If simple check is needed, pass true for SIMPLE_CHECK_P.
7982 If clearing check is needed, pass true for CLEARING_CHECK_P. */
7984 get_spec_check_gen_function (ds_t ts, int mode_no,
7985 bool simple_check_p, bool clearing_check_p)
7987 static gen_func_t gen_ld_c_clr[] = {
7997 gen_zero_extendqidi2_clr,
7998 gen_zero_extendhidi2_clr,
7999 gen_zero_extendsidi2_clr,
8001 static gen_func_t gen_ld_c_nc[] = {
8011 gen_zero_extendqidi2_nc,
8012 gen_zero_extendhidi2_nc,
8013 gen_zero_extendsidi2_nc,
8015 static gen_func_t gen_chk_a_clr[] = {
8016 gen_advanced_load_check_clr_bi,
8017 gen_advanced_load_check_clr_qi,
8018 gen_advanced_load_check_clr_hi,
8019 gen_advanced_load_check_clr_si,
8020 gen_advanced_load_check_clr_di,
8021 gen_advanced_load_check_clr_sf,
8022 gen_advanced_load_check_clr_df,
8023 gen_advanced_load_check_clr_xf,
8024 gen_advanced_load_check_clr_ti,
8025 gen_advanced_load_check_clr_di,
8026 gen_advanced_load_check_clr_di,
8027 gen_advanced_load_check_clr_di,
8029 static gen_func_t gen_chk_a_nc[] = {
8030 gen_advanced_load_check_nc_bi,
8031 gen_advanced_load_check_nc_qi,
8032 gen_advanced_load_check_nc_hi,
8033 gen_advanced_load_check_nc_si,
8034 gen_advanced_load_check_nc_di,
8035 gen_advanced_load_check_nc_sf,
8036 gen_advanced_load_check_nc_df,
8037 gen_advanced_load_check_nc_xf,
8038 gen_advanced_load_check_nc_ti,
8039 gen_advanced_load_check_nc_di,
8040 gen_advanced_load_check_nc_di,
8041 gen_advanced_load_check_nc_di,
8043 static gen_func_t gen_chk_s[] = {
8044 gen_speculation_check_bi,
8045 gen_speculation_check_qi,
8046 gen_speculation_check_hi,
8047 gen_speculation_check_si,
8048 gen_speculation_check_di,
8049 gen_speculation_check_sf,
8050 gen_speculation_check_df,
8051 gen_speculation_check_xf,
8052 gen_speculation_check_ti,
8053 gen_speculation_check_di,
8054 gen_speculation_check_di,
8055 gen_speculation_check_di,
8058 gen_func_t *gen_check;
8060 if (ts & BEGIN_DATA)
8062 /* We don't need recovery because even if this is ld.sa,
8063 an ALAT entry will be allocated only if the NAT bit is set to zero.
8064 So it is enough to use ld.c here. */
8068 gcc_assert (mflag_sched_spec_ldc);
8070 if (clearing_check_p)
8071 gen_check = gen_ld_c_clr;
8073 gen_check = gen_ld_c_nc;
8077 if (clearing_check_p)
8078 gen_check = gen_chk_a_clr;
8080 gen_check = gen_chk_a_nc;
8083 else if (ts & BEGIN_CONTROL)
8086 /* We might want to use ld.sa -> ld.c instead of
8089 gcc_assert (!ia64_needs_block_p (ts));
8091 if (clearing_check_p)
8092 gen_check = gen_ld_c_clr;
8094 gen_check = gen_ld_c_nc;
8098 gen_check = gen_chk_s;
8104 gcc_assert (mode_no >= 0);
8105 return gen_check[mode_no];
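/* Summary (reading aid): data-speculative loads are verified either with
   ld.c (the simple check, which requires mflag_sched_spec_ldc) or with
   chk.a; control-speculative loads either with ld.c (only when no branchy
   recovery block is needed) or with chk.s.  CLEARING_CHECK_P selects the
   ALAT-clearing (_clr) variants of the ld.c/chk.a checks, otherwise the
   non-clearing (_nc) variants are used.  */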
8108 /* Return nonzero if INSN needs a branchy recovery check. */
8110 ia64_needs_block_p (ds_t ts)
8112 if (ts & BEGIN_DATA)
8113 return !mflag_sched_spec_ldc;
8115 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8117 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8120 /* Generate (or regenerate, if (MUTATE_P)) recovery check for INSN.
8121 If (LABEL != 0 || MUTATE_P), generate branchy recovery check.
8122 Otherwise, generate a simple check. */
8124 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
8126 rtx op1, pat, check_pat;
8127 gen_func_t gen_check;
8130 mode_no = get_mode_no_for_insn (insn);
8131 gcc_assert (mode_no >= 0);
8137 gcc_assert (!ia64_needs_block_p (ds));
8138 op1 = copy_rtx (recog_data.operand[1]);
8141 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8144 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8146 pat = PATTERN (insn);
8147 if (GET_CODE (pat) == COND_EXEC)
8148 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8154 /* Return nonzero if X is a branchy recovery check. */
8156 ia64_spec_check_p (rtx x)
8159 if (GET_CODE (x) == COND_EXEC)
8160 x = COND_EXEC_CODE (x);
8161 if (GET_CODE (x) == SET)
8162 return ia64_spec_check_src_p (SET_SRC (x));
8166 /* Return nonzero if SRC belongs to a recovery check. */
8168 ia64_spec_check_src_p (rtx src)
8170 if (GET_CODE (src) == IF_THEN_ELSE)
8175 if (GET_CODE (t) == NE)
8179 if (GET_CODE (t) == UNSPEC)
8185 if (code == UNSPEC_LDCCLR
8186 || code == UNSPEC_LDCNC
8187 || code == UNSPEC_CHKACLR
8188 || code == UNSPEC_CHKANC
8189 || code == UNSPEC_CHKS)
8191 gcc_assert (code != 0);
8201 /* The following page contains abstract data `bundle states' which are
8202 used for bundling insns (inserting nops and template generation). */
8204 /* The following describes state of insn bundling. */
8208 /* Unique bundle state number to identify them in the debugging
8211 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
8212 /* number of nops before and after the insn */
8213 short before_nops_num, after_nops_num;
8214 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
8216 int cost; /* cost of the state in cycles */
8217 int accumulated_insns_num; /* number of all previous insns including
8218 nops. An L insn is counted as 2 insns */
8219 int branch_deviation; /* deviation of previous branches from 3rd slots */
8220 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8221 struct bundle_state *next; /* next state with the same insn_num */
8222 struct bundle_state *originator; /* originator (previous insn state) */
8223 /* All bundle states are in the following chain. */
8224 struct bundle_state *allocated_states_chain;
8225 /* The DFA State after issuing the insn and the nops. */
8229 /* The following maps an insn number to the corresponding bundle state. */
8231 static struct bundle_state **index_to_bundle_states;
8233 /* The unique number of the next bundle state. */
8235 static int bundle_states_num;
8237 /* All allocated bundle states are in the following chain. */
8239 static struct bundle_state *allocated_bundle_states_chain;
8241 /* All allocated but not used bundle states are in the following
8244 static struct bundle_state *free_bundle_state_chain;
8247 /* The following function returns a free bundle state. */
8249 static struct bundle_state *
8250 get_free_bundle_state (void)
8252 struct bundle_state *result;
8254 if (free_bundle_state_chain != NULL)
8256 result = free_bundle_state_chain;
8257 free_bundle_state_chain = result->next;
8261 result = XNEW (struct bundle_state);
8262 result->dfa_state = xmalloc (dfa_state_size);
8263 result->allocated_states_chain = allocated_bundle_states_chain;
8264 allocated_bundle_states_chain = result;
8266 result->unique_num = bundle_states_num++;
8271 /* The following function frees the given bundle state. */
8274 free_bundle_state (struct bundle_state *state)
8276 state->next = free_bundle_state_chain;
8277 free_bundle_state_chain = state;
8280 /* Start work with abstract data `bundle states'. */
8283 initiate_bundle_states (void)
8285 bundle_states_num = 0;
8286 free_bundle_state_chain = NULL;
8287 allocated_bundle_states_chain = NULL;
8290 /* Finish work with abstract data `bundle states'. */
8293 finish_bundle_states (void)
8295 struct bundle_state *curr_state, *next_state;
8297 for (curr_state = allocated_bundle_states_chain;
8299 curr_state = next_state)
8301 next_state = curr_state->allocated_states_chain;
8302 free (curr_state->dfa_state);
8307 /* Hash table of the bundle states. The key is dfa_state and insn_num
8308 of the bundle states. */
8310 static htab_t bundle_state_table;
8312 /* The function returns the hash of BUNDLE_STATE. */
8315 bundle_state_hash (const void *bundle_state)
8317 const struct bundle_state *const state
8318 = (const struct bundle_state *) bundle_state;
8321 for (result = i = 0; i < dfa_state_size; i++)
8322 result += (((unsigned char *) state->dfa_state) [i]
8323 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8324 return result + state->insn_num;
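/* Standalone sketch (hypothetical name, not GCC code) of the hash used
   above: every byte of the automaton state is mixed in at a shift that
   depends on its index, and the insn number is added at the end; two
   bundle states compare equal only when both the automaton state and the
   insn number match (see bundle_state_eq_p below).  */
#if 0
#include <limits.h>
#include <stddef.h>

static unsigned int
state_hash (const unsigned char *dfa_state, size_t size, int insn_num)
{
  unsigned int result = 0;
  size_t i;

  for (i = 0; i < size; i++)
    result += (unsigned int) dfa_state[i] << ((i % CHAR_BIT) * 3 + CHAR_BIT);
  return result + (unsigned int) insn_num;
}
#endif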
8327 /* The function returns nonzero if the bundle state keys are equal. */
8330 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
8332 const struct bundle_state *const state1
8333 = (const struct bundle_state *) bundle_state_1;
8334 const struct bundle_state *const state2
8335 = (const struct bundle_state *) bundle_state_2;
8337 return (state1->insn_num == state2->insn_num
8338 && memcmp (state1->dfa_state, state2->dfa_state,
8339 dfa_state_size) == 0);
8342 /* The function inserts the BUNDLE_STATE into the hash table. The
8343 function returns nonzero if the bundle has been inserted into the
8344 table. The table contains the best bundle state with a given key. */
8347 insert_bundle_state (struct bundle_state *bundle_state)
8351 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
8352 if (*entry_ptr == NULL)
8354 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8355 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8356 *entry_ptr = (void *) bundle_state;
8359 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
8360 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
8361 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
8362 > bundle_state->accumulated_insns_num
8363 || (((struct bundle_state *)
8364 *entry_ptr)->accumulated_insns_num
8365 == bundle_state->accumulated_insns_num
8366 && (((struct bundle_state *)
8367 *entry_ptr)->branch_deviation
8368 > bundle_state->branch_deviation
8369 || (((struct bundle_state *)
8370 *entry_ptr)->branch_deviation
8371 == bundle_state->branch_deviation
8372 && ((struct bundle_state *)
8373 *entry_ptr)->middle_bundle_stops
8374 > bundle_state->middle_bundle_stops))))))
8377 struct bundle_state temp;
8379 temp = *(struct bundle_state *) *entry_ptr;
8380 *(struct bundle_state *) *entry_ptr = *bundle_state;
8381 ((struct bundle_state *) *entry_ptr)->next = temp.next;
8382 *bundle_state = temp;
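/* Standalone sketch (hypothetical name, not GCC code) of the ordering that
   the nested condition above implements: bundle states with the same key
   are compared lexicographically on (cost, accumulated_insns_num,
   branch_deviation, middle_bundle_stops), and the newly generated state
   replaces the stored one only if it is strictly better.  */
#if 0
struct bundle_metrics
{
  int cost;
  int accumulated_insns_num;
  int branch_deviation;
  int middle_bundle_stops;
};

static int
new_state_is_better_p (const struct bundle_metrics *new_st,
		       const struct bundle_metrics *old_st)
{
  if (new_st->cost != old_st->cost)
    return new_st->cost < old_st->cost;
  if (new_st->accumulated_insns_num != old_st->accumulated_insns_num)
    return new_st->accumulated_insns_num < old_st->accumulated_insns_num;
  if (new_st->branch_deviation != old_st->branch_deviation)
    return new_st->branch_deviation < old_st->branch_deviation;
  return new_st->middle_bundle_stops < old_st->middle_bundle_stops;
}
#endif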
8387 /* Start work with the hash table. */
8390 initiate_bundle_state_table (void)
8392 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
8396 /* Finish work with the hash table. */
8399 finish_bundle_state_table (void)
8401 htab_delete (bundle_state_table);
8406 /* The following variable is an insn `nop' used to check bundle states
8407 with different numbers of inserted nops. */
8409 static rtx ia64_nop;
8411 /* The following function tries to issue NOPS_NUM nops for the current
8412 state without advancing the processor cycle. If it fails, the
8413 function returns FALSE and frees the current state. */
8416 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8420 for (i = 0; i < nops_num; i++)
8421 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8423 free_bundle_state (curr_state);
8429 /* The following function tries to issue INSN for the current
8430 state without advancing the processor cycle. If it fails, the
8431 function returns FALSE and frees the current state. */
8434 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8436 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8438 free_bundle_state (curr_state);
8444 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8445 starting with ORIGINATOR without advancing the processor cycle. If
8446 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
8447 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
8448 If it is successful, the function creates a new bundle state and
8449 inserts it into the hash table and into `index_to_bundle_states'. */
8452 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8453 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8455 struct bundle_state *curr_state;
8457 curr_state = get_free_bundle_state ();
8458 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8459 curr_state->insn = insn;
8460 curr_state->insn_num = originator->insn_num + 1;
8461 curr_state->cost = originator->cost;
8462 curr_state->originator = originator;
8463 curr_state->before_nops_num = before_nops_num;
8464 curr_state->after_nops_num = 0;
8465 curr_state->accumulated_insns_num
8466 = originator->accumulated_insns_num + before_nops_num;
8467 curr_state->branch_deviation = originator->branch_deviation;
8468 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8470 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8472 gcc_assert (GET_MODE (insn) != TImode);
8473 if (!try_issue_nops (curr_state, before_nops_num))
8475 if (!try_issue_insn (curr_state, insn))
8477 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8478 if (curr_state->accumulated_insns_num % 3 != 0)
8479 curr_state->middle_bundle_stops++;
8480 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8481 && curr_state->accumulated_insns_num % 3 != 0)
8483 free_bundle_state (curr_state);
8487 else if (GET_MODE (insn) != TImode)
8489 if (!try_issue_nops (curr_state, before_nops_num))
8491 if (!try_issue_insn (curr_state, insn))
8493 curr_state->accumulated_insns_num++;
8494 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
8495 && asm_noperands (PATTERN (insn)) < 0);
8497 if (ia64_safe_type (insn) == TYPE_L)
8498 curr_state->accumulated_insns_num++;
8502 /* If this is an insn that must be first in a group, then don't allow
8503 nops to be emitted before it. Currently, alloc is the only such
8504 supported instruction. */
8505 /* ??? The bundling automatons should handle this for us, but they do
8506 not yet have support for the first_insn attribute. */
8507 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8509 free_bundle_state (curr_state);
8513 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8514 state_transition (curr_state->dfa_state, NULL);
8516 if (!try_issue_nops (curr_state, before_nops_num))
8518 if (!try_issue_insn (curr_state, insn))
8520 curr_state->accumulated_insns_num++;
8521 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
8522 || asm_noperands (PATTERN (insn)) >= 0)
8524 /* Finish bundle containing asm insn. */
8525 curr_state->after_nops_num
8526 = 3 - curr_state->accumulated_insns_num % 3;
8527 curr_state->accumulated_insns_num
8528 += 3 - curr_state->accumulated_insns_num % 3;
8530 else if (ia64_safe_type (insn) == TYPE_L)
8531 curr_state->accumulated_insns_num++;
8533 if (ia64_safe_type (insn) == TYPE_B)
8534 curr_state->branch_deviation
8535 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8536 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8538 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8541 struct bundle_state *curr_state1;
8542 struct bundle_state *allocated_states_chain;
8544 curr_state1 = get_free_bundle_state ();
8545 dfa_state = curr_state1->dfa_state;
8546 allocated_states_chain = curr_state1->allocated_states_chain;
8547 *curr_state1 = *curr_state;
8548 curr_state1->dfa_state = dfa_state;
8549 curr_state1->allocated_states_chain = allocated_states_chain;
8550 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8552 curr_state = curr_state1;
8554 if (!try_issue_nops (curr_state,
8555 3 - curr_state->accumulated_insns_num % 3))
8557 curr_state->after_nops_num
8558 = 3 - curr_state->accumulated_insns_num % 3;
8559 curr_state->accumulated_insns_num
8560 += 3 - curr_state->accumulated_insns_num % 3;
8562 if (!insert_bundle_state (curr_state))
8563 free_bundle_state (curr_state);
8567 /* The following function returns the position in the two-window bundle
8571 get_max_pos (state_t state)
8573 if (cpu_unit_reservation_p (state, pos_6))
8575 else if (cpu_unit_reservation_p (state, pos_5))
8577 else if (cpu_unit_reservation_p (state, pos_4))
8579 else if (cpu_unit_reservation_p (state, pos_3))
8581 else if (cpu_unit_reservation_p (state, pos_2))
8583 else if (cpu_unit_reservation_p (state, pos_1))
8589 /* The function returns the code of a possible template for the given position
8590 and state. The function should be called only with 2 values of
8591 position equal to 3 or 6. We avoid generating F NOPs by putting
8592 templates containing F insns at the end of the template search
8593 because of an undocumented anomaly in McKinley-derived cores which can
8594 cause stalls if an F-unit insn (including a NOP) is issued within a
8595 six-cycle window after reading certain application registers (such
8596 as ar.bsp). Furthermore, power considerations also argue against
8597 the use of F-unit instructions unless they're really needed. */
8600 get_template (state_t state, int pos)
8605 if (cpu_unit_reservation_p (state, _0mmi_))
8607 else if (cpu_unit_reservation_p (state, _0mii_))
8609 else if (cpu_unit_reservation_p (state, _0mmb_))
8611 else if (cpu_unit_reservation_p (state, _0mib_))
8613 else if (cpu_unit_reservation_p (state, _0mbb_))
8615 else if (cpu_unit_reservation_p (state, _0bbb_))
8617 else if (cpu_unit_reservation_p (state, _0mmf_))
8619 else if (cpu_unit_reservation_p (state, _0mfi_))
8621 else if (cpu_unit_reservation_p (state, _0mfb_))
8623 else if (cpu_unit_reservation_p (state, _0mlx_))
8628 if (cpu_unit_reservation_p (state, _1mmi_))
8630 else if (cpu_unit_reservation_p (state, _1mii_))
8632 else if (cpu_unit_reservation_p (state, _1mmb_))
8634 else if (cpu_unit_reservation_p (state, _1mib_))
8636 else if (cpu_unit_reservation_p (state, _1mbb_))
8638 else if (cpu_unit_reservation_p (state, _1bbb_))
8640 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8642 else if (cpu_unit_reservation_p (state, _1mfi_))
8644 else if (cpu_unit_reservation_p (state, _1mfb_))
8646 else if (cpu_unit_reservation_p (state, _1mlx_))
8655 /* True when INSN is important for bundling. */
8657 important_for_bundling_p (rtx insn)
8659 return (INSN_P (insn)
8660 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8661 && GET_CODE (PATTERN (insn)) != USE
8662 && GET_CODE (PATTERN (insn)) != CLOBBER);
8665 /* The following function returns the first insn important for insn bundling,
8666 starting at INSN and before TAIL. */
8669 get_next_important_insn (rtx insn, rtx tail)
8671 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8672 if (important_for_bundling_p (insn))
8677 /* Add a bundle selector TEMPLATE0 before INSN. */
8680 ia64_add_bundle_selector_before (int template0, rtx insn)
8682 rtx b = gen_bundle_selector (GEN_INT (template0));
8684 ia64_emit_insn_before (b, insn);
8685 #if NR_BUNDLES == 10
8686 if ((template0 == 4 || template0 == 5)
8687 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8690 rtx note = NULL_RTX;
8692 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
8693 first or second slot. If it is and has REG_EH_NOTE set, copy it
8694 to following nops, as br.call sets rp to the address of following
8695 bundle and therefore an EH region end must be on a bundle
8697 insn = PREV_INSN (insn);
8698 for (i = 0; i < 3; i++)
8701 insn = next_active_insn (insn);
8702 while (GET_CODE (insn) == INSN
8703 && get_attr_empty (insn) == EMPTY_YES);
8704 if (GET_CODE (insn) == CALL_INSN)
8705 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8710 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8711 || code == CODE_FOR_nop_b);
8712 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8715 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8722 /* The following function does insn bundling. Bundling means
8723 inserting templates and nop insns to fit insn groups into permitted
8724 templates. Instruction scheduling uses NDFA (non-deterministic
8725 finite automata) encoding information about the templates and the
8726 inserted nops.  Nondeterminism of the automata permits following
8727 all possible insn sequences very quickly.
8729 Unfortunately it is not possible to get information about inserting
8730 nop insns and used templates from the automata states. The
8731 automaton only says that we can issue an insn, possibly inserting
8732 some nops before it and using some template. Therefore insn
8733 bundling in this function is implemented by using DFA
8734 (deterministic finite automata). We follow all possible insn
8735 sequences by inserting 0-2 nops (that is what the NDFA describe for
8736 insn scheduling) before/after each insn being bundled. We know the
8737 start of simulated processor cycle from insn scheduling (insn
8738 starting a new cycle has TImode).
8740 A simple implementation of insn bundling would create an enormous
8741 number of possible insn sequences satisfying the information about new
8742 cycle ticks taken from the insn scheduling. To make the algorithm
8743 practical we use dynamic programming. Each decision (about
8744 inserting nops and implicitly about previous decisions) is described
8745 by structure bundle_state (see above). If we generate the same
8746 bundle state (key is automaton state after issuing the insns and
8747 nops for it), we reuse the already generated one.  As a consequence we
8748 reject some decisions which cannot improve the solution and
8749 reduce the memory used by the algorithm.
8751 When we reach the end of the EBB (extended basic block), we choose the
8752 best sequence and then, moving back through the EBB, insert templates for
8753 the best alternative.  The templates are taken by querying the
8754 automaton state for each insn in the chosen bundle states.
8756 So the algorithm makes two (forward and backward) passes through
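/* As a rough illustration (the exact template set and costs come from
   the automaton in the .md file, so treat the details as approximate):
   for an EBB like

       ld8 r14 = [r32]        // starts a new cycle, so it has TImode
       add r15 = r14, r16
       st8 [r33] = r15

   the forward pass tries to issue each insn preceded by 0, 1 or 2 nops,
   producing candidate bundle_states such as an .mii bundle with a nop in
   an unused slot or an .mmi bundle followed by a stop.  States that reach
   the same automaton state are merged, and the backward pass then walks
   the originator chain of the best final state, inserting the chosen
   bundle selectors and nops in front of the corresponding insns.  */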
8760 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8762 struct bundle_state *curr_state, *next_state, *best_state;
8763 rtx insn, next_insn;
8765 int i, bundle_end_p, only_bundle_end_p, asm_p;
8766 int pos = 0, max_pos, template0, template1;
8769 enum attr_type type;
8772 /* Count insns in the EBB. */
8773 for (insn = NEXT_INSN (prev_head_insn);
8774 insn && insn != tail;
8775 insn = NEXT_INSN (insn))
8781 dfa_clean_insn_cache ();
8782 initiate_bundle_state_table ();
8783 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
8784 /* First (forward) pass -- generation of bundle states. */
8785 curr_state = get_free_bundle_state ();
8786 curr_state->insn = NULL;
8787 curr_state->before_nops_num = 0;
8788 curr_state->after_nops_num = 0;
8789 curr_state->insn_num = 0;
8790 curr_state->cost = 0;
8791 curr_state->accumulated_insns_num = 0;
8792 curr_state->branch_deviation = 0;
8793 curr_state->middle_bundle_stops = 0;
8794 curr_state->next = NULL;
8795 curr_state->originator = NULL;
8796 state_reset (curr_state->dfa_state);
8797 index_to_bundle_states [0] = curr_state;
8799 /* Shift the cycle mark if it is put on an insn which could be ignored.  */
8800 for (insn = NEXT_INSN (prev_head_insn);
8802 insn = NEXT_INSN (insn))
8804 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
8805 || GET_CODE (PATTERN (insn)) == USE
8806 || GET_CODE (PATTERN (insn)) == CLOBBER)
8807 && GET_MODE (insn) == TImode)
8809 PUT_MODE (insn, VOIDmode);
8810 for (next_insn = NEXT_INSN (insn);
8812 next_insn = NEXT_INSN (next_insn))
8813 if (INSN_P (next_insn)
8814 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
8815 && GET_CODE (PATTERN (next_insn)) != USE
8816 && GET_CODE (PATTERN (next_insn)) != CLOBBER
8817 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
8819 PUT_MODE (next_insn, TImode);
8823 /* Forward pass: generation of bundle states. */
8824 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8828 gcc_assert (INSN_P (insn)
8829 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8830 && GET_CODE (PATTERN (insn)) != USE
8831 && GET_CODE (PATTERN (insn)) != CLOBBER);
8832 type = ia64_safe_type (insn);
8833 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8835 index_to_bundle_states [insn_num] = NULL;
8836 for (curr_state = index_to_bundle_states [insn_num - 1];
8838 curr_state = next_state)
8840 pos = curr_state->accumulated_insns_num % 3;
8841 next_state = curr_state->next;
8842 /* We must fill up the current bundle in order to start a
8843 subsequent asm insn in a new bundle.  An asm insn is always
8844 placed in a separate bundle.  */
8846 = (next_insn != NULL_RTX
8847 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
8848 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
8849 /* We may fill up the current bundle if it is the cycle end
8850 without a group barrier. */
8852 = (only_bundle_end_p || next_insn == NULL_RTX
8853 || (GET_MODE (next_insn) == TImode
8854 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8855 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8857 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8859 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8861 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8864 gcc_assert (index_to_bundle_states [insn_num]);
8865 for (curr_state = index_to_bundle_states [insn_num];
8867 curr_state = curr_state->next)
8868 if (verbose >= 2 && dump)
8870 /* This structure is taken from the generated code of the
8871 pipeline hazard recognizer (see file insn-attrtab.c).
8872 Please don't forget to change this structure if a new
8873 automaton is added to the .md file.  */
8876 unsigned short one_automaton_state;
8877 unsigned short oneb_automaton_state;
8878 unsigned short two_automaton_state;
8879 unsigned short twob_automaton_state;
8884 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
8885 curr_state->unique_num,
8886 (curr_state->originator == NULL
8887 ? -1 : curr_state->originator->unique_num),
8889 curr_state->before_nops_num, curr_state->after_nops_num,
8890 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8891 curr_state->middle_bundle_stops,
8892 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8897 /* We should find a solution because the 2nd insn scheduling has found one.  */
8899 gcc_assert (index_to_bundle_states [insn_num]);
8900 /* Find a state corresponding to the best insn sequence. */
8902 for (curr_state = index_to_bundle_states [insn_num];
8904 curr_state = curr_state->next)
8905 /* We consider only the states whose last bundle is completely
8906 filled.  First we prefer insn sequences with minimal cost,
8907 then those with the fewest inserted nops, and finally those with
8908 branch insns placed in the 3rd slots.  */
8909 if (curr_state->accumulated_insns_num % 3 == 0
8910 && (best_state == NULL || best_state->cost > curr_state->cost
8911 || (best_state->cost == curr_state->cost
8912 && (curr_state->accumulated_insns_num
8913 < best_state->accumulated_insns_num
8914 || (curr_state->accumulated_insns_num
8915 == best_state->accumulated_insns_num
8916 && (curr_state->branch_deviation
8917 < best_state->branch_deviation
8918 || (curr_state->branch_deviation
8919 == best_state->branch_deviation
8920 && curr_state->middle_bundle_stops
8921 < best_state->middle_bundle_stops)))))))
8922 best_state = curr_state;
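/* In other words, the loop above performs a lexicographic minimization
   over (cost, accumulated_insns_num, branch_deviation,
   middle_bundle_stops), restricted to states whose last bundle is
   completely filled.  */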
8923 /* Second (backward) pass: adding nops and templates. */
8924 gcc_assert (best_state);
8925 insn_num = best_state->before_nops_num;
8926 template0 = template1 = -1;
8927 for (curr_state = best_state;
8928 curr_state->originator != NULL;
8929 curr_state = curr_state->originator)
8931 insn = curr_state->insn;
8932 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8933 || asm_noperands (PATTERN (insn)) >= 0);
8935 if (verbose >= 2 && dump)
8939 unsigned short one_automaton_state;
8940 unsigned short oneb_automaton_state;
8941 unsigned short two_automaton_state;
8942 unsigned short twob_automaton_state;
8947 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
8948 curr_state->unique_num,
8949 (curr_state->originator == NULL
8950 ? -1 : curr_state->originator->unique_num),
8952 curr_state->before_nops_num, curr_state->after_nops_num,
8953 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8954 curr_state->middle_bundle_stops,
8955 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8958 /* Find the position in the current bundle window.  The window can
8959 contain at most two bundles.  A two-bundle window means that
8960 the processor will make two bundle rotations.  */
8961 max_pos = get_max_pos (curr_state->dfa_state);
8963 /* The following (negative template number) means that the
8964 processor did one bundle rotation. */
8965 || (max_pos == 3 && template0 < 0))
8967 /* We are at the end of the window -- find template(s) for its bundle(s).  */
8971 template0 = get_template (curr_state->dfa_state, 3);
8974 template1 = get_template (curr_state->dfa_state, 3);
8975 template0 = get_template (curr_state->dfa_state, 6);
8978 if (max_pos > 3 && template1 < 0)
8979 /* This may happen when we have a stop inside a bundle.  */
8981 gcc_assert (pos <= 3);
8982 template1 = get_template (curr_state->dfa_state, 3);
8986 /* Emit nops after the current insn. */
8987 for (i = 0; i < curr_state->after_nops_num; i++)
8990 emit_insn_after (nop, insn);
8992 gcc_assert (pos >= 0);
8995 /* We are at the start of a bundle: emit the template
8996 (it should be defined). */
8997 gcc_assert (template0 >= 0);
8998 ia64_add_bundle_selector_before (template0, nop);
8999 /* If we have a two-bundle window, we make one bundle
9000 rotation.  Otherwise template0 will be undefined
9001 (a negative value).  */
9002 template0 = template1;
9006 /* Move the position backward in the window.  A group barrier has
9007 no slot.  An asm insn takes a whole bundle.  */
9008 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9009 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9010 && asm_noperands (PATTERN (insn)) < 0)
9012 /* A long insn takes 2 slots.  */
9013 if (ia64_safe_type (insn) == TYPE_L)
9015 gcc_assert (pos >= 0);
9017 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9018 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9019 && asm_noperands (PATTERN (insn)) < 0)
9021 /* The current insn is at the bundle start: emit the template.  */
9023 gcc_assert (template0 >= 0);
9024 ia64_add_bundle_selector_before (template0, insn);
9025 b = PREV_INSN (insn);
9027 /* See comment above in analogous place for emitting nops after the insn.  */
9029 template0 = template1;
9032 /* Emit nops before the current insn.  */
9033 for (i = 0; i < curr_state->before_nops_num; i++)
9036 ia64_emit_insn_before (nop, insn);
9037 nop = PREV_INSN (insn);
9040 gcc_assert (pos >= 0);
9043 /* See comment above in analogous place for emitting nops after the insn.  */
9045 gcc_assert (template0 >= 0);
9046 ia64_add_bundle_selector_before (template0, insn);
9047 b = PREV_INSN (insn);
9049 template0 = template1;
9055 #ifdef ENABLE_CHECKING
9057 /* Verify that middle_bundle_stops was computed correctly.  */
9058 int num = best_state->middle_bundle_stops;
9059 bool start_bundle = true, end_bundle = false;
9061 for (insn = NEXT_INSN (prev_head_insn);
9062 insn && insn != tail;
9063 insn = NEXT_INSN (insn))
9067 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9068 start_bundle = true;
9073 for (next_insn = NEXT_INSN (insn);
9074 next_insn && next_insn != tail;
9075 next_insn = NEXT_INSN (next_insn))
9076 if (INSN_P (next_insn)
9077 && (ia64_safe_itanium_class (next_insn)
9078 != ITANIUM_CLASS_IGNORE
9079 || recog_memoized (next_insn)
9080 == CODE_FOR_bundle_selector)
9081 && GET_CODE (PATTERN (next_insn)) != USE
9082 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9085 end_bundle = next_insn == NULL_RTX
9086 || next_insn == tail
9087 || (INSN_P (next_insn)
9088 && recog_memoized (next_insn)
9089 == CODE_FOR_bundle_selector);
9090 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9091 && !start_bundle && !end_bundle
9093 && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
9094 && asm_noperands (PATTERN (next_insn)) < 0)
9097 start_bundle = false;
9101 gcc_assert (num == 0);
9105 free (index_to_bundle_states);
9106 finish_bundle_state_table ();
9108 dfa_clean_insn_cache ();
9111 /* The following function is called at the end of scheduling BB or
9112 EBB. After reload, it inserts stop bits and does insn bundling. */
9115 ia64_sched_finish (FILE *dump, int sched_verbose)
9118 fprintf (dump, "// Finishing schedule.\n");
9119 if (!reload_completed)
9121 if (reload_completed)
9123 final_emit_insn_group_barriers (dump);
9124 bundling (dump, sched_verbose, current_sched_info->prev_head,
9125 current_sched_info->next_tail);
9126 if (sched_verbose && dump)
9127 fprintf (dump, "// finishing %d-%d\n",
9128 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9129 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9135 /* The following function inserts stop bits in a scheduled BB or EBB.  */
9138 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9141 int need_barrier_p = 0;
9142 int seen_good_insn = 0;
9144 init_insn_group_barriers ();
9146 for (insn = NEXT_INSN (current_sched_info->prev_head);
9147 insn != current_sched_info->next_tail;
9148 insn = NEXT_INSN (insn))
9150 if (GET_CODE (insn) == BARRIER)
9152 rtx last = prev_active_insn (insn);
9156 if (GET_CODE (last) == JUMP_INSN
9157 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
9158 last = prev_active_insn (last);
9159 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9160 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9162 init_insn_group_barriers ();
9166 else if (NONDEBUG_INSN_P (insn))
9168 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9170 init_insn_group_barriers ();
9174 else if (need_barrier_p || group_barrier_needed (insn)
9175 || (mflag_sched_stop_bits_after_every_cycle
9176 && GET_MODE (insn) == TImode
9179 if (TARGET_EARLY_STOP_BITS)
9184 last != current_sched_info->prev_head;
9185 last = PREV_INSN (last))
9186 if (INSN_P (last) && GET_MODE (last) == TImode
9187 && stops_p [INSN_UID (last)])
9189 if (last == current_sched_info->prev_head)
9191 last = prev_active_insn (last);
9193 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9194 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9196 init_insn_group_barriers ();
9197 for (last = NEXT_INSN (last);
9199 last = NEXT_INSN (last))
9202 group_barrier_needed (last);
9203 if (recog_memoized (last) >= 0
9204 && important_for_bundling_p (last))
9210 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9212 init_insn_group_barriers ();
9215 group_barrier_needed (insn);
9216 if (recog_memoized (insn) >= 0
9217 && important_for_bundling_p (insn))
9220 else if (recog_memoized (insn) >= 0
9221 && important_for_bundling_p (insn))
9223 need_barrier_p = (GET_CODE (insn) == CALL_INSN
9224 || GET_CODE (PATTERN (insn)) == ASM_INPUT
9225 || asm_noperands (PATTERN (insn)) >= 0);
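/* The insn_group_barrier insns emitted above are what ultimately become
   the architectural stop bits (printed as ";;" in the assembly output),
   so every point where the DFA detected a dependence or resource
   conflict ends up with an explicit cycle break.  */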
9232 /* If the following function returns TRUE, we will use the DFA insn scheduler.  */
9236 ia64_first_cycle_multipass_dfa_lookahead (void)
9238 return (reload_completed ? 6 : 4);
9241 /* The following function initiates variable `dfa_pre_cycle_insn'. */
9244 ia64_init_dfa_pre_cycle_insn (void)
9246 if (temp_dfa_state == NULL)
9248 dfa_state_size = state_size ();
9249 temp_dfa_state = xmalloc (dfa_state_size);
9250 prev_cycle_state = xmalloc (dfa_state_size);
9252 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9253 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9254 recog_memoized (dfa_pre_cycle_insn);
9255 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9256 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9257 recog_memoized (dfa_stop_insn);
9260 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9261 used by the DFA insn scheduler. */
9264 ia64_dfa_pre_cycle_insn (void)
9266 return dfa_pre_cycle_insn;
9269 /* The following function returns TRUE if PRODUCER (of type ilog or
9270 ld) produces the address for CONSUMER (of type st or stf).  */
9273 ia64_st_address_bypass_p (rtx producer, rtx consumer)
9277 gcc_assert (producer && consumer);
9278 dest = ia64_single_set (producer);
9280 reg = SET_DEST (dest);
9282 if (GET_CODE (reg) == SUBREG)
9283 reg = SUBREG_REG (reg);
9284 gcc_assert (GET_CODE (reg) == REG);
9286 dest = ia64_single_set (consumer);
9288 mem = SET_DEST (dest);
9289 gcc_assert (mem && GET_CODE (mem) == MEM);
9290 return reg_mentioned_p (reg, mem);
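/* A hypothetical producer/consumer pair for the bypass above:

       add r14 = r32, r33        // PRODUCER, itanium class ilog
       st8 [r14] = r35           // CONSUMER, uses r14 as the address

   The producer's destination register appears inside the consumer's
   memory address, so reg_mentioned_p returns true and the special
   address bypass latency from the .md file applies.  */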
9293 /* The following function returns TRUE if PRODUCER (of type ilog or
9294 ld) produces the address for CONSUMER (of type ld or fld).  */
9297 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
9299 rtx dest, src, reg, mem;
9301 gcc_assert (producer && consumer);
9302 dest = ia64_single_set (producer);
9304 reg = SET_DEST (dest);
9306 if (GET_CODE (reg) == SUBREG)
9307 reg = SUBREG_REG (reg);
9308 gcc_assert (GET_CODE (reg) == REG);
9310 src = ia64_single_set (consumer);
9312 mem = SET_SRC (src);
9315 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9316 mem = XVECEXP (mem, 0, 0);
9317 else if (GET_CODE (mem) == IF_THEN_ELSE)
9318 /* ??? Is this bypass necessary for ld.c? */
9320 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9321 mem = XEXP (mem, 1);
9324 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9325 mem = XEXP (mem, 0);
9327 if (GET_CODE (mem) == UNSPEC)
9329 int c = XINT (mem, 1);
9331 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9332 || c == UNSPEC_LDSA);
9333 mem = XVECEXP (mem, 0, 0);
9336 /* Note that LO_SUM is used for GOT loads. */
9337 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9339 return reg_mentioned_p (reg, mem);
9342 /* The following function returns TRUE if INSN produces an address for a
9343 load/store insn.  We will place such insns into an M slot because that
9344 decreases their latency.  */
9347 ia64_produce_address_p (rtx insn)
9353 /* Emit pseudo-ops for the assembler to describe predicate relations.
9354 At present this assumes that we only consider predicate pairs to
9355 be mutex, and that the assembler can deduce proper values from
9356 straight-line code. */
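/* For instance, if p6 and p7 are only ever set as the two targets of a
   single compare in this block, the pred_rel_mutex insn emitted below is
   expected to expand to an annotation roughly of the form
   .pred.rel "mutex", p6, p7 (the exact text comes from the corresponding
   pattern in the .md file), telling the assembler that at most one of
   the two predicates can be true at any point.  */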
9359 emit_predicate_relation_info (void)
9363 FOR_EACH_BB_REVERSE (bb)
9366 rtx head = BB_HEAD (bb);
9368 /* We only need such notes at code labels. */
9369 if (GET_CODE (head) != CODE_LABEL)
9371 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9372 head = NEXT_INSN (head);
9374 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9375 grabbing the entire block of predicate registers. */
9376 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9377 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9379 rtx p = gen_rtx_REG (BImode, r);
9380 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9381 if (head == BB_END (bb))
9387 /* Look for conditional calls that do not return, and protect predicate
9388 relations around them.  Otherwise the assembler will assume the call
9389 returns, and complain about uses of call-clobbered predicates after the call.  */
9391 FOR_EACH_BB_REVERSE (bb)
9393 rtx insn = BB_HEAD (bb);
9397 if (GET_CODE (insn) == CALL_INSN
9398 && GET_CODE (PATTERN (insn)) == COND_EXEC
9399 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9401 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9402 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9403 if (BB_HEAD (bb) == insn)
9405 if (BB_END (bb) == insn)
9409 if (insn == BB_END (bb))
9411 insn = NEXT_INSN (insn);
9416 /* Perform machine dependent operations on the rtl chain INSNS. */
9421 /* We are freeing block_for_insn in the toplev to keep compatibility
9422 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9423 compute_bb_for_insn ();
9425 /* If optimizing, we'll have split before scheduling. */
9429 if (optimize && flag_schedule_insns_after_reload
9430 && dbg_cnt (ia64_sched2))
9433 timevar_push (TV_SCHED2);
9434 ia64_final_schedule = 1;
9436 /* We can't let modulo-sched prevent us from scheduling any bbs,
9437 since we need the final schedule to produce bundle information. */
9439 bb->flags &= ~BB_DISABLE_SCHEDULE;
9441 initiate_bundle_states ();
9442 ia64_nop = make_insn_raw (gen_nop ());
9443 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9444 recog_memoized (ia64_nop);
9445 clocks_length = get_max_uid () + 1;
9446 stops_p = XCNEWVEC (char, clocks_length);
9448 if (ia64_tune == PROCESSOR_ITANIUM2)
9450 pos_1 = get_cpu_unit_code ("2_1");
9451 pos_2 = get_cpu_unit_code ("2_2");
9452 pos_3 = get_cpu_unit_code ("2_3");
9453 pos_4 = get_cpu_unit_code ("2_4");
9454 pos_5 = get_cpu_unit_code ("2_5");
9455 pos_6 = get_cpu_unit_code ("2_6");
9456 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9457 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9458 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9459 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9460 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9461 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9462 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9463 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9464 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9465 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9466 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9467 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9468 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9469 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9470 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9471 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9472 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9473 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9474 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9475 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9479 pos_1 = get_cpu_unit_code ("1_1");
9480 pos_2 = get_cpu_unit_code ("1_2");
9481 pos_3 = get_cpu_unit_code ("1_3");
9482 pos_4 = get_cpu_unit_code ("1_4");
9483 pos_5 = get_cpu_unit_code ("1_5");
9484 pos_6 = get_cpu_unit_code ("1_6");
9485 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9486 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9487 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9488 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9489 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9490 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9491 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9492 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9493 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9494 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9495 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9496 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9497 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9498 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9499 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9500 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9501 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9502 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9503 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9504 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9507 if (flag_selective_scheduling2
9508 && !maybe_skip_selective_scheduling ())
9509 run_selective_scheduling ();
9513 /* Redo the alignment computation, as it might have gone wrong.  */
9514 compute_alignments ();
9516 /* We cannot reuse this one because it has been corrupted by the
9518 finish_bundle_states ();
9521 emit_insn_group_barriers (dump_file);
9523 ia64_final_schedule = 0;
9524 timevar_pop (TV_SCHED2);
9527 emit_all_insn_group_barriers (dump_file);
9531 /* A call must not be the last instruction in a function, so that the
9532 return address is still within the function and unwinding works
9533 properly.  Note that IA-64 differs from dwarf2 on this point.  */
9534 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9539 insn = get_last_insn ();
9540 if (! INSN_P (insn))
9541 insn = prev_active_insn (insn);
9544 /* Skip over insns that expand to nothing. */
9545 while (GET_CODE (insn) == INSN
9546 && get_attr_empty (insn) == EMPTY_YES)
9548 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9549 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9551 insn = prev_active_insn (insn);
9553 if (GET_CODE (insn) == CALL_INSN)
9556 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9557 emit_insn (gen_break_f ());
9558 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9563 emit_predicate_relation_info ();
9565 if (flag_var_tracking)
9567 timevar_push (TV_VAR_TRACKING);
9568 variable_tracking_main ();
9569 timevar_pop (TV_VAR_TRACKING);
9571 df_finish_pass (false);
9574 /* Return true if REGNO is used by the epilogue. */
9577 ia64_epilogue_uses (int regno)
9582 /* With a call to a function in another module, we will write a new
9583 value to "gp". After returning from such a call, we need to make
9584 sure the function restores the original gp-value, even if the
9585 function itself does not use the gp anymore. */
9586 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9588 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9589 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9590 /* For functions defined with the syscall_linkage attribute, all
9591 input registers are marked as live at all function exits. This
9592 prevents the register allocator from using the input registers,
9593 which in turn makes it possible to restart a system call after
9594 an interrupt without having to save/restore the input registers.
9595 This also prevents kernel data from leaking to application code. */
9596 return lookup_attribute ("syscall_linkage",
9597 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9600 /* Conditional return patterns can't represent the use of `b0' as
9601 the return address, so we force the value live this way. */
9605 /* Likewise for ar.pfs, which is used by br.ret. */
9613 /* Return true if REGNO is used by the frame unwinder. */
9616 ia64_eh_uses (int regno)
9620 if (! reload_completed)
9626 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9627 if (regno == current_frame_info.r[r]
9628 || regno == emitted_frame_related_regs[r])
9634 /* Return true if this goes in small data/bss. */
9636 /* ??? We could also support our own long data here, generating movl/add/ld8
9637 instead of addl,ld8/ld8.  This makes the code bigger, but should make the
9638 code faster because there is one less load.  This would also cover incomplete
9639 types which can't go in sdata/sbss.  */
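/* For instance, with a definition such as

       static long counter;

   whose size is at or below ia64_section_threshold, the object is placed
   in .sbss and can be addressed gp-relatively (addl plus a single load),
   whereas data outside the small-data area first needs its address loaded
   from the linkage table -- the addl,ld8/ld8 sequence mentioned above.  */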
9642 ia64_in_small_data_p (const_tree exp)
9644 if (TARGET_NO_SDATA)
9647 /* We want to merge strings, so we never consider them small data. */
9648 if (TREE_CODE (exp) == STRING_CST)
9651 /* Functions are never small data. */
9652 if (TREE_CODE (exp) == FUNCTION_DECL)
9655 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9657 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9659 if (strcmp (section, ".sdata") == 0
9660 || strncmp (section, ".sdata.", 7) == 0
9661 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9662 || strcmp (section, ".sbss") == 0
9663 || strncmp (section, ".sbss.", 6) == 0
9664 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9669 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9671 /* If this is an incomplete type with size 0, then we can't put it
9672 in sdata because it might be too big when completed. */
9673 if (size > 0 && size <= ia64_section_threshold)
9680 /* Output assembly directives for prologue regions. */
9682 /* The current basic block number. */
9684 static bool last_block;
9686 /* True if we need a copy_state command at the start of the next block. */
9688 static bool need_copy_state;
9690 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9691 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9694 /* This function emits unwind directives for the start of an epilogue.  */
9697 process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED,
9698 bool unwind, bool frame ATTRIBUTE_UNUSED)
9700 /* If this isn't the last block of the function, then we need to label the
9701 current state, and copy it back in at the start of the next block. */
9706 fprintf (asm_out_file, "\t.label_state %d\n",
9707 ++cfun->machine->state_num);
9708 need_copy_state = true;
9712 fprintf (asm_out_file, "\t.restore sp\n");
9715 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9718 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9719 bool unwind, bool frame)
9721 rtx dest = SET_DEST (pat);
9722 rtx src = SET_SRC (pat);
9724 if (dest == stack_pointer_rtx)
9726 if (GET_CODE (src) == PLUS)
9728 rtx op0 = XEXP (src, 0);
9729 rtx op1 = XEXP (src, 1);
9731 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9733 if (INTVAL (op1) < 0)
9735 gcc_assert (!frame_pointer_needed);
9737 fprintf (asm_out_file,
9738 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9742 process_epilogue (asm_out_file, insn, unwind, frame);
9746 gcc_assert (src == hard_frame_pointer_rtx);
9747 process_epilogue (asm_out_file, insn, unwind, frame);
9750 else if (dest == hard_frame_pointer_rtx)
9752 gcc_assert (src == stack_pointer_rtx);
9753 gcc_assert (frame_pointer_needed);
9756 fprintf (asm_out_file, "\t.vframe r%d\n",
9757 ia64_dbx_register_number (REGNO (dest)));
9763 /* This function processes a SET pattern for REG_CFA_REGISTER. */
9766 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
9768 rtx dest = SET_DEST (pat);
9769 rtx src = SET_SRC (pat);
9770 int dest_regno = REGNO (dest);
9775 /* Saving return address pointer. */
9777 fprintf (asm_out_file, "\t.save rp, r%d\n",
9778 ia64_dbx_register_number (dest_regno));
9782 src_regno = REGNO (src);
9787 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9789 fprintf (asm_out_file, "\t.save pr, r%d\n",
9790 ia64_dbx_register_number (dest_regno));
9793 case AR_UNAT_REGNUM:
9794 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9796 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9797 ia64_dbx_register_number (dest_regno));
9801 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9803 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9804 ia64_dbx_register_number (dest_regno));
9808 /* Everything else should indicate being stored to memory. */
9813 /* This function processes a SET pattern for REG_CFA_OFFSET. */
9816 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
9818 rtx dest = SET_DEST (pat);
9819 rtx src = SET_SRC (pat);
9820 int src_regno = REGNO (src);
9825 gcc_assert (MEM_P (dest));
9826 if (GET_CODE (XEXP (dest, 0)) == REG)
9828 base = XEXP (dest, 0);
9833 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9834 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9835 base = XEXP (XEXP (dest, 0), 0);
9836 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9839 if (base == hard_frame_pointer_rtx)
9841 saveop = ".savepsp";
9846 gcc_assert (base == stack_pointer_rtx);
9850 src_regno = REGNO (src);
9854 gcc_assert (!current_frame_info.r[reg_save_b0]);
9856 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
9861 gcc_assert (!current_frame_info.r[reg_save_pr]);
9863 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
9868 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9870 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
9875 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9877 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
9881 case AR_UNAT_REGNUM:
9882 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9884 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
9893 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9894 1 << (src_regno - GR_REG (4)));
9903 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9904 1 << (src_regno - BR_REG (1)));
9912 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9913 1 << (src_regno - FR_REG (2)));
9916 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9917 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9918 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9919 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9921 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9922 1 << (src_regno - FR_REG (12)));
9926 /* ??? For some reason we mark other general registers, even those
9927 we can't represent in the unwind info. Ignore them. */
9932 /* This function looks at a single insn and emits any directives
9933 required to unwind this insn. */
9936 ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
9938 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
9939 bool frame = dwarf2out_do_frame ();
9943 if (!unwind && !frame)
9946 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9948 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9950 /* Restore unwind state from immediately before the epilogue. */
9951 if (need_copy_state)
9955 fprintf (asm_out_file, "\t.body\n");
9956 fprintf (asm_out_file, "\t.copy_state %d\n",
9957 cfun->machine->state_num);
9959 need_copy_state = false;
9963 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9966 /* Look for the ALLOC insn. */
9967 if (INSN_CODE (insn) == CODE_FOR_alloc)
9969 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
9970 int dest_regno = REGNO (dest);
9972 /* If this is the final destination for ar.pfs, then this must
9973 be the alloc in the prologue. */
9974 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
9977 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
9978 ia64_dbx_register_number (dest_regno));
9982 /* This must be an alloc before a sibcall.  We must drop the
9983 old frame info.  The easiest way to drop the old frame
9984 info is to ensure we had a ".restore sp" directive
9985 followed by a new prologue.  If the procedure doesn't
9986 have a memory-stack frame, we'll issue a dummy ".restore sp" now.  */
9988 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
9989 /* If we haven't done process_epilogue () yet, do it now.  */
9990 process_epilogue (asm_out_file, insn, unwind, frame);
9992 fprintf (asm_out_file, "\t.prologue\n");
9997 handled_one = false;
9998 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
9999 switch (REG_NOTE_KIND (note))
10001 case REG_CFA_ADJUST_CFA:
10002 pat = XEXP (note, 0);
10004 pat = PATTERN (insn);
10005 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10006 handled_one = true;
10009 case REG_CFA_OFFSET:
10010 pat = XEXP (note, 0);
10012 pat = PATTERN (insn);
10013 process_cfa_offset (asm_out_file, pat, unwind);
10014 handled_one = true;
10017 case REG_CFA_REGISTER:
10018 pat = XEXP (note, 0);
10020 pat = PATTERN (insn);
10021 process_cfa_register (asm_out_file, pat, unwind);
10022 handled_one = true;
10025 case REG_FRAME_RELATED_EXPR:
10026 case REG_CFA_DEF_CFA:
10027 case REG_CFA_EXPRESSION:
10028 case REG_CFA_RESTORE:
10029 case REG_CFA_SET_VDRAP:
10030 /* Not used in the ia64 port. */
10031 gcc_unreachable ();
10034 /* Not a frame-related note. */
10038 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10039 explicit action to take. No guessing required. */
10040 gcc_assert (handled_one);
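/* As a sketch of the resulting annotations (register numbers, offsets
   and the exact directive set depend on the prologue actually emitted),
   a function that saves ar.pfs and rp in registers and sets up a 32-byte
   memory frame would get something along the lines of

       .save ar.pfs, r35
       .fframe 32
       .save rp, r34
       ...
       .restore sp

   produced by the REG_CFA_* handlers above; this is how the IA-64
   target-format unwind info records where each resource lives.  */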
10043 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10046 ia64_asm_emit_except_personality (rtx personality)
10048 fputs ("\t.personality\t", asm_out_file);
10049 output_addr_const (asm_out_file, personality);
10050 fputc ('\n', asm_out_file);
10053 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10056 ia64_asm_init_sections (void)
10058 exception_section = get_unnamed_section (0, output_section_asm_op,
10062 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10064 static enum unwind_info_type
10065 ia64_debug_unwind_info (void)
10073 IA64_BUILTIN_COPYSIGNQ,
10074 IA64_BUILTIN_FABSQ,
10075 IA64_BUILTIN_FLUSHRS,
10077 IA64_BUILTIN_HUGE_VALQ,
10081 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10084 ia64_init_builtins (void)
10090 /* The __fpreg type. */
10091 fpreg_type = make_node (REAL_TYPE);
10092 TYPE_PRECISION (fpreg_type) = 82;
10093 layout_type (fpreg_type);
10094 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10096 /* The __float80 type. */
10097 float80_type = make_node (REAL_TYPE);
10098 TYPE_PRECISION (float80_type) = 80;
10099 layout_type (float80_type);
10100 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10102 /* The __float128 type. */
10106 tree float128_type = make_node (REAL_TYPE);
10108 TYPE_PRECISION (float128_type) = 128;
10109 layout_type (float128_type);
10110 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10112 /* TFmode support builtins. */
10113 ftype = build_function_type_list (float128_type, NULL_TREE);
10114 decl = add_builtin_function ("__builtin_infq", ftype,
10115 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10117 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10119 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10120 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10122 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10124 ftype = build_function_type_list (float128_type,
10127 decl = add_builtin_function ("__builtin_fabsq", ftype,
10128 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10129 "__fabstf2", NULL_TREE);
10130 TREE_READONLY (decl) = 1;
10131 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10133 ftype = build_function_type_list (float128_type,
10137 decl = add_builtin_function ("__builtin_copysignq", ftype,
10138 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10139 "__copysigntf3", NULL_TREE);
10140 TREE_READONLY (decl) = 1;
10141 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10144 /* Under HPUX, this is a synonym for "long double". */
10145 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10148 /* Fwrite on VMS is non-standard. */
10149 #if TARGET_ABI_OPEN_VMS
10150 vms_patch_builtins ();
10153 #define def_builtin(name, type, code) \
10154 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10157 decl = def_builtin ("__builtin_ia64_bsp",
10158 build_function_type_list (ptr_type_node, NULL_TREE),
10160 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10162 decl = def_builtin ("__builtin_ia64_flushrs",
10163 build_function_type_list (void_type_node, NULL_TREE),
10164 IA64_BUILTIN_FLUSHRS);
10165 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10171 if ((decl = builtin_decl_explicit (BUILT_IN_FINITE)) != NULL_TREE)
10172 set_user_assembler_name (decl, "_Isfinite");
10173 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEF)) != NULL_TREE)
10174 set_user_assembler_name (decl, "_Isfinitef");
10175 if ((decl = builtin_decl_explicit (BUILT_IN_FINITEL)) != NULL_TREE)
10176 set_user_assembler_name (decl, "_Isfinitef128");
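/* A user-level sketch of the builtins registered above:

       __float128 inf = __builtin_infq ();
       __float128 mag = __builtin_fabsq (val);      // expands to __fabstf2
       void *bsp = __builtin_ia64_bsp ();
       __builtin_ia64_flushrs ();

   __builtin_fabsq and __builtin_copysignq simply expand to library calls
   (__fabstf2 and __copysigntf3), while __builtin_ia64_bsp and
   __builtin_ia64_flushrs expand to the bsp_value and flushrs insns; see
   ia64_expand_builtin below.  */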
10181 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10182 enum machine_mode mode ATTRIBUTE_UNUSED,
10183 int ignore ATTRIBUTE_UNUSED)
10185 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10186 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10190 case IA64_BUILTIN_BSP:
10191 if (! target || ! register_operand (target, DImode))
10192 target = gen_reg_rtx (DImode);
10193 emit_insn (gen_bsp_value (target));
10194 #ifdef POINTERS_EXTEND_UNSIGNED
10195 target = convert_memory_address (ptr_mode, target);
10199 case IA64_BUILTIN_FLUSHRS:
10200 emit_insn (gen_flushrs ());
10203 case IA64_BUILTIN_INFQ:
10204 case IA64_BUILTIN_HUGE_VALQ:
10206 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10207 REAL_VALUE_TYPE inf;
10211 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10213 tmp = validize_mem (force_const_mem (target_mode, tmp));
10216 target = gen_reg_rtx (target_mode);
10218 emit_move_insn (target, tmp);
10222 case IA64_BUILTIN_FABSQ:
10223 case IA64_BUILTIN_COPYSIGNQ:
10224 return expand_call (exp, target, ignore);
10227 gcc_unreachable ();
10233 /* Return the ia64 builtin for CODE. */
10236 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10238 if (code >= IA64_BUILTIN_max)
10239 return error_mark_node;
10241 return ia64_builtins[code];
10244 /* On HP-UX IA64, aggregate parameters are passed stored in the
10245 most significant bits of the stack slot.  */
10248 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10250 /* Exception to normal case for structures/unions/etc. */
10252 if (type && AGGREGATE_TYPE_P (type)
10253 && int_size_in_bytes (type) < UNITS_PER_WORD)
10256 /* Fall back to the default. */
10257 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10260 /* Emit text to declare externally defined variables and functions, because
10261 the Intel assembler does not support undefined externals. */
10264 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10266 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10267 set in order to avoid putting out names that are never really used.  */
10269 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10271 /* maybe_assemble_visibility will return 1 if the assembler
10272 visibility directive is output. */
10273 int need_visibility = ((*targetm.binds_local_p) (decl)
10274 && maybe_assemble_visibility (decl));
10276 /* GNU as does not need anything here, but the HP linker does
10277 need something for external functions. */
10278 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10279 && TREE_CODE (decl) == FUNCTION_DECL)
10280 (*targetm.asm_out.globalize_decl_name) (file, decl);
10281 else if (need_visibility && !TARGET_GNU_AS)
10282 (*targetm.asm_out.globalize_label) (file, name);
10286 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
10287 modes of word_mode and larger.  Rename the TFmode libfuncs using the
10288 HPUX conventions.  __divtf3 is used for XFmode.  We need to keep it for
10289 backward compatibility.  */
10292 ia64_init_libfuncs (void)
10294 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10295 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10296 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10297 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10299 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10300 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10301 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10302 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10303 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10305 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10306 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10307 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10308 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10309 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10310 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10312 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10313 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10314 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10315 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10316 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10318 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10319 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10320 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10321 /* HP-UX 11.23 libc does not have a function for unsigned
10322 SImode-to-TFmode conversion. */
10323 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
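/* So, for example, a TFmode addition compiled with these tables becomes a
   call to _U_Qfadd, and a TFmode-to-SImode conversion becomes a call to
   _U_Qfcnvfxt_quad_to_sgl, matching the HP-UX quad-precision library names
   rather than the default libgcc ones.  */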
10326 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10329 ia64_hpux_init_libfuncs (void)
10331 ia64_init_libfuncs ();
10333 /* The HP SI millicode division and mod functions expect DI arguments.
10334 By turning them off completely we avoid using both libgcc and the
10335 non-standard millicode routines and use the HP DI millicode routines instead.  */
10338 set_optab_libfunc (sdiv_optab, SImode, 0);
10339 set_optab_libfunc (udiv_optab, SImode, 0);
10340 set_optab_libfunc (smod_optab, SImode, 0);
10341 set_optab_libfunc (umod_optab, SImode, 0);
10343 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10344 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10345 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10346 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10348 /* HP-UX libc has TF min/max/abs routines in it. */
10349 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10350 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10351 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10353 /* ia64_expand_compare uses this. */
10354 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10356 /* These should never be used. */
10357 set_optab_libfunc (eq_optab, TFmode, 0);
10358 set_optab_libfunc (ne_optab, TFmode, 0);
10359 set_optab_libfunc (gt_optab, TFmode, 0);
10360 set_optab_libfunc (ge_optab, TFmode, 0);
10361 set_optab_libfunc (lt_optab, TFmode, 0);
10362 set_optab_libfunc (le_optab, TFmode, 0);
10365 /* Rename the division and modulus functions in VMS. */
10368 ia64_vms_init_libfuncs (void)
10370 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10371 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10372 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10373 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10374 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10375 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10376 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10377 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10378 abort_libfunc = init_one_libfunc ("decc$abort");
10379 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10380 #ifdef MEM_LIBFUNCS_INIT
10385 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10386 the HPUX conventions. */
10389 ia64_sysv4_init_libfuncs (void)
10391 ia64_init_libfuncs ();
10393 /* These functions are not part of the HPUX TFmode interface.  We
10394 use them instead of _U_Qfcmp, which doesn't work the way we expect.  */
10396 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10397 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10398 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10399 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10400 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10401 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10403 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10404 glibc doesn't have them. */
10410 ia64_soft_fp_init_libfuncs (void)
10415 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10417 return (mode == SImode || mode == DImode);
10420 /* For HPUX, it is illegal to have relocations in shared segments. */
10423 ia64_hpux_reloc_rw_mask (void)
10428 /* For others, relax this so that relocations to local data go in
10429 read-only segments, but we still cannot allow global relocations
10430 in read-only segments.  */
10433 ia64_reloc_rw_mask (void)
10435 return flag_pic ? 3 : 2;
10438 /* Return the section to use for X. The only special thing we do here
10439 is to honor small data. */
10442 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10443 unsigned HOST_WIDE_INT align)
10445 if (GET_MODE_SIZE (mode) > 0
10446 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10447 && !TARGET_NO_SDATA)
10448 return sdata_section;
10450 return default_elf_select_rtx_section (mode, x, align);
10453 static unsigned int
10454 ia64_section_type_flags (tree decl, const char *name, int reloc)
10456 unsigned int flags = 0;
10458 if (strcmp (name, ".sdata") == 0
10459 || strncmp (name, ".sdata.", 7) == 0
10460 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10461 || strncmp (name, ".sdata2.", 8) == 0
10462 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10463 || strcmp (name, ".sbss") == 0
10464 || strncmp (name, ".sbss.", 6) == 0
10465 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10466 flags = SECTION_SMALL;
10468 #if TARGET_ABI_OPEN_VMS
10469 if (decl && DECL_ATTRIBUTES (decl)
10470 && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
10471 flags |= SECTION_VMS_OVERLAY;
10474 flags |= default_section_type_flags (decl, name, reloc);
10478 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10479 structure type and the address of that type should be passed
10480 in out0, rather than in r8.  */
10483 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10485 tree ret_type = TREE_TYPE (fntype);
10487 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10488 as the structure return address parameter, if the return value
10489 type has a non-trivial copy constructor or destructor. It is not
10490 clear if this same convention should be used for other
10491 programming languages. Until G++ 3.4, we incorrectly used r8 for
10492 these return values. */
10493 return (abi_version_at_least (2)
10495 && TYPE_MODE (ret_type) == BLKmode
10496 && TREE_ADDRESSABLE (ret_type)
10497 && strcmp (lang_hooks.name, "GNU C++") == 0);
10500 /* Output the assembler code for a thunk function. THUNK_DECL is the
10501 declaration for the thunk function itself, FUNCTION is the decl for
10502 the target function. DELTA is an immediate constant offset to be
10503 added to THIS. If VCALL_OFFSET is nonzero, the word at
10504 *(*this + vcall_offset) should be added to THIS. */
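/* In C-like pseudo code, the thunk generated below behaves roughly as

       this += DELTA;
       if (VCALL_OFFSET)
         this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
       goto FUNCTION (this, ...);     // emitted as a sibcall

   with extra ptr_extend steps when pointers are 32 bits (ILP32).  */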
10507 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10508 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10511 rtx this_rtx, insn, funexp;
10512 unsigned int this_parmno;
10513 unsigned int this_regno;
10516 reload_completed = 1;
10517 epilogue_completed = 1;
10519 /* Set things up as ia64_expand_prologue might. */
10520 last_scratch_gr_reg = 15;
10522 memset (&current_frame_info, 0, sizeof (current_frame_info));
10523 current_frame_info.spill_cfa_off = -16;
10524 current_frame_info.n_input_regs = 1;
10525 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10527 /* Mark the end of the (empty) prologue. */
10528 emit_note (NOTE_INSN_PROLOGUE_END);
10530 /* Figure out whether "this" will be the first parameter (the
10531 typical case) or the second parameter (as happens when the
10532 virtual function returns certain class objects). */
10534 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10536 this_regno = IN_REG (this_parmno);
10537 if (!TARGET_REG_NAMES)
10538 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10540 this_rtx = gen_rtx_REG (Pmode, this_regno);
10542 /* Apply the constant offset, if required. */
10543 delta_rtx = GEN_INT (delta);
10546 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10547 REG_POINTER (tmp) = 1;
10548 if (delta && satisfies_constraint_I (delta_rtx))
10550 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10554 emit_insn (gen_ptr_extend (this_rtx, tmp));
10558 if (!satisfies_constraint_I (delta_rtx))
10560 rtx tmp = gen_rtx_REG (Pmode, 2);
10561 emit_move_insn (tmp, delta_rtx);
10564 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10567 /* Apply the offset from the vtable, if required. */
10570 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10571 rtx tmp = gen_rtx_REG (Pmode, 2);
10575 rtx t = gen_rtx_REG (ptr_mode, 2);
10576 REG_POINTER (t) = 1;
10577 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10578 if (satisfies_constraint_I (vcall_offset_rtx))
10580 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10584 emit_insn (gen_ptr_extend (tmp, t));
10587 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10591 if (!satisfies_constraint_J (vcall_offset_rtx))
10593 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10594 emit_move_insn (tmp2, vcall_offset_rtx);
10595 vcall_offset_rtx = tmp2;
10597 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10601 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10603 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10605 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10608 /* Generate a tail call to the target function. */
10609 if (! TREE_USED (function))
10611 assemble_external (function);
10612 TREE_USED (function) = 1;
10614 funexp = XEXP (DECL_RTL (function), 0);
10615 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10616 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10617 insn = get_last_insn ();
10618 SIBLING_CALL_P (insn) = 1;
10620 /* Code generation for calls relies on splitting. */
10621 reload_completed = 1;
10622 epilogue_completed = 1;
10623 try_split (PATTERN (insn), insn, 0);
10627 /* Run just enough of rest_of_compilation to get the insns emitted.
10628 There's not really enough bulk here to make other passes such as
10629 instruction scheduling worthwhile.  Note that use_thunk calls
10630 assemble_start_function and assemble_end_function.  */
10632 insn_locators_alloc ();
10633 emit_all_insn_group_barriers (NULL);
10634 insn = get_insns ();
10635 shorten_branches (insn);
10636 final_start_function (insn, file, 1);
10637 final (insn, file, 1);
10638 final_end_function ();
10640 reload_completed = 0;
10641 epilogue_completed = 0;
10644 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10647 ia64_struct_value_rtx (tree fntype,
10648 int incoming ATTRIBUTE_UNUSED)
10650 if (TARGET_ABI_OPEN_VMS ||
10651 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10653 return gen_rtx_REG (Pmode, GR_REG (8));
10657 ia64_scalar_mode_supported_p (enum machine_mode mode)
10683 ia64_vector_mode_supported_p (enum machine_mode mode)
10700 /* Implement the FUNCTION_PROFILER macro. */
10703 ia64_output_function_profiler (FILE *file, int labelno)
10705 bool indirect_call;
10707 /* If the function needs a static chain and the static chain
10708 register is r15, we use an indirect call so as to bypass
10709 the PLT stub in case the executable is dynamically linked,
10710 because the stub clobbers r15 as per 5.3.6 of the psABI.
10711 We don't need to do that in non canonical PIC mode. */
10713 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10715 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10716 indirect_call = true;
10719 indirect_call = false;
10722 fputs ("\t.prologue 4, r40\n", file);
10724 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10725 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10727 if (NO_PROFILE_COUNTERS)
10728 fputs ("\tmov out3 = r0\n", file);
10732 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10734 if (TARGET_AUTO_PIC)
10735 fputs ("\tmovl out3 = @gprel(", file);
10737 fputs ("\taddl out3 = @ltoff(", file);
10738 assemble_name (file, buf);
10739 if (TARGET_AUTO_PIC)
10740 fputs (")\n", file);
10742 fputs ("), r1\n", file);
10746 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10747 fputs ("\t;;\n", file);
10749 fputs ("\t.save rp, r42\n", file);
10750 fputs ("\tmov out2 = b0\n", file);
10752 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10753 fputs ("\t.body\n", file);
10754 fputs ("\tmov out1 = r1\n", file);
10757 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10758 fputs ("\tmov b6 = r16\n", file);
10759 fputs ("\tld8 r1 = [r14]\n", file);
10760 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10763 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
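/* Reading the sequences above: _mcount receives the caller's saved ar.pfs
   in out0 (from the alloc), the global pointer in out1, the return branch
   register b0 in out2, and the address of the profile counter (or r0 when
   NO_PROFILE_COUNTERS) in out3.  The indirect variant loads the _mcount
   function descriptor by hand, so the r15-clobbering PLT stub mentioned
   above is never used.  */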
10766 static GTY(()) rtx mcount_func_rtx;
10768 gen_mcount_func_rtx (void)
10770 if (!mcount_func_rtx)
10771 mcount_func_rtx = init_one_libfunc ("_mcount");
10772 return mcount_func_rtx;
10776 ia64_profile_hook (int labelno)
10780 if (NO_PROFILE_COUNTERS)
10781 label = const0_rtx;
10785 const char *label_name;
10786 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10787 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
10788 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
10789 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
10791 ip = gen_reg_rtx (Pmode);
10792 emit_insn (gen_ip_value (ip));
10793 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
10795 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
10800 /* Return the mangling of TYPE if it is an extended fundamental type. */
10802 static const char *
10803 ia64_mangle_type (const_tree type)
10805 type = TYPE_MAIN_VARIANT (type);
10807 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
10808 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
10811 /* On HP-UX, "long double" is mangled as "e" so __float128 is mangled as "g".  */
10813 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
10815 /* On HP-UX, "e" is not available as a mangling of __float80 so use
10816 an extended mangling. Elsewhere, "e" is available since long
10817 double is 80 bits. */
10818 if (TYPE_MODE (type) == XFmode)
10819 return TARGET_HPUX ? "u9__float80" : "e";
10820 if (TYPE_MODE (type) == RFmode)
10821 return "u7__fpreg";
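/* For example, assuming the standard Itanium C++ mangling rules, a
   function "void f (__float80)" mangles to _Z1fe on Linux, where "e" is
   usable because long double is the same 80-bit type, but to
   _Z1fu9__float80 on HP-UX; "void g (__fpreg)" mangles to _Z1gu7__fpreg
   everywhere, since __fpreg always uses the vendor-extended form.  */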
10825 /* Return the diagnostic message string if conversion from FROMTYPE to
10826 TOTYPE is not allowed, NULL otherwise. */
10827 static const char *
10828 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
10830 /* Reject nontrivial conversion to or from __fpreg. */
10831 if (TYPE_MODE (fromtype) == RFmode
10832 && TYPE_MODE (totype) != RFmode
10833 && TYPE_MODE (totype) != VOIDmode)
10834 return N_("invalid conversion from %<__fpreg%>");
10835 if (TYPE_MODE (totype) == RFmode
10836 && TYPE_MODE (fromtype) != RFmode)
10837 return N_("invalid conversion to %<__fpreg%>");
10841 /* Return the diagnostic message string if the unary operation OP is
10842 not permitted on TYPE, NULL otherwise. */
10843 static const char *
10844 ia64_invalid_unary_op (int op, const_tree type)
10846 /* Reject operations on __fpreg other than unary + or &. */
10847 if (TYPE_MODE (type) == RFmode
10848 && op != CONVERT_EXPR
10849 && op != ADDR_EXPR)
10850 return N_("invalid operation on %<__fpreg%>");
10854 /* Return the diagnostic message string if the binary operation OP is
10855 not permitted on TYPE1 and TYPE2, NULL otherwise. */
10856 static const char *
10857 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
10859 /* Reject operations on __fpreg. */
10860 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
10861 return N_("invalid operation on %<__fpreg%>");
10865 /* HP-UX version_id attribute.
10866 For object foo, if the version_id is set to 1234, put out an alias
10867 of '.alias foo "foo{1234}"'.  We can't use "foo{1234}" in anything
10868 other than an alias statement because it is an illegal symbol name.  */
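/* A hypothetical use matching the description above:

       extern int foo __attribute__ ((version_id ("1234")));

   which is expected to result in the '.alias foo "foo{1234}"' directive
   being emitted for the symbol on HP-UX.  */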
10871 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
10872 tree name ATTRIBUTE_UNUSED,
10874 int flags ATTRIBUTE_UNUSED,
10875 bool *no_add_attrs)
10877 tree arg = TREE_VALUE (args);
10879 if (TREE_CODE (arg) != STRING_CST)
10881 error ("version attribute is not a string");
10882 *no_add_attrs = true;
10888 /* Target hook for c_mode_for_suffix. */
10890 static enum machine_mode
10891 ia64_c_mode_for_suffix (char suffix)
10901 static enum machine_mode
10902 ia64_promote_function_mode (const_tree type,
10903 enum machine_mode mode,
10905 const_tree funtype,
10908 /* Special processing required for OpenVMS ... */
10910 if (!TARGET_ABI_OPEN_VMS)
10911 return default_promote_function_mode(type, mode, punsignedp, funtype,
10914 /* HP OpenVMS Calling Standard dated June, 2004, that describes
10915 HP OpenVMS I64 Version 8.2EFT,
10916 chapter 4 "OpenVMS I64 Conventions"
10917 section 4.7 "Procedure Linkage"
10918 subsection 4.7.5.2, "Normal Register Parameters"
10920 "Unsigned integral (except unsigned 32-bit), set, and VAX floating-point
10921 values passed in registers are zero-filled; signed integral values as
10922 well as unsigned 32-bit integral values are sign-extended to 64 bits.
10923 For all other types passed in the general registers, unused bits are
10926 if (for_return != 2
10927 && GET_MODE_CLASS (mode) == MODE_INT
10928 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
10930 if (mode == SImode)
10935 return promote_mode (type, mode, punsignedp);
10938 static GTY(()) rtx ia64_dconst_0_5_rtx;
10941 ia64_dconst_0_5 (void)
10943 if (! ia64_dconst_0_5_rtx)
10945 REAL_VALUE_TYPE rv;
10946 real_from_string (&rv, "0.5");
10947 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
10949 return ia64_dconst_0_5_rtx;
10952 static GTY(()) rtx ia64_dconst_0_375_rtx;
10955 ia64_dconst_0_375 (void)
10957 if (! ia64_dconst_0_375_rtx)
10959 REAL_VALUE_TYPE rv;
10960 real_from_string (&rv, "0.375");
10961 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
10963 return ia64_dconst_0_375_rtx;
10966 static enum machine_mode
10967 ia64_get_reg_raw_mode (int regno)
10969 if (FR_REGNO_P (regno))
10971 return default_get_reg_raw_mode(regno);
10974 /* Always default to .text section until HP-UX linker is fixed. */
10976 ATTRIBUTE_UNUSED static section *
10977 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
10978 enum node_frequency freq ATTRIBUTE_UNUSED,
10979 bool startup ATTRIBUTE_UNUSED,
10980 bool exit ATTRIBUTE_UNUSED)
10985 #include "gt-ia64.h"