/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "tree-pass.h"
#include "gimple-iterator.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"

/* This file should be included last.  */
#include "target-def.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};

/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
static int alpha_memory_latency = 3;

/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The assembler name of the current function.  */
static const char *alpha_fnname;
/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};
static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one insn involved.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
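
/* Note: COSTS_N_INSNS (N) expands to (N) * 4 (see rtl.h), so the
   "+ 1" and "+ 2" entries above and in the EV5 row charge fractions
   of an insn.  For example, COSTS_N_INSNS (1) + 1 == 5 is a quarter
   insn more than a plain single-insn cost; the fractions only break
   ties between candidate sequences with equal whole-insn counts.  */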

/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
static void alpha_handle_trap_shadows (void);
static void alpha_align_insns (void);
static void alpha_override_options_after_change (void);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *);
static bool vms_valid_pointer_mode (scalar_int_mode);
#else
#define vms_patch_builtins()  gcc_unreachable()
#endif

static unsigned int
rest_of_handle_trap_shadows (void)
{
  alpha_handle_trap_shadows ();
  return 0;
}

namespace {

const pass_data pass_data_handle_trap_shadows =
{
  RTL_PASS,			/* type */
  "trap_shadows",		/* name */
  OPTGROUP_NONE,		/* optinfo_flags */
  TV_NONE,			/* tv_id */
  0,				/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  TODO_df_finish,		/* todo_flags_finish */
};

class pass_handle_trap_shadows : public rtl_opt_pass
{
public:
  pass_handle_trap_shadows(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_handle_trap_shadows, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return alpha_tp != ALPHA_TP_PROG || flag_exceptions;
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_handle_trap_shadows ();
    }

}; // class pass_handle_trap_shadows

} // anon namespace

rtl_opt_pass *
make_pass_handle_trap_shadows (gcc::context *ctxt)
{
  return new pass_handle_trap_shadows (ctxt);
}

static unsigned int
rest_of_align_insns (void)
{
  alpha_align_insns ();
  return 0;
}

namespace {

const pass_data pass_data_align_insns =
{
  RTL_PASS,			/* type */
  "align_insns",		/* name */
  OPTGROUP_NONE,		/* optinfo_flags */
  TV_NONE,			/* tv_id */
  0,				/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  TODO_df_finish,		/* todo_flags_finish */
};

class pass_align_insns : public rtl_opt_pass
{
public:
  pass_align_insns(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_align_insns, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* Due to the number of extra trapb insns, don't bother fixing up
	 alignment when trap precision is instruction.  Moreover, we can
	 only do our job when sched2 is run.  */
      return ((alpha_tune == PROCESSOR_EV4
	       || alpha_tune == PROCESSOR_EV5)
	      && optimize && !optimize_size
	      && alpha_tp != ALPHA_TP_INSN
	      && flag_schedule_insns_after_reload);
    }

  virtual unsigned int execute (function *)
    {
      return rest_of_align_insns ();
    }

}; // class pass_align_insns

} // anon namespace

rtl_opt_pass *
make_pass_align_insns (gcc::context *ctxt)
{
  return new pass_align_insns (ctxt);
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif

/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
    const unsigned short line_size; /* in bytes */
    const unsigned short l1_size;   /* in kb.  */
    const unsigned short l2_size;   /* in kb.  */
  } cpu_table[] = {
    /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
       EV4/EV45 had 128k to 16M 32-byte direct Bcache.  LCA45
       had 64k to 8M 8-byte direct Bcache.  */
    { "ev4",	PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "21064",	PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "ev45",	PROCESSOR_EV4, 0, 32, 16, 16*1024 },

    /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
       and 1M to 16M 64 byte L3 (not modeled).
       PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
       PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache.  */
    { "ev5",	PROCESSOR_EV5, 0, 32, 8, 96 },
    { "21164",	PROCESSOR_EV5, 0, 32, 8, 96 },
    { "ev56",	PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "21164a",	PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "pca56",	PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },

    /* EV6 had 64k 64 byte L1, 1M to 16M Bcache.  */
    { "ev6",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "21264",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "ev67",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
		64, 64, 16*1024 },
    { "21264a",	PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
		64, 64, 16*1024 },
  };

  int const ct_size = ARRAY_SIZE (cpu_table);
  int line_size = 0, l1_size = 0, l2_size = 0;
  int i;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* Default to full IEEE compliance mode for Go language.  */
  if (strcmp (lang_hooks.name, "GNU Go") == 0
      && !(target_flags_explicit & MASK_IEEE))
    target_flags |= MASK_IEEE;

  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }

  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }

  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }

  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table[i].processor;
	    line_size = cpu_table[i].line_size;
	    l1_size = cpu_table[i].l1_size;
	    l2_size = cpu_table[i].l2_size;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table[i].flags;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }

  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_tune = cpu_table[i].processor;
	    line_size = cpu_table[i].line_size;
	    l1_size = cpu_table[i].l1_size;
	    l2_size = cpu_table[i].l2_size;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mtune switch", alpha_tune_string);
    }

  if (line_size)
    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);
  if (l1_size)
    maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);
  if (l2_size)
    maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);

  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }

  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }

  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }

  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  alpha_override_options_after_change ();

  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}

/* Implement targetm.override_options_after_change.  */

static void
alpha_override_options_after_change (void)
{
  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize.  */
  if (optimize > 0)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
}

/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

static bool
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return false;

  return true;
}
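
/* Illustrative examples: 0x00000000ffffffff and 0xff00ff00ff00ff00
   are zap masks (every byte is wholly 0x00 or 0xff), whereas
   0x000000000000ff80 is not, because its low byte 0x80 is mixed.
   Such masks map directly onto the 8-bit byte-selector operand of
   the zap/zapnot instructions.  */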

/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      gcc_unreachable ();
    }
}

/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (SUBREG_P (tmp))
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc (REGNO (tmp));
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}

/* The scalar modes supported differ from the default check-what-c-supports
   version in that sometimes TFmode is available even when long double
   indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (scalar_mode mode)
{
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
    case E_SImode:
    case E_DImode:
    case E_TImode: /* via optabs.c */
      return true;

    case E_SFmode:
    case E_DFmode:
      return true;

    case E_TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}

/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}

/* Return 1 if this function can directly return via $26.  */

bool
direct_return (void)
{
  return (TARGET_ABI_OSF
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && crtl->outgoing_args_size == 0
	  && crtl->args.pretend_args_size == 0);
}

/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}

/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}

/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  /* COMMON symbols are never small data.  */
  if (TREE_CODE (exp) == VAR_DECL && DECL_COMMON (exp))
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = DECL_SECTION_NAME (exp);
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
	return true;
    }

  return false;
}

#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (scalar_int_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST		\
       && GET_CODE (XEXP (X, 0)) == PLUS \
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif

/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */
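
/* For example (illustrative), the DImode address

     (and:DI (plus:DI (reg:DI 1) (const_int 6)) (const_int -8))

   rounds reg+6 down to the enclosing aligned quadword, which is the
   address computation performed by ldq_u and stq_u.  */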

static bool
alpha_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);

  /* Discard non-paradoxical subregs.  */
  if (SUBREG_P (x)
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x))
    return (strict
	    ? STRICT_REG_OK_FOR_BASE_P (x)
	    : NONSTRICT_REG_OK_FOR_BASE_P (x));

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (SUBREG_P (x)
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && CONST_INT_P (ofs))
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }

  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since i.e. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
	   && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (SUBREG_P (x)
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (!(REG_P (x)
		&& (strict
		    ? STRICT_REG_OK_FOR_BASE_P (x)
		    : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}

/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }

  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, dest, seq;
      rtx_insn *insn;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  {
	    start_sequence ();

	    r0 = gen_rtx_REG (Pmode, 0);
	    r16 = gen_rtx_REG (Pmode, 16);
	    tga = get_tls_get_addr ();
	    dest = gen_reg_rtx (Pmode);
	    seq = GEN_INT (alpha_next_sequence_number++);

	    emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	    rtx val = gen_call_value_osf_tlsgd (r0, tga, seq);
	    insn = emit_call_insn (val);
	    RTL_CONST_CALL_P (insn) = 1;
	    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	    insn = get_insns ();
	    end_sequence ();

	    emit_libcall_block (insn, dest, r0, x);
	    return dest;
	  }

	case TLS_MODEL_LOCAL_DYNAMIC:
	  {
	    start_sequence ();

	    r0 = gen_rtx_REG (Pmode, 0);
	    r16 = gen_rtx_REG (Pmode, 16);
	    tga = get_tls_get_addr ();
	    scratch = gen_reg_rtx (Pmode);
	    seq = GEN_INT (alpha_next_sequence_number++);

	    emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	    rtx val = gen_call_value_osf_tlsldm (r0, tga, seq);
	    insn = emit_call_insn (val);
	    RTL_CONST_CALL_P (insn) = 1;
	    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	    insn = get_insns ();
	    end_sequence ();

	    eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				  UNSPEC_TLSLDM_CALL);
	    emit_libcall_block (insn, scratch, r0, eqv);

	    eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	    eqv = gen_rtx_CONST (Pmode, eqv);

	    if (alpha_tls_size == 64)
	      {
		dest = gen_reg_rtx (Pmode);
		emit_insn (gen_rtx_SET (dest, eqv));
		emit_insn (gen_adddi3 (dest, dest, scratch));
		return dest;
	      }
	    if (alpha_tls_size == 32)
	      {
		rtx temp = gen_rtx_HIGH (Pmode, eqv);
		temp = gen_rtx_PLUS (Pmode, scratch, temp);
		scratch = gen_reg_rtx (Pmode);
		emit_insn (gen_rtx_SET (scratch, temp));
	      }
	    return gen_rtx_LO_SUM (Pmode, scratch, eqv);
	  }

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_get_thread_pointerdi (tp));
	  emit_insn (gen_rtx_SET (scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_get_thread_pointerdi (tp));
	  if (alpha_tls_size == 32)
	    {
	      rtx temp = gen_rtx_HIGH (Pmode, eqv);
	      temp = gen_rtx_PLUS (Pmode, tp, temp);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (tp, temp));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}

      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (can_create_pseudo_p ())
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (scratch, gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (Pmode, x, low);
  }
}
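
/* A worked instance of the split above (illustrative): for
   addend = 0x12345 we get low = 0x2345 and high = 0x10000, so the
   address becomes "ldah tmp,1(reg)" plus a 0x2345 displacement.
   The ((x & 0xffff) ^ 0x8000) - 0x8000 idiom sign-extends the low
   16 bits, e.g. 0x9234 becomes -0x6dcc, keeping every piece within
   the signed 16-bit range of lda/ldah.  */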

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}

/* Return true if ADDR has an effect that depends on the machine mode it
   is used for.  On the Alpha this is true only for the unaligned modes.
   We can simplify the test since we know that the address must be valid.  */

static bool
alpha_mode_dependent_address_p (const_rtx addr,
				addr_space_t as ATTRIBUTE_UNUSED)
{
  return GET_CODE (addr) == AND;
}

/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}

/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}

static bool
some_small_symbolic_operand_int (rtx x)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
    {
      rtx x = *iter;
      /* Don't re-split.  */
      if (GET_CODE (x) == LO_SUM)
	iter.skip_subrtxes ();
      else if (small_symbolic_operand (x, Pmode))
	return true;
    }
  return false;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL)
    {
      rtx *ptr = *iter;
      rtx x = *ptr;
      /* Don't re-split.  */
      if (GET_CODE (x) == LO_SUM)
	iter.skip_subrtxes ();
      else if (small_symbolic_operand (x, Pmode))
	{
	  *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
	  iter.skip_subrtxes ();
	}
    }
  return x;
}

/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx_insn *insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}

/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This
     cuts number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && CONST_INT_P (XEXP (x, 1)))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}

/* Return the cost of moving between registers of various classes.  Moving
   between FLOAT_REGS and anything else except float regs is expensive.
   In fact, we make it quite expensive because we really don't want to
   do these moves unless it is clearly worth it.  Optimizations may
   reduce the impact of not being able to allocate a pseudo to a
   hard register.  */

static int
alpha_register_move_cost (machine_mode /*mode*/,
			  reg_class_t from, reg_class_t to)
{
  if ((from == FLOAT_REGS) == (to == FLOAT_REGS))
    return 2;

  if (TARGET_FIX)
    return (from == FLOAT_REGS) ? 6 : 8;

  return 4 + 2 * alpha_memory_latency;
}

/* Return the cost of moving data of MODE from a register to
   or from memory.  On the Alpha, bump this up a bit.  */

static int
alpha_memory_move_cost (machine_mode /*mode*/, reg_class_t /*regclass*/,
			bool /*in*/)
{
  return 2 * alpha_memory_latency;
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total,
		 bool speed)
{
  int code = GET_CODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST_WIDE_INT:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0), mode,
			      (enum rtx_code) outer_code, opno, speed)
		    + rtx_cost (XEXP (x, 1), mode,
				(enum rtx_code) outer_code, opno, speed)
		    + COSTS_N_INSNS (1));
	  return true;
	}
      return false;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}

/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
	      <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}
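
/* Example (illustrative): for an HImode reference at $sp+6 with $sp
   32-bit aligned, disp & 3 == 2, so *PALIGNED_MEM is the SImode word
   at $sp+4 and *PBITNUM is 16 -- callers extract the halfword by
   shifting the loaded word right by that bit count.  */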

/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress)
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (Pmode, base, offset);
}

/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
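
/* Illustrative: with ADDR == (plus (reg) (const_int 13)) and OFS == 1,
   this emits reg + 6; since addition is linear modulo 8, (reg + 6) & 7
   equals (reg + 13 + 1) & 7, which is all the unaligned sequences
   care about.  */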

/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_SCALAR_INT_P (x)
      || CONST_DOUBLE_P (x)
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
	return NO_REGS;
      if (rclass == ALL_REGS)
	return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}

/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
	{
	  if (in_p)
	    {
	      if (!aligned_memory_operand (x, mode))
		sri->icode = direct_optab_handler (reload_in_optab, mode);
	    }
	  else
	    sri->icode = direct_optab_handler (reload_out_optab, mode);
	  return NO_REGS;
	}
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
	  && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
	return GENERAL_REGS;
    }

  return NO_REGS;
}

/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  If MODE is
   floating-point, use it.  Otherwise, widen to a word like the default.
   This is needed because we always store integers in FP registers in
   quadword format.  This whole area is very tricky!  */

static machine_mode
alpha_secondary_memory_needed_mode (machine_mode mode)
{
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    return mode;
  if (GET_MODE_SIZE (mode) >= 4)
    return mode;
  return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
}

/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx_insn *insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  subrtx_var_iterator::array_type array;
  for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
	{
	  rtx x = *iter;
	  if (MEM_P (x))
	    {
	      MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref);
	      MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref);
	      MEM_READONLY_P (x) = MEM_READONLY_P (ref);
	      /* Sadly, we cannot use alias sets because the extra
		 aliasing produced by the AND interferes.  Given that
		 two-byte quantities are the only thing we would be
		 able to differentiate anyway, there does not seem to
		 be any point in convoluting the early out of the
		 alias check.  */
	      iter.skip_subrtxes ();
	    }
	}
    else
      gcc_unreachable ();
}

static rtx alpha_emit_set_const (rtx, machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  */

  if (c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}
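
      /* Worked instance (illustrative): c = 0x12348765 gives
	 low = -0x789b and high = 0x1235, i.e. "ldah r,0x1235" then
	 "lda r,-0x789b(r)".  The extra ldah is needed only for values
	 such as 0x7fff8000 whose high half would sign-extend negative
	 even though C itself is positive.  */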

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (!can_create_pseudo_p ())
	    {
	      emit_insn (gen_rtx_SET (target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (target, insn);
	  emit_insn (insn);
	  return target;
	}
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp && c < 0)
	      {
		new_const = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }

  /* Finally, see if can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
			       target, 0, OPTAB_WIDEN);
	}
    }

  return 0;
}
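
/* Illustrative: for c = 0x0000000000ff00ff every zero byte flips to
   0xff, giving new_const = -1, which loads in a single insn.  The
   AND mask c | ~new_const is then c itself -- a zap mask selecting
   bytes 0 and 2 -- so "lda r,-1" followed by "zapnot r,0x05,r"
   materializes c in two insns.  */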

/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx_insn *insn;
	  rtx set;

	  if (no_output)
	    return result;

	  insn = get_last_insn ();
	  set = single_set (insn);
	  if (! CONSTANT_P (SET_SRC (set)))
	    set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}

/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
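
/* Worked decomposition (illustrative): c1 = 0x123456789abcdef0 gives
   d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679 and d4 = 0x12340000,
   and the emitted sequence rebuilds ((d4 + d3) << 32) + d2 + d1 in
   at most five insns: ldah, lda, sll 32, ldah, lda.  */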

/* Given an integral CONST_INT or CONST_VECTOR, return the low 64 bits.  */

static HOST_WIDE_INT
alpha_extract_integer (rtx x)
{
  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  gcc_assert (CONST_INT_P (x));

  return INTVAL (x);
}

/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
   we are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (machine_mode mode, rtx x)
{
  HOST_WIDE_INT i0;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
	x = XEXP (XEXP (x, 0), 0);
      else
	return true;

      if (GET_CODE (x) != SYMBOL_REF)
	return true;
      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_WIDE_INT:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      if (x == CONST0_RTX (mode))
	return true;
      mode = DImode;
      gcc_assert (CONST_WIDE_INT_NUNITS (x) == 2);
      i0 = CONST_WIDE_INT_ELT (x, 1);
      if (alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) == NULL)
	return false;
      i0 = CONST_WIDE_INT_ELT (x, 0);
      goto do_integer;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      return false;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      /* FALLTHRU */

    case CONST_INT:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      i0 = alpha_extract_integer (x);
    do_integer:
      return alpha_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) != NULL;

    default:
      return false;
    }
}

/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0;
  rtx temp = NULL_RTX;

  i0 = alpha_extract_integer (operands[1]);

  temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}

/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}

/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      if (mode == QImode)
		seq = gen_reload_inqi_aligned (operands[0], operands[1]);
	      else
		seq = gen_reload_inhi_aligned (operands[0], operands[1]);
	      emit_insn (seq);
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (REG_P (subtarget))
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      if (mode == QImode)
		seq = gen_aligned_loadqi (subtarget, aligned_mem,
					  bitnum, scratch);
	      else
		seq = gen_aligned_loadhi (subtarget, aligned_mem,
					  bitnum, scratch);
	      emit_insn (seq);

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, subtarget, ua;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (REG_P (subtarget))
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  ua = get_unaligned_address (operands[1]);
	  if (mode == QImode)
	    seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
	  else
	    seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}
      return true;
    }

  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
	{
	  rtx aligned_mem, bitnum;
	  rtx temp1 = gen_reg_rtx (SImode);
	  rtx temp2 = gen_reg_rtx (SImode);

	  get_aligned_mem (operands[0], &aligned_mem, &bitnum);

	  emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
					temp1, temp2));
	}
      else
	{
	  rtx temp1 = gen_reg_rtx (DImode);
	  rtx temp2 = gen_reg_rtx (DImode);
	  rtx temp3 = gen_reg_rtx (DImode);
	  rtx ua = get_unaligned_address (operands[0]);

	  if (mode == QImode)
	    seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
	  else
	    seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

	  alpha_set_memflags (seq, operands[0]);
	  emit_insn (seq);
	}
      return true;
    }

  return false;
}
2388 /* Implement the movmisalign patterns. One of the operands is a memory
2389 that is not naturally aligned. Emit instructions to load it. */
2392 alpha_expand_movmisalign (machine_mode mode, rtx *operands)
2394 /* Honor misaligned loads where we promised to do so. */
2395 if (MEM_P (operands[1]))
2399 if (register_operand (operands[0], mode))
2402 tmp = gen_reg_rtx (mode);
2404 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2405 if (tmp != operands[0])
2406 emit_move_insn (operands[0], tmp);
2408 else if (MEM_P (operands[0]))
2410 if (!reg_or_0_operand (operands[1], mode))
2411 operands[1] = force_reg (mode, operands[1]);
2412 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2418 /* Generate an unsigned DImode to FP conversion. This is the same code
2419 optabs would emit if we didn't have TFmode patterns.
2421 For SFmode, this is the only construction I've found that can pass
2422 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2423 intermediates will work, because you'll get intermediate rounding
2424 that ruins the end result. Some of this could be fixed by turning
2425 on round-to-positive-infinity, but that requires diddling the fpsr,
2426 which kills performance. I tried turning this around and converting
2427 to a negative number, so that I could turn on /m, but either I did
2428 it wrong or there's something else, because I wound up with the exact
2429 same single-bit error. There is a branch-less form of this same code:
2440 fcmoveq $f10,$f11,$f0
2442 I'm not using it because it's the same number of instructions as
2443 this branch-full form, and it has more serialized long latency
2444 instructions on the critical path.
2446 For DFmode, we can avoid rounding errors by breaking up the word
2447 into two pieces, converting them separately, and adding them back:
2449 LC0: .long 0,0x5f800000
2454 cpyse $f11,$f31,$f10
2455 cpyse $f31,$f11,$f11
2463 This doesn't seem to be a clear-cut win over the optabs form.
2464 It probably all depends on the distribution of numbers being
2465 converted -- in the optabs form, everything except the high-bit-set
2466 case has a much lower minimum execution time. */
2469 alpha_emit_floatuns (rtx operands[2])
2471 rtx neglab, donelab, i0, i1, f0, in, out;
2475 in = force_reg (DImode, operands[1]);
2476 mode = GET_MODE (out);
2477 neglab = gen_label_rtx ();
2478 donelab = gen_label_rtx ();
2479 i0 = gen_reg_rtx (DImode);
2480 i1 = gen_reg_rtx (DImode);
2481 f0 = gen_reg_rtx (mode);
2483 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2485 emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in)));
2486 emit_jump_insn (gen_jump (donelab));
2489 emit_label (neglab);
2491 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2492 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2493 emit_insn (gen_iordi3 (i0, i0, i1));
2494 emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0)));
2495 emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0)));
2497 emit_label (donelab);
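/* In C terms, the expansion above computes approximately the following
   (an illustrative sketch, not compiler code):

     if ((HOST_WIDE_INT) in >= 0)
       out = (FLOAT) in;
     else
       {
	 i0 = ((unsigned HOST_WIDE_INT) in >> 1) | (in & 1);
	 out = (FLOAT) i0;
	 out = out + out;
       }

   The "(in >> 1) | (in & 1)" step halves the value while folding the
   discarded bit back in as a sticky bit, so the final doubling rounds
   the same way a direct unsigned conversion would.  */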
2500 /* Generate the comparison for a conditional branch. */
2503 alpha_emit_conditional_branch (rtx operands[], machine_mode cmp_mode)
2505 enum rtx_code cmp_code, branch_code;
2506 machine_mode branch_mode = VOIDmode;
2507 enum rtx_code code = GET_CODE (operands[0]);
2508 rtx op0 = operands[1], op1 = operands[2];
2511 if (cmp_mode == TFmode)
2513 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2518 /* The general case: fold the comparison code to the types of compares
2519 that we have, choosing the branch as necessary. */
2522 case EQ: case LE: case LT: case LEU: case LTU:
2524 /* We have these compares. */
2525 cmp_code = code, branch_code = NE;
2530 /* These must be reversed. */
2531 cmp_code = reverse_condition (code), branch_code = EQ;
2534 case GE: case GT: case GEU: case GTU:
2535 /* For FP, we swap them; for INT, we reverse them. */
2536 if (cmp_mode == DFmode)
2538 cmp_code = swap_condition (code);
2540 std::swap (op0, op1);
2544 cmp_code = reverse_condition (code);
2553 if (cmp_mode == DFmode)
2555 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2557 /* When we are not as concerned about non-finite values, and we
2558 are comparing against zero, we can branch directly. */
2559 if (op1 == CONST0_RTX (DFmode))
2560 cmp_code = UNKNOWN, branch_code = code;
2561 else if (op0 == CONST0_RTX (DFmode))
2563 /* Undo the swap we probably did just above. */
2564 std::swap (op0, op1);
2565 branch_code = swap_condition (cmp_code);
2571 /* ??? We mark the branch mode to be CCmode to prevent the
2572 compare and branch from being combined, since the compare
2573 insn follows IEEE rules that the branch does not. */
2574 branch_mode = CCmode;
2579 /* The following optimizations are only for signed compares. */
2580 if (code != LEU && code != LTU && code != GEU && code != GTU)
2582 /* Whee. Compare and branch against 0 directly. */
2583 if (op1 == const0_rtx)
2584 cmp_code = UNKNOWN, branch_code = code;
2586 /* If the constant doesn't fit into an immediate, but can
2587 be generated by lda/ldah, we adjust the argument and
2588 compare against zero, so we can use beq/bne directly. */
2589 /* ??? Don't do this when comparing against symbols, otherwise
2590 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2591 be declared false out of hand (at least for non-weak). */
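      /* For example (illustrative): "x == 0x10000" becomes
	 "(x + -0x10000) == 0", since -0x10000 satisfies constraint L
	 (a single ldah), letting the sum feed beq directly with no
	 separate constant load.  */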
2592 else if (CONST_INT_P (op1)
2593 && (code == EQ || code == NE)
2594 && !(symbolic_operand (op0, VOIDmode)
2595 || (REG_P (op0) && REG_POINTER (op0))))
2597 rtx n_op1 = GEN_INT (-INTVAL (op1));
2599 if (! satisfies_constraint_I (op1)
2600 && (satisfies_constraint_K (n_op1)
2601 || satisfies_constraint_L (n_op1)))
2602 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2606 if (!reg_or_0_operand (op0, DImode))
2607 op0 = force_reg (DImode, op0);
2608 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2609 op1 = force_reg (DImode, op1);
2612 /* Emit an initial compare instruction, if necessary. */
2614 if (cmp_code != UNKNOWN)
2616 tem = gen_reg_rtx (cmp_mode);
2617 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2620 /* Emit the branch instruction. */
2621 tem = gen_rtx_SET (pc_rtx,
2622 gen_rtx_IF_THEN_ELSE (VOIDmode,
2623 gen_rtx_fmt_ee (branch_code,
2625 CONST0_RTX (cmp_mode)),
2626 gen_rtx_LABEL_REF (VOIDmode,
2629 emit_jump_insn (tem);
2632 /* Certain simplifications can be done to make invalid setcc operations
2633 valid. Return the final comparison, or NULL if we can't make it work. */
2636 alpha_emit_setcc (rtx operands[], machine_mode cmp_mode)
2638 enum rtx_code cmp_code;
2639 enum rtx_code code = GET_CODE (operands[1]);
2640 rtx op0 = operands[2], op1 = operands[3];
2643 if (cmp_mode == TFmode)
2645 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2650 if (cmp_mode == DFmode && !TARGET_FIX)
2653 /* The general case: fold the comparison code to the types of compares
2654 that we have, choosing the branch as necessary. */
2659 case EQ: case LE: case LT: case LEU: case LTU:
2661 /* We have these compares. */
2662 if (cmp_mode == DFmode)
2663 cmp_code = code, code = NE;
2667 if (cmp_mode == DImode && op1 == const0_rtx)
2672 cmp_code = reverse_condition (code);
2676 case GE: case GT: case GEU: case GTU:
2677 /* These normally need swapping, but for integer zero we have
2678 special patterns that recognize swapped operands. */
2679 if (cmp_mode == DImode && op1 == const0_rtx)
2681 code = swap_condition (code);
2682 if (cmp_mode == DFmode)
2683 cmp_code = code, code = NE;
2684 std::swap (op0, op1);
2691 if (cmp_mode == DImode)
2693 if (!register_operand (op0, DImode))
2694 op0 = force_reg (DImode, op0);
2695 if (!reg_or_8bit_operand (op1, DImode))
2696 op1 = force_reg (DImode, op1);
2699 /* Emit an initial compare instruction, if necessary. */
2700 if (cmp_code != UNKNOWN)
2702 tmp = gen_reg_rtx (cmp_mode);
2703 emit_insn (gen_rtx_SET (tmp, gen_rtx_fmt_ee (cmp_code, cmp_mode,
2706 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2710 /* Emit the setcc instruction. */
2711 emit_insn (gen_rtx_SET (operands[0], gen_rtx_fmt_ee (code, DImode,
2717 /* Rewrite a comparison against zero CMP of the form
2718 (CODE (cc0) (const_int 0)) so it can be written validly in
2719 a conditional move (if_then_else CMP ...).
2720 If both of the operands that set cc0 are nonzero we must emit
2721 an insn to perform the compare (it can't be done within
2722 the conditional move). */
2725 alpha_emit_conditional_move (rtx cmp, machine_mode mode)
2727 enum rtx_code code = GET_CODE (cmp);
2728 enum rtx_code cmov_code = NE;
2729 rtx op0 = XEXP (cmp, 0);
2730 rtx op1 = XEXP (cmp, 1);
2731 machine_mode cmp_mode
2732 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2733 machine_mode cmov_mode = VOIDmode;
2734 int local_fast_math = flag_unsafe_math_optimizations;
2737 if (cmp_mode == TFmode)
2739 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2744 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2746 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2748 enum rtx_code cmp_code;
2753 /* If we have fp<->int register move instructions, do a cmov by
2754 performing the comparison in fp registers, and move the
2755 zero/nonzero value to integer registers, where we can then
2756 use a normal cmov, or vice-versa. */
2760 case EQ: case LE: case LT: case LEU: case LTU:
2762 /* We have these compares. */
2763 cmp_code = code, code = NE;
2768 /* These must be reversed. */
2769 cmp_code = reverse_condition (code), code = EQ;
2772 case GE: case GT: case GEU: case GTU:
2773 /* These normally need swapping, but for integer zero we have
2774 special patterns that recognize swapped operands. */
2775 if (cmp_mode == DImode && op1 == const0_rtx)
2776 cmp_code = code, code = NE;
2779 cmp_code = swap_condition (code);
2781 std::swap (op0, op1);
2789 if (cmp_mode == DImode)
2791 if (!reg_or_0_operand (op0, DImode))
2792 op0 = force_reg (DImode, op0);
2793 if (!reg_or_8bit_operand (op1, DImode))
2794 op1 = force_reg (DImode, op1);
2797 tem = gen_reg_rtx (cmp_mode);
2798 emit_insn (gen_rtx_SET (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode,
2801 cmp_mode = cmp_mode == DImode ? E_DFmode : E_DImode;
2802 op0 = gen_lowpart (cmp_mode, tem);
2803 op1 = CONST0_RTX (cmp_mode);
2804 cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2805 local_fast_math = 1;
2808 if (cmp_mode == DImode)
2810 if (!reg_or_0_operand (op0, DImode))
2811 op0 = force_reg (DImode, op0);
2812 if (!reg_or_8bit_operand (op1, DImode))
2813 op1 = force_reg (DImode, op1);
2816 /* We may be able to use a conditional move directly.
2817 This avoids emitting spurious compares. */
2818 if (signed_comparison_operator (cmp, VOIDmode)
2819 && (cmp_mode == DImode || local_fast_math)
2820 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2821 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2823 /* We can't put the comparison inside the conditional move;
2824 emit a compare instruction and put that inside the
2825 conditional move. Make sure we emit only comparisons we have;
2826 swap or reverse as necessary. */
2828 if (!can_create_pseudo_p ())
2833 case EQ: case LE: case LT: case LEU: case LTU:
2835 /* We have these compares: */
2840 /* These must be reversed. */
2841 code = reverse_condition (code);
2845 case GE: case GT: case GEU: case GTU:
2846 /* These normally need swapping, but for integer zero we have
2847 special patterns that recognize swapped operands. */
2848 if (cmp_mode == DImode && op1 == const0_rtx)
2850 code = swap_condition (code);
2851 std::swap (op0, op1);
2858 if (cmp_mode == DImode)
2860 if (!reg_or_0_operand (op0, DImode))
2861 op0 = force_reg (DImode, op0);
2862 if (!reg_or_8bit_operand (op1, DImode))
2863 op1 = force_reg (DImode, op1);
2866 /* ??? We mark the branch mode to be CCmode to prevent the compare
2867 and cmov from being combined, since the compare insn follows IEEE
2868 rules that the cmov does not. */
2869 if (cmp_mode == DFmode && !local_fast_math)
2872 tem = gen_reg_rtx (cmp_mode);
2873 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2874 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2877 /* Simplify a conditional move of two constants into a setcc with
2878 arithmetic. This is done with a splitter since combine would
2879 just undo the work if done during code generation. It also catches
2880 cases we wouldn't have before cse. */
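/* For example (illustrative): "d = c ? 8 : 0" becomes a setcc and a
   shift,

	cmpCC	t,c,0		# setcc per CODE
	sll	t,3,d		# 8 == 1 << 3

   while "d = c ? 5 : 1" (diff == 4) uses the scaled-add form
   "t * 4 + 1", i.e. a single s4addq.  */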
2883 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2884 rtx t_rtx, rtx f_rtx)
2886 HOST_WIDE_INT t, f, diff;
2888 rtx target, subtarget, tmp;
2890 mode = GET_MODE (dest);
2895 if (((code == NE || code == EQ) && diff < 0)
2896 || (code == GE || code == GT))
2898 code = reverse_condition (code);
2899 diff = t, t = f, f = diff;
2903 subtarget = target = dest;
2906 target = gen_lowpart (DImode, dest);
2907 if (can_create_pseudo_p ())
2908 subtarget = gen_reg_rtx (DImode);
2912 /* Below, we must be careful to use copy_rtx on target and subtarget
2913 in intermediate insns, as they may be a subreg rtx, which may not
2914 be shared. */
2916 if (f == 0 && exact_log2 (diff) > 0
2917 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2918 viable over a longer latency cmove. On EV5, the E0 slot is a
2919 scarce resource, and on EV4 shift has the same latency as a cmove. */
2920 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2922 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2923 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
2925 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2926 GEN_INT (exact_log2 (t)));
2927 emit_insn (gen_rtx_SET (target, tmp));
2929 else if (f == 0 && t == -1)
2931 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2932 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
2934 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2936 else if (diff == 1 || diff == 4 || diff == 8)
2940 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2941 emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp));
2944 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2947 add_op = GEN_INT (f);
2948 if (sext_add_operand (add_op, mode))
2950 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2952 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2953 emit_insn (gen_rtx_SET (target, tmp));
2965 /* Look up the X_floating library function name for the
2966 given operation. */
2968 struct GTY(()) xfloating_op
2970 const enum rtx_code code;
2971 const char *const GTY((skip)) osf_func;
2972 const char *const GTY((skip)) vms_func;
2976 static GTY(()) struct xfloating_op xfloating_ops[] =
2978 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2979 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2980 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2981 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2982 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2983 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2984 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2985 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2986 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2987 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2988 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2989 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2990 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2991 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2992 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2995 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2997 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2998 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
3002 alpha_lookup_xfloating_lib_func (enum rtx_code code)
3004 struct xfloating_op *ops = xfloating_ops;
3005 long n = ARRAY_SIZE (xfloating_ops);
3008 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
3010 /* How irritating. Nothing to key off for the main table. */
3011 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
3014 n = ARRAY_SIZE (vax_cvt_ops);
3017 for (i = 0; i < n; ++i, ++ops)
3018 if (ops->code == code)
3020 rtx func = ops->libcall;
3023 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
3024 ? ops->vms_func : ops->osf_func);
3025 ops->libcall = func;
3033 /* Most X_floating operations take the rounding mode as an argument.
3034 Compute that here. */
3037 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3038 enum alpha_fp_rounding_mode round)
3044 case ALPHA_FPRM_NORM:
3047 case ALPHA_FPRM_MINF:
3050 case ALPHA_FPRM_CHOP:
3053 case ALPHA_FPRM_DYN:
3059 /* XXX For reference, round to +inf is mode = 3. */
3062 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3068 /* Emit an X_floating library function call.
3070 Note that these functions do not follow normal calling conventions:
3071 TFmode arguments are passed in two integer registers (as opposed to
3072 indirect); TFmode return values appear in R16+R17.
3074 FUNC is the function to call.
3075 TARGET is where the output belongs.
3076 OPERANDS are the inputs.
3077 NOPERANDS is the count of inputs.
3078 EQUIV is the expression equivalent for the function.
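   As an illustrative reading of the loop below: a call to _OtsAddX
   passes the first TFmode operand in $16/$17, the second in $18/$19,
   and the rounding-mode argument in $20, with the TFmode result coming
   back in $16/$17.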
3082 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3083 int noperands, rtx equiv)
3085 rtx usage = NULL_RTX, reg;
3090 for (i = 0; i < noperands; ++i)
3092 switch (GET_MODE (operands[i]))
3095 reg = gen_rtx_REG (TFmode, regno);
3100 reg = gen_rtx_REG (DFmode, regno + 32);
3105 gcc_assert (CONST_INT_P (operands[i]));
3108 reg = gen_rtx_REG (DImode, regno);
3116 emit_move_insn (reg, operands[i]);
3117 use_reg (&usage, reg);
3120 switch (GET_MODE (target))
3123 reg = gen_rtx_REG (TFmode, 16);
3126 reg = gen_rtx_REG (DFmode, 32);
3129 reg = gen_rtx_REG (DImode, 0);
3135 rtx mem = gen_rtx_MEM (QImode, func);
3136 rtx_insn *tmp = emit_call_insn (gen_call_value (reg, mem, const0_rtx,
3137 const0_rtx, const0_rtx));
3138 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3139 RTL_CONST_CALL_P (tmp) = 1;
3144 emit_libcall_block (tmp, target, reg, equiv);
3147 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3150 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3154 rtx out_operands[3];
3156 func = alpha_lookup_xfloating_lib_func (code);
3157 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3159 out_operands[0] = operands[1];
3160 out_operands[1] = operands[2];
3161 out_operands[2] = GEN_INT (mode);
3162 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3163 gen_rtx_fmt_ee (code, TFmode, operands[1],
3167 /* Emit an X_floating library function call for a comparison. */
3170 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3172 enum rtx_code cmp_code, res_code;
3173 rtx func, out, operands[2], note;
3175 /* X_floating library comparison functions return
3176 -1 unordered
3177 0 false
3178 1 true
3179 Convert the compare against the raw return value. */
3207 func = alpha_lookup_xfloating_lib_func (cmp_code);
3211 out = gen_reg_rtx (DImode);
3213 /* What's actually returned is -1,0,1, not a proper boolean value. */
3214 note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1);
3215 note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE);
3216 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3221 /* Emit an X_floating library function call for a conversion. */
3224 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3226 int noperands = 1, mode;
3227 rtx out_operands[2];
3229 enum rtx_code code = orig_code;
3231 if (code == UNSIGNED_FIX)
3234 func = alpha_lookup_xfloating_lib_func (code);
3236 out_operands[0] = operands[1];
3241 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3242 out_operands[1] = GEN_INT (mode);
3245 case FLOAT_TRUNCATE:
3246 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3247 out_operands[1] = GEN_INT (mode);
3254 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3255 gen_rtx_fmt_e (orig_code,
3256 GET_MODE (operands[0]),
3260 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3261 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3262 guarantee that the sequence
3263 set (OP[0] OP[2])
3264 set (OP[1] OP[3])
3265 is valid. Naturally, output operand ordering is little-endian.
3266 This is used by *movtf_internal and *movti_internal. */
3269 alpha_split_tmode_pair (rtx operands[4], machine_mode mode,
3272 switch (GET_CODE (operands[1]))
3275 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3276 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3280 operands[3] = adjust_address (operands[1], DImode, 8);
3281 operands[2] = adjust_address (operands[1], DImode, 0);
3284 CASE_CONST_SCALAR_INT:
3286 gcc_assert (operands[1] == CONST0_RTX (mode));
3287 operands[2] = operands[3] = const0_rtx;
3294 switch (GET_CODE (operands[0]))
3297 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3298 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3302 operands[1] = adjust_address (operands[0], DImode, 8);
3303 operands[0] = adjust_address (operands[0], DImode, 0);
3310 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3312 std::swap (operands[0], operands[1]);
3313 std::swap (operands[2], operands[3]);
3317 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3318 op2 is a register containing the sign bit, operation is the
3319 logical operation to be performed. */
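/* Concretely (illustrative): negtf2 passes xor for OPERATION, flipping
   the sign bit of the high quadword, while abstf2 passes and-not,
   clearing it; the low quadword is copied unchanged either way.  */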
3322 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3324 rtx high_bit = operands[2];
3328 alpha_split_tmode_pair (operands, TFmode, false);
3330 /* Detect three flavors of operand overlap. */
3332 if (rtx_equal_p (operands[0], operands[2]))
3334 else if (rtx_equal_p (operands[1], operands[2]))
3336 if (rtx_equal_p (operands[0], high_bit))
3343 emit_move_insn (operands[0], operands[2]);
3345 /* ??? If the destination overlaps both source tf and high_bit, then
3346 assume source tf is dead in its entirety and use the other half
3347 for a scratch register. Otherwise "scratch" is just the proper
3348 destination register. */
3349 scratch = operands[move < 2 ? 1 : 3];
3351 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3355 emit_move_insn (operands[0], operands[2]);
3357 emit_move_insn (operands[1], scratch);
3361 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3362 unaligned data:
3364 unsigned:                          signed:
3365 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3366 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3367 lda r3,X(r11) lda r3,X+2(r11)
3368 extwl r1,r3,r1 extql r1,r3,r1
3369 extwh r2,r3,r2 extqh r2,r3,r2
3370 or r1,r2,r1 or r1,r2,r1
3373 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3374 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3375 lda r3,X(r11) lda r3,X(r11)
3376 extll r1,r3,r1 extll r1,r3,r1
3377 extlh r2,r3,r2 extlh r2,r3,r2
3378 or r1,r2,r1 addl r1,r2,r1
3380 quad: ldq_u r1,X(r11)
3389 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3390 HOST_WIDE_INT ofs, int sign)
3392 rtx meml, memh, addr, extl, exth, tmp, mema;
3395 if (TARGET_BWX && size == 2)
3397 meml = adjust_address (mem, QImode, ofs);
3398 memh = adjust_address (mem, QImode, ofs+1);
3399 extl = gen_reg_rtx (DImode);
3400 exth = gen_reg_rtx (DImode);
3401 emit_insn (gen_zero_extendqidi2 (extl, meml));
3402 emit_insn (gen_zero_extendqidi2 (exth, memh));
3403 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3404 NULL, 1, OPTAB_LIB_WIDEN);
3405 addr = expand_simple_binop (DImode, IOR, extl, exth,
3406 NULL, 1, OPTAB_LIB_WIDEN);
3408 if (sign && GET_MODE (tgt) != HImode)
3410 addr = gen_lowpart (HImode, addr);
3411 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3415 if (GET_MODE (tgt) != DImode)
3416 addr = gen_lowpart (GET_MODE (tgt), addr);
3417 emit_move_insn (tgt, addr);
3422 meml = gen_reg_rtx (DImode);
3423 memh = gen_reg_rtx (DImode);
3424 addr = gen_reg_rtx (DImode);
3425 extl = gen_reg_rtx (DImode);
3426 exth = gen_reg_rtx (DImode);
3428 mema = XEXP (mem, 0);
3429 if (GET_CODE (mema) == LO_SUM)
3430 mema = force_reg (Pmode, mema);
3432 /* AND addresses cannot be in any alias set, since they may implicitly
3433 alias surrounding code. Ideally we'd have some alias set that
3434 covered all types except those with alignment 8 or higher. */
3436 tmp = change_address (mem, DImode,
3437 gen_rtx_AND (DImode,
3438 plus_constant (DImode, mema, ofs),
3440 set_mem_alias_set (tmp, 0);
3441 emit_move_insn (meml, tmp);
3443 tmp = change_address (mem, DImode,
3444 gen_rtx_AND (DImode,
3445 plus_constant (DImode, mema,
3448 set_mem_alias_set (tmp, 0);
3449 emit_move_insn (memh, tmp);
3451 if (sign && size == 2)
3453 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
3455 emit_insn (gen_extql (extl, meml, addr));
3456 emit_insn (gen_extqh (exth, memh, addr));
3458 /* We must use tgt here for the target. The alpha-vms port fails if we use
3459 addr for the target, because addr is marked as a pointer and combine
3460 knows that pointers are always sign-extended 32-bit values. */
3461 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3462 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3463 addr, 1, OPTAB_WIDEN);
3467 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
3468 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3472 emit_insn (gen_extwh (exth, memh, addr));
3476 emit_insn (gen_extlh (exth, memh, addr));
3480 emit_insn (gen_extqh (exth, memh, addr));
3487 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3488 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3493 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3496 /* Similarly, use ins and msk instructions to perform unaligned stores. */
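/* For a halfword store of r5 at unaligned X(r11), the non-BWX sequence
   is roughly (an illustrative sketch):

	lda	r3,X(r11)	# the unaligned address
	ldq_u	r2,X+1(r11)	# load both enclosing quadwords
	ldq_u	r1,X(r11)
	inswh	r5,r3,r6	# position the new data
	inswl	r5,r3,r4
	mskwh	r2,r3,r2	# clear the bytes being replaced
	mskwl	r1,r3,r1
	or	r2,r6,r2	# merge old and new
	or	r1,r4,r1
	stq_u	r2,X+1(r11)	# store high first, then low
	stq_u	r1,X(r11)  */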
3499 alpha_expand_unaligned_store (rtx dst, rtx src,
3500 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3502 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3504 if (TARGET_BWX && size == 2)
3506 if (src != const0_rtx)
3508 dstl = gen_lowpart (QImode, src);
3509 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3510 NULL, 1, OPTAB_LIB_WIDEN);
3511 dsth = gen_lowpart (QImode, dsth);
3514 dstl = dsth = const0_rtx;
3516 meml = adjust_address (dst, QImode, ofs);
3517 memh = adjust_address (dst, QImode, ofs+1);
3519 emit_move_insn (meml, dstl);
3520 emit_move_insn (memh, dsth);
3524 dstl = gen_reg_rtx (DImode);
3525 dsth = gen_reg_rtx (DImode);
3526 insl = gen_reg_rtx (DImode);
3527 insh = gen_reg_rtx (DImode);
3529 dsta = XEXP (dst, 0);
3530 if (GET_CODE (dsta) == LO_SUM)
3531 dsta = force_reg (Pmode, dsta);
3533 /* AND addresses cannot be in any alias set, since they may implicitly
3534 alias surrounding code. Ideally we'd have some alias set that
3535 covered all types except those with alignment 8 or higher. */
3537 meml = change_address (dst, DImode,
3538 gen_rtx_AND (DImode,
3539 plus_constant (DImode, dsta, ofs),
3541 set_mem_alias_set (meml, 0);
3543 memh = change_address (dst, DImode,
3544 gen_rtx_AND (DImode,
3545 plus_constant (DImode, dsta,
3548 set_mem_alias_set (memh, 0);
3550 emit_move_insn (dsth, memh);
3551 emit_move_insn (dstl, meml);
3553 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
3555 if (src != CONST0_RTX (GET_MODE (src)))
3557 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3558 GEN_INT (size*8), addr));
3563 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3566 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3569 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3576 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3581 emit_insn (gen_mskwl (dstl, dstl, addr));
3584 emit_insn (gen_mskll (dstl, dstl, addr));
3587 emit_insn (gen_mskql (dstl, dstl, addr));
3593 if (src != CONST0_RTX (GET_MODE (src)))
3595 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3596 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3599 /* Must store high before low for the degenerate case of an aligned address. */
3600 emit_move_insn (memh, dsth);
3601 emit_move_insn (meml, dstl);
3604 /* The block move code tries to maximize speed by separating loads and
3605 stores at the expense of register pressure: we load all of the data
3606 before we store it back out. Two secondary effects are worth
3607 mentioning: this speeds copying between aligned and unaligned
3608 buffers, and it makes the code significantly easier to write. */
3610 #define MAX_MOVE_WORDS 8
3612 /* Load an integral number of consecutive unaligned quadwords. */
3615 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3616 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3618 rtx const im8 = GEN_INT (-8);
3619 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3620 rtx sreg, areg, tmp, smema;
3623 smema = XEXP (smem, 0);
3624 if (GET_CODE (smema) == LO_SUM)
3625 smema = force_reg (Pmode, smema);
3627 /* Generate all the tmp registers we need. */
3628 for (i = 0; i < words; ++i)
3630 data_regs[i] = out_regs[i];
3631 ext_tmps[i] = gen_reg_rtx (DImode);
3633 data_regs[words] = gen_reg_rtx (DImode);
3636 smem = adjust_address (smem, GET_MODE (smem), ofs);
3638 /* Load up all of the source data. */
3639 for (i = 0; i < words; ++i)
3641 tmp = change_address (smem, DImode,
3642 gen_rtx_AND (DImode,
3643 plus_constant (DImode, smema, 8*i),
3645 set_mem_alias_set (tmp, 0);
3646 emit_move_insn (data_regs[i], tmp);
3649 tmp = change_address (smem, DImode,
3650 gen_rtx_AND (DImode,
3651 plus_constant (DImode, smema,
3654 set_mem_alias_set (tmp, 0);
3655 emit_move_insn (data_regs[words], tmp);
3657 /* Extract the half-word fragments. Unfortunately, DEC decided to make
3658 extxh with offset zero a noop instead of zeroing the register, so
3659 we must take care of that edge condition ourselves with cmov. */
3661 sreg = copy_addr_to_reg (smema);
3662 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3664 for (i = 0; i < words; ++i)
3666 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3667 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3668 emit_insn (gen_rtx_SET (ext_tmps[i],
3669 gen_rtx_IF_THEN_ELSE (DImode,
3670 gen_rtx_EQ (DImode, areg,
3672 const0_rtx, ext_tmps[i])));
3675 /* Merge the half-words into whole words. */
3676 for (i = 0; i < words; ++i)
3678 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3679 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3683 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3684 may be NULL to store zeros. */
3687 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3688 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3690 rtx const im8 = GEN_INT (-8);
3691 rtx ins_tmps[MAX_MOVE_WORDS];
3692 rtx st_tmp_1, st_tmp_2, dreg;
3693 rtx st_addr_1, st_addr_2, dmema;
3696 dmema = XEXP (dmem, 0);
3697 if (GET_CODE (dmema) == LO_SUM)
3698 dmema = force_reg (Pmode, dmema);
3700 /* Generate all the tmp registers we need. */
3701 if (data_regs != NULL)
3702 for (i = 0; i < words; ++i)
3703 ins_tmps[i] = gen_reg_rtx(DImode);
3704 st_tmp_1 = gen_reg_rtx(DImode);
3705 st_tmp_2 = gen_reg_rtx(DImode);
3708 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3710 st_addr_2 = change_address (dmem, DImode,
3711 gen_rtx_AND (DImode,
3712 plus_constant (DImode, dmema,
3715 set_mem_alias_set (st_addr_2, 0);
3717 st_addr_1 = change_address (dmem, DImode,
3718 gen_rtx_AND (DImode, dmema, im8));
3719 set_mem_alias_set (st_addr_1, 0);
3721 /* Load up the destination end bits. */
3722 emit_move_insn (st_tmp_2, st_addr_2);
3723 emit_move_insn (st_tmp_1, st_addr_1);
3725 /* Shift the input data into place. */
3726 dreg = copy_addr_to_reg (dmema);
3727 if (data_regs != NULL)
3729 for (i = words-1; i >= 0; --i)
3731 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3732 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3734 for (i = words-1; i > 0; --i)
3736 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3737 ins_tmps[i-1], ins_tmps[i-1], 1,
3742 /* Split and merge the ends with the destination data. */
3743 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3744 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3746 if (data_regs != NULL)
3748 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3749 st_tmp_2, 1, OPTAB_WIDEN);
3750 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3751 st_tmp_1, 1, OPTAB_WIDEN);
3755 emit_move_insn (st_addr_2, st_tmp_2);
3756 for (i = words-1; i > 0; --i)
3758 rtx tmp = change_address (dmem, DImode,
3759 gen_rtx_AND (DImode,
3760 plus_constant (DImode,
3763 set_mem_alias_set (tmp, 0);
3764 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3766 emit_move_insn (st_addr_1, st_tmp_1);
3770 /* Expand string/block move operations.
3772 operands[0] is the pointer to the destination.
3773 operands[1] is the pointer to the source.
3774 operands[2] is the number of bytes to move.
3775 operands[3] is the alignment. */
3778 alpha_expand_block_move (rtx operands[])
3780 rtx bytes_rtx = operands[2];
3781 rtx align_rtx = operands[3];
3782 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3783 HOST_WIDE_INT bytes = orig_bytes;
3784 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3785 HOST_WIDE_INT dst_align = src_align;
3786 rtx orig_src = operands[1];
3787 rtx orig_dst = operands[0];
3788 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3790 unsigned int i, words, ofs, nregs = 0;
3792 if (orig_bytes <= 0)
3794 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3797 /* Look for additional alignment information from recorded register info. */
3799 tmp = XEXP (orig_src, 0);
3801 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3802 else if (GET_CODE (tmp) == PLUS
3803 && REG_P (XEXP (tmp, 0))
3804 && CONST_INT_P (XEXP (tmp, 1)))
3806 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3807 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3811 if (a >= 64 && c % 8 == 0)
3813 else if (a >= 32 && c % 4 == 0)
3815 else if (a >= 16 && c % 2 == 0)
3820 tmp = XEXP (orig_dst, 0);
3822 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3823 else if (GET_CODE (tmp) == PLUS
3824 && REG_P (XEXP (tmp, 0))
3825 && CONST_INT_P (XEXP (tmp, 1)))
3827 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3828 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3832 if (a >= 64 && c % 8 == 0)
3834 else if (a >= 32 && c % 4 == 0)
3836 else if (a >= 16 && c % 2 == 0)
3842 if (src_align >= 64 && bytes >= 8)
3846 for (i = 0; i < words; ++i)
3847 data_regs[nregs + i] = gen_reg_rtx (DImode);
3849 for (i = 0; i < words; ++i)
3850 emit_move_insn (data_regs[nregs + i],
3851 adjust_address (orig_src, DImode, ofs + i * 8));
3858 if (src_align >= 32 && bytes >= 4)
3862 for (i = 0; i < words; ++i)
3863 data_regs[nregs + i] = gen_reg_rtx (SImode);
3865 for (i = 0; i < words; ++i)
3866 emit_move_insn (data_regs[nregs + i],
3867 adjust_address (orig_src, SImode, ofs + i * 4));
3878 for (i = 0; i < words+1; ++i)
3879 data_regs[nregs + i] = gen_reg_rtx (DImode);
3881 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3889 if (! TARGET_BWX && bytes >= 4)
3891 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3892 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3899 if (src_align >= 16)
3902 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3903 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3906 } while (bytes >= 2);
3908 else if (! TARGET_BWX)
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3919 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3920 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3925 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3927 /* Now save it back out again. */
3931 /* Write out the data in whatever chunks reading the source allowed. */
3932 if (dst_align >= 64)
3934 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3936 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3943 if (dst_align >= 32)
3945 /* If the source has remaining DImode regs, write them out in
3946 two pieces. */
3947 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3949 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3950 NULL_RTX, 1, OPTAB_WIDEN);
3952 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3953 gen_lowpart (SImode, data_regs[i]));
3954 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3955 gen_lowpart (SImode, tmp));
3960 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3969 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3971 /* Write out a remaining block of words using unaligned methods. */
3973 for (words = 1; i + words < nregs; words++)
3974 if (GET_MODE (data_regs[i + words]) != DImode)
3978 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3980 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3987 /* Due to the above, this won't be aligned. */
3988 /* ??? If we have more than one of these, consider constructing full
3989 words in registers and using alpha_expand_unaligned_store_words. */
3990 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3992 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3997 if (dst_align >= 16)
3998 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4000 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4005 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4007 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4012 /* The remainder must be byte copies. */
4015 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4016 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4025 alpha_expand_block_clear (rtx operands[])
4027 rtx bytes_rtx = operands[1];
4028 rtx align_rtx = operands[3];
4029 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4030 HOST_WIDE_INT bytes = orig_bytes;
4031 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4032 HOST_WIDE_INT alignofs = 0;
4033 rtx orig_dst = operands[0];
4035 int i, words, ofs = 0;
4037 if (orig_bytes <= 0)
4039 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4042 /* Look for stricter alignment. */
4043 tmp = XEXP (orig_dst, 0);
4045 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4046 else if (GET_CODE (tmp) == PLUS
4047 && REG_P (XEXP (tmp, 0))
4048 && CONST_INT_P (XEXP (tmp, 1)))
4050 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4051 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4056 align = a, alignofs = 8 - c % 8;
4058 align = a, alignofs = 4 - c % 4;
4060 align = a, alignofs = 2 - c % 2;
4064 /* Handle an unaligned prefix first. */
4068 /* Given that alignofs is bounded by align, the only time BWX could
4069 generate three stores is for a 7 byte fill. Prefer two individual
4070 stores over a load/mask/store sequence. */
4071 if ((!TARGET_BWX || alignofs == 7)
4073 && !(alignofs == 4 && bytes >= 4))
4075 machine_mode mode = (align >= 64 ? DImode : SImode);
4076 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4080 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4081 set_mem_alias_set (mem, 0);
4083 mask = ~(HOST_WIDE_INT_M1U << (inv_alignofs * 8));
4084 if (bytes < alignofs)
4086 mask |= HOST_WIDE_INT_M1U << ((inv_alignofs + bytes) * 8);
4097 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4098 NULL_RTX, 1, OPTAB_WIDEN);
4100 emit_move_insn (mem, tmp);
4103 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4105 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4110 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4112 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4117 if (alignofs == 4 && bytes >= 4)
4119 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4125 /* If we've not used the extra lead alignment information by now,
4126 we won't be able to. Downgrade align to match what's left over. */
4129 alignofs = alignofs & -alignofs;
4130 align = MIN (align, alignofs * BITS_PER_UNIT);
4134 /* Handle a block of contiguous long-words. */
4136 if (align >= 64 && bytes >= 8)
4140 for (i = 0; i < words; ++i)
4141 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4148 /* If the block is large and appropriately aligned, emit a single
4149 store followed by a sequence of stq_u insns. */
4151 if (align >= 32 && bytes > 16)
4155 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4159 orig_dsta = XEXP (orig_dst, 0);
4160 if (GET_CODE (orig_dsta) == LO_SUM)
4161 orig_dsta = force_reg (Pmode, orig_dsta);
4164 for (i = 0; i < words; ++i)
4167 = change_address (orig_dst, DImode,
4168 gen_rtx_AND (DImode,
4169 plus_constant (DImode, orig_dsta,
4172 set_mem_alias_set (mem, 0);
4173 emit_move_insn (mem, const0_rtx);
4176 /* Depending on the alignment, the first stq_u may have overlapped
4177 with the initial stl, which means that the last stq_u didn't
4178 write as much as it would appear. Leave those questionable bytes
4179 unaccounted for. */
4180 bytes -= words * 8 - 4;
4181 ofs += words * 8 - 4;
4184 /* Handle a smaller block of aligned words. */
4186 if ((align >= 64 && bytes == 4)
4187 || (align == 32 && bytes >= 4))
4191 for (i = 0; i < words; ++i)
4192 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4199 /* An unaligned block uses stq_u stores for as many as possible. */
4205 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4211 /* Next clean up any trailing pieces. */
4213 /* Count the number of bits in BYTES for which aligned stores could
4214 be emitted. */
4216 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4220 /* If we have appropriate alignment (and it wouldn't take too many
4221 instructions otherwise), mask out the bytes we need. */
4222 if (TARGET_BWX ? words > 2 : bytes > 0)
4229 mem = adjust_address (orig_dst, DImode, ofs);
4230 set_mem_alias_set (mem, 0);
4232 mask = HOST_WIDE_INT_M1U << (bytes * 8);
4234 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4235 NULL_RTX, 1, OPTAB_WIDEN);
4237 emit_move_insn (mem, tmp);
4240 else if (align >= 32 && bytes < 4)
4245 mem = adjust_address (orig_dst, SImode, ofs);
4246 set_mem_alias_set (mem, 0);
4248 mask = HOST_WIDE_INT_M1U << (bytes * 8);
4250 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4251 NULL_RTX, 1, OPTAB_WIDEN);
4253 emit_move_insn (mem, tmp);
4258 if (!TARGET_BWX && bytes >= 4)
4260 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4270 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4274 } while (bytes >= 2);
4276 else if (! TARGET_BWX)
4278 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4286 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4294 /* Returns a mask so that zap(x, value) == x & mask. */
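/* For example (illustrative): value == 0x0f selects bytes 0-3 for
   zapping, so the returned mask is 0xffffffff00000000.  */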
4297 alpha_expand_zap_mask (HOST_WIDE_INT value)
4301 HOST_WIDE_INT mask = 0;
4303 for (i = 7; i >= 0; --i)
4306 if (!((value >> i) & 1))
4310 result = gen_int_mode (mask, DImode);
4315 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4317 rtx op0, rtx op1, rtx op2)
4319 op0 = gen_lowpart (mode, op0);
4321 if (op1 == const0_rtx)
4322 op1 = CONST0_RTX (mode);
4324 op1 = gen_lowpart (mode, op1);
4326 if (op2 == const0_rtx)
4327 op2 = CONST0_RTX (mode);
4329 op2 = gen_lowpart (mode, op2);
4331 emit_insn ((*gen) (op0, op1, op2));
4334 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4335 COND is true. Mark the jump as unlikely to be taken. */
4338 emit_unlikely_jump (rtx cond, rtx label)
4340 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4341 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
4342 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
4345 /* A subroutine of the atomic operation splitters. Emit a load-locked
4346 instruction in MODE. */
4349 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
4351 rtx (*fn) (rtx, rtx) = NULL;
4353 fn = gen_load_locked_si;
4354 else if (mode == DImode)
4355 fn = gen_load_locked_di;
4356 emit_insn (fn (reg, mem));
4359 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4360 instruction in MODE. */
4363 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
4365 rtx (*fn) (rtx, rtx, rtx) = NULL;
4367 fn = gen_store_conditional_si;
4368 else if (mode == DImode)
4369 fn = gen_store_conditional_di;
4370 emit_insn (fn (res, mem, val));
4373 /* Subroutines of the atomic operation splitters. Emit barriers
4374 as needed for the memory MODEL. */
4377 alpha_pre_atomic_barrier (enum memmodel model)
4379 if (need_atomic_barrier_p (model, true))
4380 emit_insn (gen_memory_barrier ());
4384 alpha_post_atomic_barrier (enum memmodel model)
4386 if (need_atomic_barrier_p (model, false))
4387 emit_insn (gen_memory_barrier ());
4390 /* A subroutine of the atomic operation splitters. Emit an insxl
4391 instruction in MODE. */
4394 emit_insxl (machine_mode mode, rtx op1, rtx op2)
4396 rtx ret = gen_reg_rtx (DImode);
4397 rtx (*fn) (rtx, rtx, rtx);
4417 op1 = force_reg (mode, op1);
4418 emit_insn (fn (ret, op1, op2));
4423 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4424 to perform. MEM is the memory on which to operate. VAL is the second
4425 operand of the binary operator. BEFORE and AFTER are optional locations to
4426 return the value of MEM either before of after the operation. SCRATCH is
4427 a scratch register. */
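/* The expansion below is the canonical Alpha ll/sc retry loop; for a
   DImode fetch-and-add it looks roughly like this (an illustrative
   sketch, with the barriers implied by MODEL omitted):

   retry:
	ldq_l	$1,0($16)	# load-locked the old value
	addq	$1,$17,$2	# apply CODE
	stq_c	$2,0($16)	# store-conditional; $2 becomes the success flag
	beq	$2,retry	# reservation lost, try again  */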
4430 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4431 rtx after, rtx scratch, enum memmodel model)
4433 machine_mode mode = GET_MODE (mem);
4434 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4436 alpha_pre_atomic_barrier (model);
4438 label = gen_label_rtx ();
4440 label = gen_rtx_LABEL_REF (DImode, label);
4444 emit_load_locked (mode, before, mem);
4448 x = gen_rtx_AND (mode, before, val);
4449 emit_insn (gen_rtx_SET (val, x));
4451 x = gen_rtx_NOT (mode, val);
4454 x = gen_rtx_fmt_ee (code, mode, before, val);
4456 emit_insn (gen_rtx_SET (after, copy_rtx (x)));
4457 emit_insn (gen_rtx_SET (scratch, x));
4459 emit_store_conditional (mode, cond, mem, scratch);
4461 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4462 emit_unlikely_jump (x, label);
4464 alpha_post_atomic_barrier (model);
4467 /* Expand a compare and swap operation. */
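/* Schematically (an illustrative sketch, barriers omitted), the strong
   form expands to:

   loop:
	ldq_l	ret,0(mem)
	cmpeq	ret,oldval,cond
	beq	cond,done	# values differ: fail
	mov	newval,cond
	stq_c	cond,0(mem)
	beq	cond,loop	# reservation lost: retry
   done:  */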
4470 alpha_split_compare_and_swap (rtx operands[])
4472 rtx cond, retval, mem, oldval, newval;
4474 enum memmodel mod_s, mod_f;
4476 rtx label1, label2, x;
4479 retval = operands[1];
4481 oldval = operands[3];
4482 newval = operands[4];
4483 is_weak = (operands[5] != const0_rtx);
4484 mod_s = memmodel_from_int (INTVAL (operands[6]));
4485 mod_f = memmodel_from_int (INTVAL (operands[7]));
4486 mode = GET_MODE (mem);
4488 alpha_pre_atomic_barrier (mod_s);
4493 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4494 emit_label (XEXP (label1, 0));
4496 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4498 emit_load_locked (mode, retval, mem);
4500 x = gen_lowpart (DImode, retval);
4501 if (oldval == const0_rtx)
4503 emit_move_insn (cond, const0_rtx);
4504 x = gen_rtx_NE (DImode, x, const0_rtx);
4508 x = gen_rtx_EQ (DImode, x, oldval);
4509 emit_insn (gen_rtx_SET (cond, x));
4510 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4512 emit_unlikely_jump (x, label2);
4514 emit_move_insn (cond, newval);
4515 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4519 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4520 emit_unlikely_jump (x, label1);
4523 if (!is_mm_relaxed (mod_f))
4524 emit_label (XEXP (label2, 0));
4526 alpha_post_atomic_barrier (mod_s);
4528 if (is_mm_relaxed (mod_f))
4529 emit_label (XEXP (label2, 0));
4533 alpha_expand_compare_and_swap_12 (rtx operands[])
4535 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4537 rtx addr, align, wdst;
4538 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4543 oldval = operands[3];
4544 newval = operands[4];
4545 is_weak = operands[5];
4546 mod_s = operands[6];
4547 mod_f = operands[7];
4548 mode = GET_MODE (mem);
4550 /* We forced the address into a register via mem_noofs_operand. */
4551 addr = XEXP (mem, 0);
4552 gcc_assert (register_operand (addr, DImode));
4554 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4555 NULL_RTX, 1, OPTAB_DIRECT);
4557 oldval = convert_modes (DImode, mode, oldval, 1);
4559 if (newval != const0_rtx)
4560 newval = emit_insxl (mode, newval, addr);
4562 wdst = gen_reg_rtx (DImode);
4564 gen = gen_atomic_compare_and_swapqi_1;
4566 gen = gen_atomic_compare_and_swaphi_1;
4567 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4568 is_weak, mod_s, mod_f));
4570 emit_move_insn (dst, gen_lowpart (mode, wdst));
4574 alpha_split_compare_and_swap_12 (rtx operands[])
4576 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4579 enum memmodel mod_s, mod_f;
4580 rtx label1, label2, mem, addr, width, mask, x;
4584 orig_mem = operands[2];
4585 oldval = operands[3];
4586 newval = operands[4];
4587 align = operands[5];
4588 is_weak = (operands[6] != const0_rtx);
4589 mod_s = memmodel_from_int (INTVAL (operands[7]));
4590 mod_f = memmodel_from_int (INTVAL (operands[8]));
4591 scratch = operands[9];
4592 mode = GET_MODE (orig_mem);
4593 addr = XEXP (orig_mem, 0);
4595 mem = gen_rtx_MEM (DImode, align);
4596 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4597 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4598 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4600 alpha_pre_atomic_barrier (mod_s);
4605 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4606 emit_label (XEXP (label1, 0));
4608 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4610 emit_load_locked (DImode, scratch, mem);
4612 width = GEN_INT (GET_MODE_BITSIZE (mode));
4613 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4614 emit_insn (gen_extxl (dest, scratch, width, addr));
4616 if (oldval == const0_rtx)
4618 emit_move_insn (cond, const0_rtx);
4619 x = gen_rtx_NE (DImode, dest, const0_rtx);
4623 x = gen_rtx_EQ (DImode, dest, oldval);
4624 emit_insn (gen_rtx_SET (cond, x));
4625 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4627 emit_unlikely_jump (x, label2);
4629 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4631 if (newval != const0_rtx)
4632 emit_insn (gen_iordi3 (cond, cond, newval));
4634 emit_store_conditional (DImode, cond, mem, cond);
4638 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4639 emit_unlikely_jump (x, label1);
4642 if (!is_mm_relaxed (mod_f))
4643 emit_label (XEXP (label2, 0));
4645 alpha_post_atomic_barrier (mod_s);
4647 if (is_mm_relaxed (mod_f))
4648 emit_label (XEXP (label2, 0));
4651 /* Expand an atomic exchange operation. */
4654 alpha_split_atomic_exchange (rtx operands[])
4656 rtx retval, mem, val, scratch;
4657 enum memmodel model;
4661 retval = operands[0];
4664 model = (enum memmodel) INTVAL (operands[3]);
4665 scratch = operands[4];
4666 mode = GET_MODE (mem);
4667 cond = gen_lowpart (DImode, scratch);
4669 alpha_pre_atomic_barrier (model);
4671 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4672 emit_label (XEXP (label, 0));
4674 emit_load_locked (mode, retval, mem);
4675 emit_move_insn (scratch, val);
4676 emit_store_conditional (mode, cond, mem, scratch);
4678 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4679 emit_unlikely_jump (x, label);
4681 alpha_post_atomic_barrier (model);
4685 alpha_expand_atomic_exchange_12 (rtx operands[])
4687 rtx dst, mem, val, model;
4689 rtx addr, align, wdst;
4690 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4695 model = operands[3];
4696 mode = GET_MODE (mem);
4698 /* We forced the address into a register via mem_noofs_operand. */
4699 addr = XEXP (mem, 0);
4700 gcc_assert (register_operand (addr, DImode));
4702 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4703 NULL_RTX, 1, OPTAB_DIRECT);
4705 /* Insert val into the correct byte location within the word. */
4706 if (val != const0_rtx)
4707 val = emit_insxl (mode, val, addr);
4709 wdst = gen_reg_rtx (DImode);
4711 gen = gen_atomic_exchangeqi_1;
4713 gen = gen_atomic_exchangehi_1;
4714 emit_insn (gen (wdst, mem, val, align, model));
4716 emit_move_insn (dst, gen_lowpart (mode, wdst));
4720 alpha_split_atomic_exchange_12 (rtx operands[])
4722 rtx dest, orig_mem, addr, val, align, scratch;
4723 rtx label, mem, width, mask, x;
4725 enum memmodel model;
4728 orig_mem = operands[1];
4730 align = operands[3];
4731 model = (enum memmodel) INTVAL (operands[4]);
4732 scratch = operands[5];
4733 mode = GET_MODE (orig_mem);
4734 addr = XEXP (orig_mem, 0);
4736 mem = gen_rtx_MEM (DImode, align);
4737 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4738 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4739 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4741 alpha_pre_atomic_barrier (model);
4743 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4744 emit_label (XEXP (label, 0));
4746 emit_load_locked (DImode, scratch, mem);
4748 width = GEN_INT (GET_MODE_BITSIZE (mode));
4749 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4750 emit_insn (gen_extxl (dest, scratch, width, addr));
4751 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4752 if (val != const0_rtx)
4753 emit_insn (gen_iordi3 (scratch, scratch, val));
4755 emit_store_conditional (DImode, scratch, mem, scratch);
4757 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4758 emit_unlikely_jump (x, label);
4760 alpha_post_atomic_barrier (model);
4763 /* Adjust the cost of a scheduling dependency. Return the new cost of
4764 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4767 alpha_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
4770 enum attr_type dep_insn_type;
4772 /* If the dependence is an anti-dependence, there is no cost. For an
4773 output dependence, there is sometimes a cost, but it doesn't seem
4774 worth handling those few cases. */
4778 /* If we can't recognize the insns, we can't really do anything. */
4779 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4782 dep_insn_type = get_attr_type (dep_insn);
4784 /* Bring in the user-defined memory latency. */
4785 if (dep_insn_type == TYPE_ILD
4786 || dep_insn_type == TYPE_FLD
4787 || dep_insn_type == TYPE_LDSYM)
4788 cost += alpha_memory_latency-1;
4790 /* Everything else handled in DFA bypasses now. */
4795 /* The number of instructions that can be issued per cycle. */
4798 alpha_issue_rate (void)
4800 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4803 /* How many alternative schedules to try. This should be as wide as the
4804 scheduling freedom in the DFA, but no wider. Making this value too
4805 large results in extra work for the scheduler.
4807 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4808 alternative schedules. For EV5, we can choose between E0/E1 and
4809 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4812 alpha_multipass_dfa_lookahead (void)
4814 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4817 /* Machine-specific function data. */
4819 struct GTY(()) alpha_links;
4821 struct GTY(()) machine_function
4823 /* For flag_reorder_blocks_and_partition. */
4826 /* For VMS condition handlers. */
4827 bool uses_condition_handler;
4829 /* Linkage entries. */
4830 hash_map<nofree_string_hash, alpha_links *> *links;
4833 /* How to allocate a 'struct machine_function'. */
4835 static struct machine_function *
4836 alpha_init_machine_status (void)
4838 return ggc_cleared_alloc<machine_function> ();
4841 /* Support for frame based VMS condition handlers. */
4843 /* A VMS condition handler may be established for a function with a call to
4844 __builtin_establish_vms_condition_handler, and cancelled with a call to
4845 __builtin_revert_vms_condition_handler.
4847 The VMS Condition Handling Facility knows about the existence of a handler
4848 from the procedure descriptor .handler field. Like the VMS native compilers,
4849 we store the user-specified handler's address at a fixed location in the
4850 stack frame and point the procedure descriptor at a common wrapper which
4851 fetches the real handler's address and issues an indirect call.
4853 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4855 We force the procedure kind to PT_STACK, and the fixed frame location is
4856 fp+8, just before the register save area. We use the handler_data field in
4857 the procedure descriptor to state the fp offset at which the installed
4858 handler address can be found. */
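/* Illustrative usage from C, given the semantics described above (both
   builtins return the previously installed handler):

     void *prev = __builtin_establish_vms_condition_handler (handler);
     ...
     __builtin_revert_vms_condition_handler ();  */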
4860 #define VMS_COND_HANDLER_FP_OFFSET 8
4862 /* Expand code to store the currently installed user VMS condition handler
4863 into TARGET and install HANDLER as the new condition handler. */
4866 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4868 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4869 VMS_COND_HANDLER_FP_OFFSET);
4872 = gen_rtx_MEM (DImode, handler_slot_address);
4874 emit_move_insn (target, handler_slot);
4875 emit_move_insn (handler_slot, handler);
4877 /* Notify the start/prologue/epilogue emitters that the condition handler
4878 slot is needed. In addition to reserving the slot space, this will force
4879    the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4880 use above is correct. */
4881 cfun->machine->uses_condition_handler = true;
4884 /* Expand code to store the current VMS condition handler into TARGET and
4888 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4890 /* We implement this by establishing a null condition handler, with the tiny
4891    side effect of setting uses_condition_handler.  This is slightly
4892    pessimistic if no actual builtin_establish call is ever issued, which is
4893    not a real problem and is expected never to happen anyway.  */
4895 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4898 /* Functions to save and restore alpha_return_addr_rtx. */
4900 /* Start the ball rolling with RETURN_ADDR_RTX. */
4903 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4908 return get_hard_reg_initial_val (Pmode, REG_RA);
4911 /* Return or create a memory slot containing the gp value for the current
4912 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4915 alpha_gp_save_rtx (void)
4918 rtx m = cfun->machine->gp_save_rtx;
4924 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4925 m = validize_mem (m);
4926 emit_move_insn (m, pic_offset_table_rtx);
4931 /* We used to simply emit the sequence after entry_of_function.
4932 However this breaks the CFG if the first instruction in the
4933 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4934 label. Emit the sequence properly on the edge. We are only
4935 invoked from dw2_build_landing_pads and finish_eh_generation
4936 will call commit_edge_insertions thanks to a kludge. */
4937 insert_insn_on_edge (seq,
4938 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
4940 cfun->machine->gp_save_rtx = m;
4947 alpha_instantiate_decls (void)
4949 if (cfun->machine->gp_save_rtx != NULL_RTX)
4950 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
4954 alpha_ra_ever_killed (void)
4958 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4959 return (int)df_regs_ever_live_p (REG_RA);
4961 push_topmost_sequence ();
4963 pop_topmost_sequence ();
4965 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL);
4969 /* Return the trap mode suffix applicable to the current
4970 instruction, or NULL. */
4973 get_trap_mode_suffix (void)
4975 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4979 case TRAP_SUFFIX_NONE:
4982 case TRAP_SUFFIX_SU:
4983 if (alpha_fptm >= ALPHA_FPTM_SU)
4987 case TRAP_SUFFIX_SUI:
4988 if (alpha_fptm >= ALPHA_FPTM_SUI)
4992 case TRAP_SUFFIX_V_SV:
5000 case ALPHA_FPTM_SUI:
5006 case TRAP_SUFFIX_V_SV_SVI:
5015 case ALPHA_FPTM_SUI:
5022 case TRAP_SUFFIX_U_SU_SUI:
5031 case ALPHA_FPTM_SUI:
5044 /* Return the rounding mode suffix applicable to the current
5045 instruction, or NULL. */
5048 get_round_mode_suffix (void)
5050 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5054 case ROUND_SUFFIX_NONE:
5056 case ROUND_SUFFIX_NORMAL:
5059 case ALPHA_FPRM_NORM:
5061 case ALPHA_FPRM_MINF:
5063 case ALPHA_FPRM_CHOP:
5065 case ALPHA_FPRM_DYN:
5072 case ROUND_SUFFIX_C:
5081 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
5084 alpha_print_operand_punct_valid_p (unsigned char code)
5086 return (code == '/' || code == ',' || code == '-' || code == '~'
5087 || code == '#' || code == '*' || code == '&');
5090 /* Implement TARGET_PRINT_OPERAND. The alpha-specific
5091 operand codes are documented below. */
5094 alpha_print_operand (FILE *file, rtx x, int code)
5101 /* Print the assembler name of the current function. */
5102 assemble_name (file, alpha_fnname);
5106 if (const char *name = get_some_local_dynamic_name ())
5107 assemble_name (file, name);
5109 output_operand_lossage ("'%%&' used without any "
5110 "local dynamic TLS references");
5114 /* Generates the instruction suffix. The TRAP_SUFFIX and ROUND_SUFFIX
5115 attributes are examined to determine what is appropriate. */
5117 const char *trap = get_trap_mode_suffix ();
5118 const char *round = get_round_mode_suffix ();
5121 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
5126 /* Generates single precision suffix for floating point
5127 instructions (s for IEEE, f for VAX). */
5128 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5132 /* Generates double precision suffix for floating point
5133 instructions (t for IEEE, g for VAX). */
5134 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5138 if (alpha_this_literal_sequence_number == 0)
5139 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5140 fprintf (file, "%d", alpha_this_literal_sequence_number);
5144 if (alpha_this_gpdisp_sequence_number == 0)
5145 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5146 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5153 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5155 x = XVECEXP (x, 0, 0);
5156 lituse = "lituse_tlsgd";
5158 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5160 x = XVECEXP (x, 0, 0);
5161 lituse = "lituse_tlsldm";
5163 else if (CONST_INT_P (x))
5164 lituse = "lituse_jsr";
5167 output_operand_lossage ("invalid %%J value");
5171 if (x != const0_rtx)
5172 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5180 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5181 lituse = "lituse_jsrdirect";
5183 lituse = "lituse_jsr";
5186 gcc_assert (INTVAL (x) != 0);
5187 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5191 /* If this operand is the constant zero, write it as "$31". */
5193 fprintf (file, "%s", reg_names[REGNO (x)]);
5194 else if (x == CONST0_RTX (GET_MODE (x)))
5195 fprintf (file, "$31");
5197 output_operand_lossage ("invalid %%r value");
5201 /* Similar, but for floating-point. */
5203 fprintf (file, "%s", reg_names[REGNO (x)]);
5204 else if (x == CONST0_RTX (GET_MODE (x)))
5205 fprintf (file, "$f31");
5207 output_operand_lossage ("invalid %%R value");
5211 /* Write the 1's complement of a constant. */
5212 if (!CONST_INT_P (x))
5213 output_operand_lossage ("invalid %%N value");
5215 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5219 /* Write 1 << C, for a constant C. */
5220 if (!CONST_INT_P (x))
5221 output_operand_lossage ("invalid %%P value");
5223 fprintf (file, HOST_WIDE_INT_PRINT_DEC, HOST_WIDE_INT_1 << INTVAL (x));
5227 /* Write the high-order 16 bits of a constant, sign-extended. */
5228 if (!CONST_INT_P (x))
5229 output_operand_lossage ("invalid %%h value");
5231 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5235 /* Write the low-order 16 bits of a constant, sign-extended. */
5236 if (!CONST_INT_P (x))
5237 output_operand_lossage ("invalid %%L value");
5239 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5240 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
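/* Worked example (illustrative): for the constant 0x12348765, %h prints
   0x12348765 >> 16 = 4660 (0x1234), while %L sign-extends the low sixteen
   bits: 0x8765 - 2 * 0x8000 = -30875.  %L is negative whenever bit 15 of
   the constant is set.  */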
5244 /* Write mask for ZAP insn. */
5245 if (CONST_INT_P (x))
5247 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5249 	  for (i = 0; i < 8; i++, value >>= 8)
	    if (value & 0xff)
	      mask |= (1 << i);
5253 	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5256 output_operand_lossage ("invalid %%m value");
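/* Worked example (illustrative): for the constant 0x00ff00ff only bytes 0
   and 2 are nonzero, so the printed ZAP mask is 0b00000101 = 5; each set
   mask bit marks one nonzero byte of the value.  */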
5260 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5261 if (!mode_width_operand (x, VOIDmode))
5262 output_operand_lossage ("invalid %%M value");
5264 fprintf (file, "%s",
5265 (INTVAL (x) == 8 ? "b"
5266 : INTVAL (x) == 16 ? "w"
5267 : INTVAL (x) == 32 ? "l"
5272 /* Similar, except do it from the mask. */
5273 if (CONST_INT_P (x))
5275 HOST_WIDE_INT value = INTVAL (x);
5282 if (value == 0xffff)
5287 if (value == 0xffffffff)
5299 output_operand_lossage ("invalid %%U value");
5303 /* Write the constant value divided by 8. */
5304 if (!CONST_INT_P (x)
5305 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5306 || (INTVAL (x) & 7) != 0)
5307 output_operand_lossage ("invalid %%s value");
5309 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5312 case 'C': case 'D': case 'c': case 'd':
5313 /* Write out comparison name. */
5315 enum rtx_code c = GET_CODE (x);
5317 if (!COMPARISON_P (x))
5318 output_operand_lossage ("invalid %%C value");
5320 else if (code == 'D')
5321 c = reverse_condition (c);
5322 else if (code == 'c')
5323 c = swap_condition (c);
5324 else if (code == 'd')
5325 c = swap_condition (reverse_condition (c));
5328 fprintf (file, "ule");
5330 fprintf (file, "ult");
5331 else if (c == UNORDERED)
5332 fprintf (file, "un");
5334 fprintf (file, "%s", GET_RTX_NAME (c));
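/* Worked example (illustrative): for the comparison (le x y), %C prints
   "le", %D prints the reversed test "gt", %c prints the swapped test "ge",
   and %d prints the swapped-and-reversed test "lt".  */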
5339 /* Write the divide or modulus operator. */
5340 switch (GET_CODE (x))
5343 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5346 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5349 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5352 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5355 output_operand_lossage ("invalid %%E value");
5361 /* Write "_u" for unaligned access. */
5362 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5363 fprintf (file, "_u");
5368 fprintf (file, "%s", reg_names[REGNO (x)]);
5370 output_address (GET_MODE (x), XEXP (x, 0));
5371 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5373 switch (XINT (XEXP (x, 0), 1))
5377 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5380 output_operand_lossage ("unknown relocation unspec");
5385 output_addr_const (file, x);
5389 output_operand_lossage ("invalid %%xn code");
5393 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
5396 alpha_print_operand_address (FILE *file, machine_mode /*mode*/, rtx addr)
5399 HOST_WIDE_INT offset = 0;
5401 if (GET_CODE (addr) == AND)
5402 addr = XEXP (addr, 0);
5404 if (GET_CODE (addr) == PLUS
5405 && CONST_INT_P (XEXP (addr, 1)))
5407 offset = INTVAL (XEXP (addr, 1));
5408 addr = XEXP (addr, 0);
5411 if (GET_CODE (addr) == LO_SUM)
5413 const char *reloc16, *reloclo;
5414 rtx op1 = XEXP (addr, 1);
5416 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5418 op1 = XEXP (op1, 0);
5419 switch (XINT (op1, 1))
5423 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5427 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5430 output_operand_lossage ("unknown relocation unspec");
5434 output_addr_const (file, XVECEXP (op1, 0, 0));
5439 reloclo = "gprellow";
5440 output_addr_const (file, op1);
5444 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5446 addr = XEXP (addr, 0);
5447 switch (GET_CODE (addr))
5450 basereg = REGNO (addr);
5454 basereg = subreg_regno (addr);
5461 fprintf (file, "($%d)\t\t!%s", basereg,
5462 (basereg == 29 ? reloc16 : reloclo));
5466 switch (GET_CODE (addr))
5469 basereg = REGNO (addr);
5473 basereg = subreg_regno (addr);
5477 offset = INTVAL (addr);
5481 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5482 fprintf (file, "%s", XSTR (addr, 0));
5486 gcc_assert(TARGET_ABI_OPEN_VMS || this_is_asm_operands);
5487 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5488 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5489 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5490 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5491 INTVAL (XEXP (XEXP (addr, 0), 1)));
5495 output_operand_lossage ("invalid operand address");
5499 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5502 /* Emit RTL insns to initialize the variable parts of a trampoline at
5503    M_TRAMP.  FNDECL is the target function's decl.  CHAIN_VALUE is an rtx
5504 for the static chain value for the function. */
5507 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5509 rtx fnaddr, mem, word1, word2;
5511 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5513 #ifdef POINTERS_EXTEND_UNSIGNED
5514 fnaddr = convert_memory_address (Pmode, fnaddr);
5515 chain_value = convert_memory_address (Pmode, chain_value);
5518 if (TARGET_ABI_OPEN_VMS)
5523 /* Construct the name of the trampoline entry point. */
5524 fnname = XSTR (fnaddr, 0);
5525 trname = (char *) alloca (strlen (fnname) + 5);
5526 strcpy (trname, fnname);
5527 strcat (trname, "..tr");
5528 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5529 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5531 /* Trampoline (or "bounded") procedure descriptor is constructed from
5532 	 the function's procedure descriptor, with certain fields zeroed in accordance with
5533 the VMS calling standard. This is stored in the first quadword. */
5534 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5535 word1 = expand_and (DImode, word1,
5536 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5541       /* These 4 instructions are:
		ldq $1,24($27)
		ldq $27,16($27)
		jmp $31,($27),0
		nop
5546 	 We don't bother setting the HINT field of the jump; the nop
5547 	 is merely there for padding.  */
5548 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5549 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
5552 /* Store the first two words, as computed above. */
5553 mem = adjust_address (m_tramp, DImode, 0);
5554 emit_move_insn (mem, word1);
5555 mem = adjust_address (m_tramp, DImode, 8);
5556 emit_move_insn (mem, word2);
5558 /* Store function address and static chain value. */
5559 mem = adjust_address (m_tramp, Pmode, 16);
5560 emit_move_insn (mem, fnaddr);
5561 mem = adjust_address (m_tramp, Pmode, 24);
5562 emit_move_insn (mem, chain_value);
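/* Resulting 32-byte OSF trampoline layout, as assembled above:

	offset  0:  ldq $1,24($27)	(word1, low half)
	offset  4:  ldq $27,16($27)	(word1, high half)
	offset  8:  jmp $31,($27),0	(word2, low half)
	offset 12:  nop			(word2, high half)
	offset 16:  target function address
	offset 24:  static chain value

   On entry $27 holds the trampoline's own address, so the first load
   fetches the static chain into $1 and the second replaces $27 with the
   real function address before the jump.  */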
5566 emit_insn (gen_imb ());
5567 #ifdef HAVE_ENABLE_EXECUTE_STACK
5568 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5569 LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
5574 /* Determine where to put an argument to a function.
5575 Value is zero to push the argument on the stack,
5576 or a hard register in which to store the argument.
5578 MODE is the argument's machine mode.
5579 TYPE is the data type of the argument (as a tree).
5580    This is null for libcalls where that information may not be available.
5582 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5583 the preceding args and about the function being called.
5584 NAMED is nonzero if this argument is a named parameter
5585 (otherwise it is an extra parameter matching an ellipsis).
5587 On Alpha the first 6 words of args are normally in registers
5588 and the rest are pushed. */
5591 alpha_function_arg (cumulative_args_t cum_v, machine_mode mode,
5592 const_tree type, bool named ATTRIBUTE_UNUSED)
5594 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5598 /* Don't get confused and pass small structures in FP registers. */
5599 if (type && AGGREGATE_TYPE_P (type))
5603   /* With alpha_split_complex_arg, we shouldn't see any raw complex args.  */
5605 gcc_checking_assert (!COMPLEX_MODE_P (mode));
5607 /* Set up defaults for FP operands passed in FP registers, and
5608 integral operands passed in integer registers. */
5609 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5615 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5616 the two platforms, so we can't avoid conditional compilation. */
5617 #if TARGET_ABI_OPEN_VMS
5619 if (mode == VOIDmode)
5620 return alpha_arg_info_reg_val (*cum);
5622 num_args = cum->num_args;
5624 || targetm.calls.must_pass_in_stack (mode, type))
5627 #elif TARGET_ABI_OSF
5633 /* VOID is passed as a special flag for "last argument". */
5634 if (type == void_type_node)
5636 else if (targetm.calls.must_pass_in_stack (mode, type))
5640 #error Unhandled ABI
5643 return gen_rtx_REG (mode, num_args + basereg);
5646 /* Update the data in CUM to advance over an argument
5647 of mode MODE and data type TYPE.
5648 (TYPE is null for libcalls where that information may not be available.) */
5651 alpha_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
5652 const_tree type, bool named ATTRIBUTE_UNUSED)
5654 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5655 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5656 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type);
5661 if (!onstack && cum->num_args < 6)
5662 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5663 cum->num_args += increment;
5668 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5669 machine_mode mode ATTRIBUTE_UNUSED,
5670 tree type ATTRIBUTE_UNUSED,
5671 bool named ATTRIBUTE_UNUSED)
5674 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5676 #if TARGET_ABI_OPEN_VMS
5677 if (cum->num_args < 6
5678 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type))
5679 words = 6 - cum->num_args;
5680 #elif TARGET_ABI_OSF
5681 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type))
5684 #error Unhandled ABI
5687 return words * UNITS_PER_WORD;
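/* Illustrative example for the OSF case: with five argument words already
   assigned to registers and a two-word argument arriving next, only
   6 - 5 = 1 word still fits, so this hook reports 8 bytes passed in
   registers; the remainder of the argument goes on the stack.  */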
5691 /* Return true if TYPE must be returned in memory, instead of in registers. */
5694 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5696 machine_mode mode = VOIDmode;
5701 mode = TYPE_MODE (type);
5703 /* All aggregates are returned in memory, except on OpenVMS where
5704 records that fit 64 bits should be returned by immediate value
5705 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5706 if (TARGET_ABI_OPEN_VMS
5707 && TREE_CODE (type) != ARRAY_TYPE
5708 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5711 if (AGGREGATE_TYPE_P (type))
5715 size = GET_MODE_SIZE (mode);
5716 switch (GET_MODE_CLASS (mode))
5718 case MODE_VECTOR_FLOAT:
5719 /* Pass all float vectors in memory, like an aggregate. */
5722 case MODE_COMPLEX_FLOAT:
5723 /* We judge complex floats on the size of their element,
5724 not the size of the whole type. */
5725 size = GET_MODE_UNIT_SIZE (mode);
5730 case MODE_COMPLEX_INT:
5731 case MODE_VECTOR_INT:
5735 /* ??? We get called on all sorts of random stuff from
5736 aggregate_value_p. We must return something, but it's not
5737 	 clear what's safe to return.  Pretend it's a struct, I guess.  */
5742 /* Otherwise types must fit in one register. */
5743 return size > UNITS_PER_WORD;
5746 /* Return true if TYPE should be passed by invisible reference. */
5749 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5751 const_tree type ATTRIBUTE_UNUSED,
5754 /* Pass float and _Complex float variable arguments by reference.
5755 This avoids 64-bit store from a FP register to a pretend args save area
5756 and subsequent 32-bit load from the saved location to a FP register.
5758 Note that 32-bit loads and stores to/from a FP register on alpha reorder
5759 bits to form a canonical 64-bit value in the FP register. This fact
5760 invalidates compiler assumption that 32-bit FP value lives in the lower
5761 32-bits of the passed 64-bit FP value, so loading the 32-bit value from
5762 the stored 64-bit location using 32-bit FP load is invalid on alpha.
5764    This introduces a sort of ABI incompatibility, but until _Float32 was
5765    introduced, C-family languages promoted a 32-bit float variable arg to
5766    a 64-bit double, and it was not allowed to pass float as a variable
5767    argument.  Passing _Complex float as a variable argument never
5768 worked on alpha. Thus, we have no backward compatibility issues
5769 to worry about, and passing unpromoted _Float32 and _Complex float
5770 as a variable argument will actually work in the future. */
5772 if (mode == SFmode || mode == SCmode)
5775 return mode == TFmode || mode == TCmode;
5778 /* Define how to find the value returned by a function. VALTYPE is the
5779 data type of the value (as a tree). If the precise function being
5780 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5781 MODE is set instead of VALTYPE for libcalls.
5783 On Alpha the value is found in $0 for integer functions and
5784 $f0 for floating-point functions. */
5787 alpha_function_value_1 (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5790 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5791 enum mode_class mclass;
5793 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5796 mode = TYPE_MODE (valtype);
5798 mclass = GET_MODE_CLASS (mode);
5802 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5803 where we have them returning both SImode and DImode. */
5804 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5805 PROMOTE_MODE (mode, dummy, valtype);
5808 case MODE_COMPLEX_INT:
5809 case MODE_VECTOR_INT:
5817 case MODE_COMPLEX_FLOAT:
5819 machine_mode cmode = GET_MODE_INNER (mode);
5821 return gen_rtx_PARALLEL
5824 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5826 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5827 GEN_INT (GET_MODE_SIZE (cmode)))));
5831 /* We should only reach here for BLKmode on VMS. */
5832 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5840 return gen_rtx_REG (mode, regnum);
5843 /* Implement TARGET_FUNCTION_VALUE. */
5846 alpha_function_value (const_tree valtype, const_tree fn_decl_or_type,
5849 return alpha_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
5852 /* Implement TARGET_LIBCALL_VALUE. */
5855 alpha_libcall_value (machine_mode mode, const_rtx /*fun*/)
5857 return alpha_function_value_1 (NULL_TREE, NULL_TREE, mode);
5860 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
5862    On the Alpha, $0 $1 and $f0 $f1 are the only registers thus used.  */
5865 alpha_function_value_regno_p (const unsigned int regno)
5867 return (regno == 0 || regno == 1 || regno == 32 || regno == 33);
5870 /* TCmode complex values are passed by invisible reference. We
5871 should not split these values. */
5874 alpha_split_complex_arg (const_tree type)
5876 return TYPE_MODE (type) != TCmode;
5880 alpha_build_builtin_va_list (void)
5882 tree base, ofs, space, record, type_decl;
5884 if (TARGET_ABI_OPEN_VMS)
5885 return ptr_type_node;
5887 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5888 type_decl = build_decl (BUILTINS_LOCATION,
5889 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5890 TYPE_STUB_DECL (record) = type_decl;
5891 TYPE_NAME (record) = type_decl;
5893 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5895 /* Dummy field to prevent alignment warnings. */
5896 space = build_decl (BUILTINS_LOCATION,
5897 FIELD_DECL, NULL_TREE, integer_type_node);
5898 DECL_FIELD_CONTEXT (space) = record;
5899 DECL_ARTIFICIAL (space) = 1;
5900 DECL_IGNORED_P (space) = 1;
5902 ofs = build_decl (BUILTINS_LOCATION,
5903 FIELD_DECL, get_identifier ("__offset"),
5905 DECL_FIELD_CONTEXT (ofs) = record;
5906 DECL_CHAIN (ofs) = space;
5908 base = build_decl (BUILTINS_LOCATION,
5909 FIELD_DECL, get_identifier ("__base"),
5911 DECL_FIELD_CONTEXT (base) = record;
5912 DECL_CHAIN (base) = ofs;
5914 TYPE_FIELDS (record) = base;
5915 layout_type (record);
5917 va_list_gpr_counter_field = ofs;
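/* The record built above corresponds roughly to the following C type.
   This is a sketch: the exact field types follow the tree nodes above
   (the type of __offset is not visible in this listing), and the dummy
   field is unnamed in the real record:

	struct __va_list_tag
	{
	  void *__base;		// start of the argument save area
	  int __offset;		// bytes of arguments consumed so far
	  int __dummy;		// padding only, never accessed
	};  */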
5922 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5923 and constant additions. */
5926 va_list_skip_additions (tree lhs)
5932 enum tree_code code;
5934 stmt = SSA_NAME_DEF_STMT (lhs);
5936 if (gimple_code (stmt) == GIMPLE_PHI)
5939 if (!is_gimple_assign (stmt)
5940 || gimple_assign_lhs (stmt) != lhs)
5943 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5945 code = gimple_assign_rhs_code (stmt);
5946 if (!CONVERT_EXPR_CODE_P (code)
5947 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5948 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5949 || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))))
5952 lhs = gimple_assign_rhs1 (stmt);
5956 /* Check if LHS = RHS statement is
5957      LHS = *(ap.__base + ap.__offset + cst)
     or
     LHS = *(ap.__base
5960 	     + ((ap.__offset + cst <= 47)
5961 	        ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5962 If the former, indicate that GPR registers are needed,
5963 if the latter, indicate that FPR registers are needed.
5965    Also look for LHS = (*ptr).field, where ptr is one of the forms listed above.
5968    On alpha, cfun->va_list_gpr_size is used as the size of the needed
5969 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5970 registers are needed and bit 1 set if FPR registers are needed.
5971 Return true if va_list references should not be scanned for the
5972 current statement. */
5975 alpha_stdarg_optimize_hook (struct stdarg_info *si, const gimple *stmt)
5977 tree base, offset, rhs;
5981 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5982 != GIMPLE_SINGLE_RHS)
5985 rhs = gimple_assign_rhs1 (stmt);
5986 while (handled_component_p (rhs))
5987 rhs = TREE_OPERAND (rhs, 0);
5988 if (TREE_CODE (rhs) != MEM_REF
5989 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5992 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5994 || !is_gimple_assign (stmt)
5995 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5998 base = gimple_assign_rhs1 (stmt);
5999 if (TREE_CODE (base) == SSA_NAME)
6001 base_stmt = va_list_skip_additions (base);
6003 && is_gimple_assign (base_stmt)
6004 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6005 base = gimple_assign_rhs1 (base_stmt);
6008 if (TREE_CODE (base) != COMPONENT_REF
6009 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6011 base = gimple_assign_rhs2 (stmt);
6012 if (TREE_CODE (base) == SSA_NAME)
6014 base_stmt = va_list_skip_additions (base);
6016 && is_gimple_assign (base_stmt)
6017 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6018 base = gimple_assign_rhs1 (base_stmt);
6021 if (TREE_CODE (base) != COMPONENT_REF
6022 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6028 base = get_base_address (base);
6029 if (TREE_CODE (base) != VAR_DECL
6030 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
6033 offset = gimple_op (stmt, 1 + offset_arg);
6034 if (TREE_CODE (offset) == SSA_NAME)
6036 gimple *offset_stmt = va_list_skip_additions (offset);
6039 && gimple_code (offset_stmt) == GIMPLE_PHI)
6042 gimple *arg1_stmt, *arg2_stmt;
6044 enum tree_code code1, code2;
6046 if (gimple_phi_num_args (offset_stmt) != 2)
6050 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6052 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6053 if (arg1_stmt == NULL
6054 || !is_gimple_assign (arg1_stmt)
6055 || arg2_stmt == NULL
6056 || !is_gimple_assign (arg2_stmt))
6059 code1 = gimple_assign_rhs_code (arg1_stmt);
6060 code2 = gimple_assign_rhs_code (arg2_stmt);
6061 if (code1 == COMPONENT_REF
6062 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6064 else if (code2 == COMPONENT_REF
6065 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6067 gimple *tem = arg1_stmt;
6069 arg1_stmt = arg2_stmt;
6075 if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt)))
6078 sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt));
6079 if (code2 == MINUS_EXPR)
6081 if (sub < -48 || sub > -32)
6084 arg1 = gimple_assign_rhs1 (arg1_stmt);
6085 arg2 = gimple_assign_rhs1 (arg2_stmt);
6086 if (TREE_CODE (arg2) == SSA_NAME)
6088 arg2_stmt = va_list_skip_additions (arg2);
6089 if (arg2_stmt == NULL
6090 || !is_gimple_assign (arg2_stmt)
6091 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6093 arg2 = gimple_assign_rhs1 (arg2_stmt);
6098 if (TREE_CODE (arg1) != COMPONENT_REF
6099 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6100 || get_base_address (arg1) != base)
6103 /* Need floating point regs. */
6104 cfun->va_list_fpr_size |= 2;
6108 && is_gimple_assign (offset_stmt)
6109 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6110 offset = gimple_assign_rhs1 (offset_stmt);
6112 if (TREE_CODE (offset) != COMPONENT_REF
6113 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6114 || get_base_address (offset) != base)
6117 /* Need general regs. */
6118 cfun->va_list_fpr_size |= 1;
6122 si->va_list_escapes = true;
6127 /* Perform any actions needed for a function that is receiving a
6128 variable number of arguments. */
6131 alpha_setup_incoming_varargs (cumulative_args_t pcum, machine_mode mode,
6132 tree type, int *pretend_size, int no_rtl)
6134 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6136 /* Skip the current argument. */
6137 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6140 #if TARGET_ABI_OPEN_VMS
6141 /* For VMS, we allocate space for all 6 arg registers plus a count.
6143 However, if NO registers need to be saved, don't allocate any space.
6144 This is not only because we won't need the space, but because AP
6145 includes the current_pretend_args_size and we don't want to mess up
6146 any ap-relative addresses already made. */
6147 if (cum.num_args < 6)
6151 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6152 emit_insn (gen_arg_home ());
6154 *pretend_size = 7 * UNITS_PER_WORD;
6157 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6158 only push those that are remaining. However, if NO registers need to
6159 be saved, don't allocate any space. This is not only because we won't
6160 need the space, but because AP includes the current_pretend_args_size
6161 and we don't want to mess up any ap-relative addresses already made.
6163 If we are not to use the floating-point registers, save the integer
6164 registers where we would put the floating-point registers. This is
6165 not the most efficient way to implement varargs with just one register
6166      class, but it isn't worth doing anything more efficient in this rare case.  */
6174 alias_set_type set = get_varargs_alias_set ();
6177 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6178 if (count > 6 - cum)
6181 /* Detect whether integer registers or floating-point registers
6182 are needed by the detected va_arg statements. See above for
6183 how these values are computed. Note that the "escape" value
6184 	 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of these bits set.  */
6186 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6188 if (cfun->va_list_fpr_size & 1)
6190 tmp = gen_rtx_MEM (BLKmode,
6191 plus_constant (Pmode, virtual_incoming_args_rtx,
6192 (cum + 6) * UNITS_PER_WORD));
6193 MEM_NOTRAP_P (tmp) = 1;
6194 set_mem_alias_set (tmp, set);
6195 move_block_from_reg (16 + cum, tmp, count);
6198 if (cfun->va_list_fpr_size & 2)
6200 tmp = gen_rtx_MEM (BLKmode,
6201 plus_constant (Pmode, virtual_incoming_args_rtx,
6202 cum * UNITS_PER_WORD));
6203 MEM_NOTRAP_P (tmp) = 1;
6204 set_mem_alias_set (tmp, set);
6205 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6208 *pretend_size = 12 * UNITS_PER_WORD;
6213 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6215 HOST_WIDE_INT offset;
6216 tree t, offset_field, base_field;
6218 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6221 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6222 up by 48, storing fp arg registers in the first 48 bytes, and the
6223 integer arg registers in the next 48 bytes. This is only done,
6224 however, if any integer registers need to be stored.
6226 If no integer registers need be stored, then we must subtract 48
6227 in order to account for the integer arg registers which are counted
6228 in argsize above, but which are not actually stored on the stack.
6229 Must further be careful here about structures straddling the last
6230 integer argument register; that futzes with pretend_args_size,
6231 which changes the meaning of AP. */
6234 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6236 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6238 if (TARGET_ABI_OPEN_VMS)
6240 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6241 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6242 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6243 TREE_SIDE_EFFECTS (t) = 1;
6244 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6248 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6249 offset_field = DECL_CHAIN (base_field);
6251 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6252 valist, base_field, NULL_TREE);
6253 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6254 valist, offset_field, NULL_TREE);
6256 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6257 t = fold_build_pointer_plus_hwi (t, offset);
6258 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6259 TREE_SIDE_EFFECTS (t) = 1;
6260 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6262 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6263 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6264 TREE_SIDE_EFFECTS (t) = 1;
6265 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6270 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6273 tree type_size, ptr_type, addend, t, addr;
6274 gimple_seq internal_post;
6276 /* If the type could not be passed in registers, skip the block
6277 reserved for the registers. */
6278 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6280 t = build_int_cst (TREE_TYPE (offset), 6*8);
6281 gimplify_assign (offset,
6282 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6287 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6289 if (TREE_CODE (type) == COMPLEX_TYPE)
6291 tree real_part, imag_part, real_temp;
6293 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6296 /* Copy the value into a new temporary, lest the formal temporary
6297 be reused out from under us. */
6298 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6300 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6303 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6305 else if (TREE_CODE (type) == REAL_TYPE)
6307 tree fpaddend, cond, fourtyeight;
6309 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6310 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6311 addend, fourtyeight);
6312 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6313 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6317 /* Build the final address and force that value into a temporary. */
6318 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6319 internal_post = NULL;
6320 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6321 gimple_seq_add_seq (pre_p, internal_post);
6323 /* Update the offset field. */
6324 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6325 if (type_size == NULL || TREE_OVERFLOW (type_size))
6329 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6330 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6331 t = size_binop (MULT_EXPR, t, size_int (8));
6333 t = fold_convert (TREE_TYPE (offset), t);
6334 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6337 return build_va_arg_indirect_ref (addr);
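/* Illustrative arithmetic for the offset update above: a 12-byte argument
   rounds to (12 + 7) / 8 * 8 = 16, so __offset advances by two 8-byte
   argument slots.  */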
6341 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6344 tree offset_field, base_field, offset, base, t, r;
6347 if (TARGET_ABI_OPEN_VMS)
6348 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6350 base_field = TYPE_FIELDS (va_list_type_node);
6351 offset_field = DECL_CHAIN (base_field);
6352 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6353 valist, base_field, NULL_TREE);
6354 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6355 valist, offset_field, NULL_TREE);
6357 /* Pull the fields of the structure out into temporaries. Since we never
6358 modify the base field, we can use a formal temporary. Sign-extend the
6359 offset field so that it's the proper width for pointer arithmetic. */
6360 base = get_formal_tmp_var (base_field, pre_p);
6362 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
6363 offset = get_initialized_tmp_var (t, pre_p, NULL);
6365 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6367 type = build_pointer_type_for_mode (type, ptr_mode, true);
6369 /* Find the value. Note that this will be a stable indirection, or
6370 a composite of stable indirections in the case of complex. */
6371 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6373 /* Stuff the offset temporary back into its field. */
6374 gimplify_assign (unshare_expr (offset_field),
6375 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6378 r = build_va_arg_indirect_ref (r);
6387 ALPHA_BUILTIN_CMPBGE,
6388 ALPHA_BUILTIN_EXTBL,
6389 ALPHA_BUILTIN_EXTWL,
6390 ALPHA_BUILTIN_EXTLL,
6391 ALPHA_BUILTIN_EXTQL,
6392 ALPHA_BUILTIN_EXTWH,
6393 ALPHA_BUILTIN_EXTLH,
6394 ALPHA_BUILTIN_EXTQH,
6395 ALPHA_BUILTIN_INSBL,
6396 ALPHA_BUILTIN_INSWL,
6397 ALPHA_BUILTIN_INSLL,
6398 ALPHA_BUILTIN_INSQL,
6399 ALPHA_BUILTIN_INSWH,
6400 ALPHA_BUILTIN_INSLH,
6401 ALPHA_BUILTIN_INSQH,
6402 ALPHA_BUILTIN_MSKBL,
6403 ALPHA_BUILTIN_MSKWL,
6404 ALPHA_BUILTIN_MSKLL,
6405 ALPHA_BUILTIN_MSKQL,
6406 ALPHA_BUILTIN_MSKWH,
6407 ALPHA_BUILTIN_MSKLH,
6408 ALPHA_BUILTIN_MSKQH,
6409 ALPHA_BUILTIN_UMULH,
6411 ALPHA_BUILTIN_ZAPNOT,
6412 ALPHA_BUILTIN_AMASK,
6413 ALPHA_BUILTIN_IMPLVER,
6415 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6416 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6419 ALPHA_BUILTIN_MINUB8,
6420 ALPHA_BUILTIN_MINSB8,
6421 ALPHA_BUILTIN_MINUW4,
6422 ALPHA_BUILTIN_MINSW4,
6423 ALPHA_BUILTIN_MAXUB8,
6424 ALPHA_BUILTIN_MAXSB8,
6425 ALPHA_BUILTIN_MAXUW4,
6426 ALPHA_BUILTIN_MAXSW4,
6430 ALPHA_BUILTIN_UNPKBL,
6431 ALPHA_BUILTIN_UNPKBW,
6436 ALPHA_BUILTIN_CTPOP,
6441 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6442 CODE_FOR_builtin_cmpbge,
6450 CODE_FOR_builtin_insbl,
6451 CODE_FOR_builtin_inswl,
6452 CODE_FOR_builtin_insll,
6464 CODE_FOR_umuldi3_highpart,
6465 CODE_FOR_builtin_zap,
6466 CODE_FOR_builtin_zapnot,
6467 CODE_FOR_builtin_amask,
6468 CODE_FOR_builtin_implver,
6469 CODE_FOR_builtin_rpcc,
6470 CODE_FOR_builtin_establish_vms_condition_handler,
6471 CODE_FOR_builtin_revert_vms_condition_handler,
6474 CODE_FOR_builtin_minub8,
6475 CODE_FOR_builtin_minsb8,
6476 CODE_FOR_builtin_minuw4,
6477 CODE_FOR_builtin_minsw4,
6478 CODE_FOR_builtin_maxub8,
6479 CODE_FOR_builtin_maxsb8,
6480 CODE_FOR_builtin_maxuw4,
6481 CODE_FOR_builtin_maxsw4,
6482 CODE_FOR_builtin_perr,
6483 CODE_FOR_builtin_pklb,
6484 CODE_FOR_builtin_pkwb,
6485 CODE_FOR_builtin_unpkbl,
6486 CODE_FOR_builtin_unpkbw,
6491 CODE_FOR_popcountdi2
6494 struct alpha_builtin_def
6497 enum alpha_builtin code;
6498 unsigned int target_mask;
6502 static struct alpha_builtin_def const zero_arg_builtins[] = {
6503 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6504 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6507 static struct alpha_builtin_def const one_arg_builtins[] = {
6508 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6509 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6510 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6511 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6512 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6513 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6514 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6515 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6518 static struct alpha_builtin_def const two_arg_builtins[] = {
6519 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6520 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6521 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6522 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6523 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6524 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6525 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6526 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6527 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6528 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6529 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6530 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6531 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6532 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6533 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6534 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6535 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6536 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6537 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6538 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6539 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6540 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6541 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6542 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6543 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6544 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6545 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6546 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6547 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6548 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6549 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6550 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6551 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6552 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6555 static GTY(()) tree alpha_dimode_u;
6556 static GTY(()) tree alpha_v8qi_u;
6557 static GTY(()) tree alpha_v8qi_s;
6558 static GTY(()) tree alpha_v4hi_u;
6559 static GTY(()) tree alpha_v4hi_s;
6561 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6563 /* Return the alpha builtin for CODE. */
6566 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6568 if (code >= ALPHA_BUILTIN_max)
6569 return error_mark_node;
6570 return alpha_builtins[code];
6573 /* Helper function of alpha_init_builtins. Add the built-in specified
6574 by NAME, TYPE, CODE, and ECF. */
6577 alpha_builtin_function (const char *name, tree ftype,
6578 enum alpha_builtin code, unsigned ecf)
6580 tree decl = add_builtin_function (name, ftype, (int) code,
6581 BUILT_IN_MD, NULL, NULL_TREE);
6583 if (ecf & ECF_CONST)
6584 TREE_READONLY (decl) = 1;
6585 if (ecf & ECF_NOTHROW)
6586 TREE_NOTHROW (decl) = 1;
6588 alpha_builtins [(int) code] = decl;
6591 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6592 functions pointed to by P, with function type FTYPE. */
6595 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6600 for (i = 0; i < count; ++i, ++p)
6601 if ((target_flags & p->target_mask) == p->target_mask)
6602 alpha_builtin_function (p->name, ftype, p->code,
6603 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6607 alpha_init_builtins (void)
6611 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6612 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6613 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6614 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6615 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6617 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6618 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6620 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6621 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6623 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6624 alpha_dimode_u, NULL_TREE);
6625 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
6627 if (TARGET_ABI_OPEN_VMS)
6629 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6631 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6633 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6636 ftype = build_function_type_list (ptr_type_node, void_type_node,
6638 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6639 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6641 vms_patch_builtins ();
6645 /* Expand an expression EXP that calls a built-in function,
6646 with result going to TARGET if that's convenient
6647 (and in mode MODE if that's convenient).
6648 SUBTARGET may be used as the target for computing one of EXP's operands.
6649 IGNORE is nonzero if the value is to be ignored. */
6652 alpha_expand_builtin (tree exp, rtx target,
6653 rtx subtarget ATTRIBUTE_UNUSED,
6654 machine_mode mode ATTRIBUTE_UNUSED,
6655 int ignore ATTRIBUTE_UNUSED)
6659 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6660 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6662 call_expr_arg_iterator iter;
6663 enum insn_code icode;
6664 rtx op[MAX_ARGS], pat;
6668 if (fcode >= ALPHA_BUILTIN_max)
6669 internal_error ("bad builtin fcode");
6670 icode = code_for_builtin[fcode];
6672 internal_error ("bad builtin fcode");
6674 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6677 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6679 const struct insn_operand_data *insn_op;
6681 if (arg == error_mark_node)
6683 if (arity > MAX_ARGS)
6686 insn_op = &insn_data[icode].operand[arity + nonvoid];
6688 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6690 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6691 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6697 machine_mode tmode = insn_data[icode].operand[0].mode;
6699 || GET_MODE (target) != tmode
6700 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6701 target = gen_reg_rtx (tmode);
6707 pat = GEN_FCN (icode) (target);
6711 pat = GEN_FCN (icode) (target, op[0]);
6713 pat = GEN_FCN (icode) (op[0]);
6716 pat = GEN_FCN (icode) (target, op[0], op[1]);
6731 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6732 with an 8-bit output vector. OPINT contains the integer operands; bit N
6733 of OP_CONST is set if OPINT[N] is valid. */
6736 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6741 for (i = 0, val = 0; i < 8; ++i)
6743 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6744 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6748 return build_int_cst (alpha_dimode_u, val);
6750 else if (op_const == 2 && opint[1] == 0)
6751 return build_int_cst (alpha_dimode_u, 0xff);
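/* Worked example (illustrative):
   __builtin_alpha_cmpbge (0x0102030405060708, 0x0808080808080808)
   folds to 0x01; only byte 0 of the first operand (0x08) is unsigned
   greater-or-equal to the corresponding byte of the second (0x08), so
   only bit 0 of the result is set.  */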
6755 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6756 specialized form of an AND operation. Other byte manipulation instructions
6757 are defined in terms of this instruction, so this is also used as a
6758 subroutine for other builtins.
6760 OP contains the tree operands; OPINT contains the extracted integer values.
6761    Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6762 OPINT may be considered. */
6765 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6770 unsigned HOST_WIDE_INT mask = 0;
6773 for (i = 0; i < 8; ++i)
6774 if ((opint[1] >> i) & 1)
6775 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6778 return build_int_cst (alpha_dimode_u, opint[0] & mask);
6781 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6782 build_int_cst (alpha_dimode_u, mask));
6784 else if ((op_const & 1) && opint[0] == 0)
6785 return build_int_cst (alpha_dimode_u, 0);
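/* Worked example (illustrative): a ZAPNOT mask of 0x0f keeps the low four
   bytes, so __builtin_alpha_zapnot (x, 0x0f) folds to
   x & 0x00000000ffffffff.  */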
6789 /* Fold the builtins for the EXT family of instructions. */
6792 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6793 long op_const, unsigned HOST_WIDE_INT bytemask,
6797 tree *zap_op = NULL;
6801 unsigned HOST_WIDE_INT loc;
6804 loc *= BITS_PER_UNIT;
6810 unsigned HOST_WIDE_INT temp = opint[0];
6823 opint[1] = bytemask;
6824 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6827 /* Fold the builtins for the INS family of instructions. */
6830 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6831 long op_const, unsigned HOST_WIDE_INT bytemask,
6834 if ((op_const & 1) && opint[0] == 0)
6835 return build_int_cst (alpha_dimode_u, 0);
6839 unsigned HOST_WIDE_INT temp, loc, byteloc;
6840 tree *zap_op = NULL;
6848 byteloc = (64 - (loc * 8)) & 0x3f;
6865 opint[1] = bytemask;
6866 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6873 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6874 long op_const, unsigned HOST_WIDE_INT bytemask,
6879 unsigned HOST_WIDE_INT loc;
6887 opint[1] = bytemask ^ 0xff;
6890 return alpha_fold_builtin_zapnot (op, opint, op_const);
6894 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6896 tree op0 = fold_convert (vtype, op[0]);
6897 tree op1 = fold_convert (vtype, op[1]);
6898 tree val = fold_build2 (code, vtype, op0, op1);
6899 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
6903 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6905 unsigned HOST_WIDE_INT temp = 0;
6911 for (i = 0; i < 8; ++i)
6913 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6914 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6921 return build_int_cst (alpha_dimode_u, temp);
6925 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6927 unsigned HOST_WIDE_INT temp;
6932 temp = opint[0] & 0xff;
6933 temp |= (opint[0] >> 24) & 0xff00;
6935 return build_int_cst (alpha_dimode_u, temp);
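/* Worked example (illustrative): PKLB packs byte 0 of each 32-bit lane
   into adjacent low result bytes, so for the constant 0x0000002200000011
   the folded value is 0x2211.  */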
6939 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6941 unsigned HOST_WIDE_INT temp;
6946 temp = opint[0] & 0xff;
6947 temp |= (opint[0] >> 8) & 0xff00;
6948 temp |= (opint[0] >> 16) & 0xff0000;
6949 temp |= (opint[0] >> 24) & 0xff000000;
6951 return build_int_cst (alpha_dimode_u, temp);
6955 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6957 unsigned HOST_WIDE_INT temp;
6962 temp = opint[0] & 0xff;
6963 temp |= (opint[0] & 0xff00) << 24;
6965 return build_int_cst (alpha_dimode_u, temp);
6969 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6971 unsigned HOST_WIDE_INT temp;
6976 temp = opint[0] & 0xff;
6977 temp |= (opint[0] & 0x0000ff00) << 8;
6978 temp |= (opint[0] & 0x00ff0000) << 16;
6979 temp |= (opint[0] & 0xff000000) << 24;
6981 return build_int_cst (alpha_dimode_u, temp);
6985 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6987 unsigned HOST_WIDE_INT temp;
6995 temp = exact_log2 (opint[0] & -opint[0]);
6997 return build_int_cst (alpha_dimode_u, temp);
7001 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7003 unsigned HOST_WIDE_INT temp;
7011 temp = 64 - floor_log2 (opint[0]) - 1;
7013 return build_int_cst (alpha_dimode_u, temp);
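/* Worked examples (illustrative): for the constant 0x48 (0b1001000),
   cttz isolates the lowest set bit (0x48 & -0x48 == 0x8) and returns its
   log2, i.e. 3; ctlz returns 64 - floor_log2 (0x48) - 1 = 57, the number
   of leading zero bits in the 64-bit value.  */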
7017 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7019 unsigned HOST_WIDE_INT temp, op;
7027 temp++, op &= op - 1;
7029 return build_int_cst (alpha_dimode_u, temp);
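/* The loop above is the classic Kernighan popcount: each op &= op - 1
   clears the lowest set bit, so the iteration count equals the number of
   set bits.  For example, 0xf0 takes four iterations and folds to 4.  */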
7032 /* Fold one of our builtin functions. */
7035 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7036 bool ignore ATTRIBUTE_UNUSED)
7038 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7042 if (n_args > MAX_ARGS)
7045 for (i = 0; i < n_args; i++)
7048 if (arg == error_mark_node)
7052 if (TREE_CODE (arg) == INTEGER_CST)
7054 op_const |= 1L << i;
7055 opint[i] = int_cst_value (arg);
7059 switch (DECL_FUNCTION_CODE (fndecl))
7061 case ALPHA_BUILTIN_CMPBGE:
7062 return alpha_fold_builtin_cmpbge (opint, op_const);
7064 case ALPHA_BUILTIN_EXTBL:
7065 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7066 case ALPHA_BUILTIN_EXTWL:
7067 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7068 case ALPHA_BUILTIN_EXTLL:
7069 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7070 case ALPHA_BUILTIN_EXTQL:
7071 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7072 case ALPHA_BUILTIN_EXTWH:
7073 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7074 case ALPHA_BUILTIN_EXTLH:
7075 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7076 case ALPHA_BUILTIN_EXTQH:
7077 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7079 case ALPHA_BUILTIN_INSBL:
7080 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7081 case ALPHA_BUILTIN_INSWL:
7082 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7083 case ALPHA_BUILTIN_INSLL:
7084 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7085 case ALPHA_BUILTIN_INSQL:
7086 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7087 case ALPHA_BUILTIN_INSWH:
7088 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7089 case ALPHA_BUILTIN_INSLH:
7090 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7091 case ALPHA_BUILTIN_INSQH:
7092 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7094 case ALPHA_BUILTIN_MSKBL:
7095 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7096 case ALPHA_BUILTIN_MSKWL:
7097 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7098 case ALPHA_BUILTIN_MSKLL:
7099 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7100 case ALPHA_BUILTIN_MSKQL:
7101 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7102 case ALPHA_BUILTIN_MSKWH:
7103 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7104 case ALPHA_BUILTIN_MSKLH:
7105 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7106 case ALPHA_BUILTIN_MSKQH:
7107 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7109 case ALPHA_BUILTIN_ZAP:
7112 case ALPHA_BUILTIN_ZAPNOT:
7113 return alpha_fold_builtin_zapnot (op, opint, op_const);
7115 case ALPHA_BUILTIN_MINUB8:
7116 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7117 case ALPHA_BUILTIN_MINSB8:
7118 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7119 case ALPHA_BUILTIN_MINUW4:
7120 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7121 case ALPHA_BUILTIN_MINSW4:
7122 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7123 case ALPHA_BUILTIN_MAXUB8:
7124 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7125 case ALPHA_BUILTIN_MAXSB8:
7126 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7127 case ALPHA_BUILTIN_MAXUW4:
7128 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7129 case ALPHA_BUILTIN_MAXSW4:
7130 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7132 case ALPHA_BUILTIN_PERR:
7133 return alpha_fold_builtin_perr (opint, op_const);
7134 case ALPHA_BUILTIN_PKLB:
7135 return alpha_fold_builtin_pklb (opint, op_const);
7136 case ALPHA_BUILTIN_PKWB:
7137 return alpha_fold_builtin_pkwb (opint, op_const);
7138 case ALPHA_BUILTIN_UNPKBL:
7139 return alpha_fold_builtin_unpkbl (opint, op_const);
7140 case ALPHA_BUILTIN_UNPKBW:
7141 return alpha_fold_builtin_unpkbw (opint, op_const);
7143 case ALPHA_BUILTIN_CTTZ:
7144 return alpha_fold_builtin_cttz (opint, op_const);
7145 case ALPHA_BUILTIN_CTLZ:
7146 return alpha_fold_builtin_ctlz (opint, op_const);
7147 case ALPHA_BUILTIN_CTPOP:
7148 return alpha_fold_builtin_ctpop (opint, op_const);
7150 case ALPHA_BUILTIN_AMASK:
7151 case ALPHA_BUILTIN_IMPLVER:
7152 case ALPHA_BUILTIN_RPCC:
7153 /* None of these are foldable at compile-time. */
7160 alpha_gimple_fold_builtin (gimple_stmt_iterator *gsi)
7162 bool changed = false;
7163 gimple *stmt = gsi_stmt (*gsi);
7164 tree call = gimple_call_fn (stmt);
7165 gimple *new_stmt = NULL;
7169 tree fndecl = gimple_call_fndecl (stmt);
7175 switch (DECL_FUNCTION_CODE (fndecl))
7177 case ALPHA_BUILTIN_UMULH:
7178 arg0 = gimple_call_arg (stmt, 0);
7179 arg1 = gimple_call_arg (stmt, 1);
7181 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
7182 MULT_HIGHPART_EXPR, arg0, arg1);
7192 gsi_replace (gsi, new_stmt, true);
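/* Illustrative note: the fold above rewrites __builtin_alpha_umulh (a, b)
   into a MULT_HIGHPART_EXPR, i.e. the high 64 bits of the full 128-bit
   unsigned product, what (unsigned __int128) a * b >> 64 computes.  */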
7199 /* This page contains routines that are used to determine what the function
7200 prologue and epilogue code will do and write them out. */
7202 /* Compute the size of the save area in the stack. */
7204 /* These variables are used for communication between the following functions.
7205 They indicate various things about the current function being compiled
7206 that are used to tell what kind of prologue, epilogue and procedure
7207 descriptor to generate. */
7209 /* Nonzero if we need a stack procedure. */
7210 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7211 static enum alpha_procedure_types alpha_procedure_type;
7213 /* Register number (either FP or SP) that is used to unwind the frame. */
7214 static int vms_unwind_regno;
7216 /* Register number used to save FP. We need not have one for RA since
7217 we don't modify it for register procedures. This is only defined
7218 for register frame procedures. */
7219 static int vms_save_fp_regno;
7221 /* Register number used to reference objects off our PV. */
7222 static int vms_base_regno;
7224 /* Compute register masks for saved registers. */
7227 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7229 unsigned long imask = 0;
7230 unsigned long fmask = 0;
7233 /* When outputting a thunk, we don't have valid register life info,
7234 but assemble_start_function wants to output .frame and .mask directives.  */
7243 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7244 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7246 /* One for every register we have to save. */
7247 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7248 if (! fixed_regs[i] && ! call_used_regs[i]
7249 && df_regs_ever_live_p (i) && i != REG_RA)
7252 imask |= (1UL << i);
7254 fmask |= (1UL << (i - 32));
7257 /* We need to restore these for the handler. */
7258 if (crtl->calls_eh_return)
7262 unsigned regno = EH_RETURN_DATA_REGNO (i);
7263 if (regno == INVALID_REGNUM)
7265 imask |= 1UL << regno;
7269 /* If any register is spilled, then spill the return address also.  */
7270 /* ??? This is required by the Digital stack unwind specification
7271 and isn't needed if we're doing Dwarf2 unwinding. */
7272 if (imask || fmask || alpha_ra_ever_killed ())
7273 imask |= (1UL << REG_RA);
7280 alpha_sa_size (void)
7282 unsigned long mask[2];
7286 alpha_sa_mask (&mask[0], &mask[1]);
7288 for (j = 0; j < 2; ++j)
7289 for (i = 0; i < 32; ++i)
7290 if ((mask[j] >> i) & 1)
7293 if (TARGET_ABI_OPEN_VMS)
7295 /* Start with a stack procedure if we make any calls (REG_RA used), or
7296 need a frame pointer, with a register procedure if we otherwise need
7297 at least a slot, and with a null procedure in other cases. */
7298 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7299 alpha_procedure_type = PT_STACK;
7300 else if (get_frame_size() != 0)
7301 alpha_procedure_type = PT_REGISTER;
7303 alpha_procedure_type = PT_NULL;
7305 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7306 made the final decision on stack procedure vs register procedure. */
7307 if (alpha_procedure_type == PT_STACK)
7310 /* Decide whether to refer to objects off our PV via FP or PV.
7311 If we need FP for something else or if we receive a nonlocal
7312 goto (which expects PV to contain the value), we must use PV.
7313 Otherwise, start by assuming we can use FP. */
7316 = (frame_pointer_needed
7317 || cfun->has_nonlocal_label
7318 || alpha_procedure_type == PT_STACK
7319 || crtl->outgoing_args_size)
7320 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7322 /* If we want to copy PV into FP, we need to find some register
7323 in which to save FP. */
7325 vms_save_fp_regno = -1;
7326 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7327 for (i = 0; i < 32; i++)
7328 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7329 vms_save_fp_regno = i;
7331 /* A VMS condition handler requires a stack procedure in our
7332 implementation (not required by the calling standard).  */
7333 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7334 || cfun->machine->uses_condition_handler)
7335 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7336 else if (alpha_procedure_type == PT_NULL)
7337 vms_base_regno = REG_PV;
7339 /* Stack unwinding should be done via FP unless we use it for PV. */
7340 vms_unwind_regno = (vms_base_regno == REG_PV
7341 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7343 /* If this is a stack procedure, allow space for saving FP, RA and
7344 a condition handler slot if needed. */
7345 if (alpha_procedure_type == PT_STACK)
7346 sa_size += 2 + cfun->machine->uses_condition_handler;
7350 /* Our slot count must be even, so that the save area is a multiple of 16 bytes.  */
7358 /* Define the offset between two registers, one to be eliminated,
7359 and the other its replacement, at the start of a routine. */
7362 alpha_initial_elimination_offset (unsigned int from,
7363 unsigned int to ATTRIBUTE_UNUSED)
7367 ret = alpha_sa_size ();
7368 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7372 case FRAME_POINTER_REGNUM:
7375 case ARG_POINTER_REGNUM:
7376 ret += (ALPHA_ROUND (get_frame_size ()
7377 + crtl->args.pretend_args_size)
7378 - crtl->args.pretend_args_size);
7388 #if TARGET_ABI_OPEN_VMS
7390 /* Worker function for TARGET_CAN_ELIMINATE. */
7393 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7395 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7398 switch (alpha_procedure_type)
7401 /* NULL procedures have no frame of their own and we only
7402 know how to resolve from the current stack pointer. */
7403 return to == STACK_POINTER_REGNUM;
7407 /* We can always eliminate, except to the stack pointer when there
7408 is no usable frame pointer at hand.  */
7409 return (to != STACK_POINTER_REGNUM
7410 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7416 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7417 designates the same location as FROM. */
7420 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7422 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7423 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7424 on the proper computations and will need the register save area size in most cases.  */
7427 HOST_WIDE_INT sa_size = alpha_sa_size ();
7429 /* PT_NULL procedures have no frame of their own and we only allow
7430 elimination to the stack pointer. This is the argument pointer and we
7431 resolve the soft frame pointer to that as well. */
7433 if (alpha_procedure_type == PT_NULL)
7436 /* For a PT_STACK procedure the frame layout looks as follows
7438                     -----> decreasing addresses
7440                <         size rounded up to 16          |  likewise  >
7441 --------------#------------------------------+++--------------+++-------#
7442 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7443 --------------#---------------------------------------------------------#
7444               ^                          ^              ^               ^
7445            ARG_PTR                   FRAME_PTR    HARD_FRAME_PTR    STACK_PTR
7448 PT_REGISTER procedures are similar in that they may have a frame of their
7449 own. They have no regs-sa/pv/outgoing-args area.
7451 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7452 to STACK_PTR if need be. */
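/* Worked example (hypothetical numbers): with sa_size == 40 and a stack
   procedure (pv_save_size == 8), FRAME_PTR resolves to HARD_FRAME_PTR + 48,
   since ALPHA_ROUND (40 + 8) == 48.  With a 32-byte frame and
   pretend_args_size == 16, ARG_PTR resolves to
   HARD_FRAME_PTR + ALPHA_ROUND (48 + 32 + 16) - 16 == HARD_FRAME_PTR + 80.  */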
7455 HOST_WIDE_INT offset;
7456 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7460 case FRAME_POINTER_REGNUM:
7461 offset = ALPHA_ROUND (sa_size + pv_save_size);
7463 case ARG_POINTER_REGNUM:
7464 offset = (ALPHA_ROUND (sa_size + pv_save_size
7466 + crtl->args.pretend_args_size)
7467 - crtl->args.pretend_args_size);
7473 if (to == STACK_POINTER_REGNUM)
7474 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7480 #define COMMON_OBJECT "common_object"
7483 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7484 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7485 bool *no_add_attrs ATTRIBUTE_UNUSED)
7488 gcc_assert (DECL_P (decl));
7490 DECL_COMMON (decl) = 1;
7494 static const struct attribute_spec vms_attribute_table[] =
7496 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7497 affects_type_identity } */
7498 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7499 { NULL, 0, 0, false, false, false, NULL, false }
7503 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7504 unsigned HOST_WIDE_INT size,
7507 tree attr = DECL_ATTRIBUTES (decl);
7508 fprintf (file, "%s", COMMON_ASM_OP);
7509 assemble_name (file, name);
7510 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7511 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7512 fprintf (file, ",%u", align / BITS_PER_UNIT);
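/* For instance (illustrative only): a 16-byte common object aligned to
   8 bytes would come out as something like ".comm foo,16,8" -- note the
   byte-count (not log2) alignment operand, per the comment above.  */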
7515 attr = lookup_attribute (COMMON_OBJECT, attr);
7517 fprintf (file, ",%s",
7518 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7523 #undef COMMON_OBJECT
7528 alpha_find_lo_sum_using_gp (rtx insn)
7530 subrtx_iterator::array_type array;
7531 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
7533 const_rtx x = *iter;
7534 if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx)
7541 alpha_does_function_need_gp (void)
7545 /* The GP being variable is an OSF ABI thing.  */
7546 if (! TARGET_ABI_OSF)
7549 /* We need the gp to load the address of __mcount. */
7550 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7553 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7557 /* The nonlocal receiver pattern assumes that the gp is valid for
7558 the nested function. Reasonable because it's almost always set
7559 correctly already. For the cases where that's wrong, make sure
7560 the nested function loads its gp on entry. */
7561 if (crtl->has_nonlocal_goto)
7564 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7565 Even if we are a static function, we still need to do this in case
7566 our address is taken and passed to something like qsort. */
7568 push_topmost_sequence ();
7569 insn = get_insns ();
7570 pop_topmost_sequence ();
7572 for (; insn; insn = NEXT_INSN (insn))
7573 if (NONDEBUG_INSN_P (insn)
7574 && GET_CODE (PATTERN (insn)) != USE
7575 && GET_CODE (PATTERN (insn)) != CLOBBER
7576 && get_attr_usegp (insn))
7583 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including sequences.  */
7587 set_frame_related_p (void)
7589 rtx_insn *seq = get_insns ();
7600 while (insn != NULL_RTX)
7602 RTX_FRAME_RELATED_P (insn) = 1;
7603 insn = NEXT_INSN (insn);
7605 seq = emit_insn (seq);
7609 seq = emit_insn (seq);
7610 RTX_FRAME_RELATED_P (seq) = 1;
7615 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
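/* Typical usage (illustrative): FRP (emit_insn (gen_adddi3 (...))) runs
   the emission inside its own sequence and then re-emits it with every
   resulting insn marked RTX_FRAME_RELATED_P, as done in the prologue
   code below.  */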
7617 /* Generates a store with the proper unwind info attached. VALUE is
7618 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7619 contains SP+FRAME_BIAS, and that is the unwind info that should be
7620 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7621 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7624 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7625 HOST_WIDE_INT base_ofs, rtx frame_reg)
7630 addr = plus_constant (Pmode, base_reg, base_ofs);
7631 mem = gen_frame_mem (DImode, addr);
7633 insn = emit_move_insn (mem, value);
7634 RTX_FRAME_RELATED_P (insn) = 1;
7636 if (frame_bias || value != frame_reg)
7640 addr = plus_constant (Pmode, stack_pointer_rtx,
7641 frame_bias + base_ofs);
7642 mem = gen_rtx_MEM (DImode, addr);
7645 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7646 gen_rtx_SET (mem, frame_reg));
7651 emit_frame_store (unsigned int regno, rtx base_reg,
7652 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7654 rtx reg = gen_rtx_REG (DImode, regno);
7655 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7658 /* Compute the frame size. SIZE is the size of the "naked" frame
7659 and SA_SIZE is the size of the register save area. */
7661 static HOST_WIDE_INT
7662 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7664 if (TARGET_ABI_OPEN_VMS)
7665 return ALPHA_ROUND (sa_size
7666 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7668 + crtl->args.pretend_args_size);
7670 return ALPHA_ROUND (crtl->outgoing_args_size)
7673 + crtl->args.pretend_args_size);
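/* Illustrative arithmetic (made-up values, assuming the usual sa_size and
   frame terms in the partially elided expression above): on OSF, with
   outgoing_args_size == 40, sa_size == 16, a 100-byte frame and no pretend
   args, this yields ALPHA_ROUND (40) + 16 + ALPHA_ROUND (100)
   == 48 + 16 + 112 == 176 bytes, keeping the stack 16-byte aligned.  */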
7676 /* Write function prologue. */
7678 /* On VMS we have two kinds of functions:
7680 - stack frame (PROC_STACK)
7681 these are 'normal' functions with local vars and which
7682 call other functions
7683 - register frame (PROC_REGISTER)
7684 keeps all data in registers, needs no stack
7686 We must pass this to the assembler so it can generate the
7687 proper pdsc (procedure descriptor).
7688 This is done with the '.pdesc' directive.
7690 On non-VMS targets we don't really differentiate between the two, as we
7691 can simply allocate stack without saving registers.  */
7694 alpha_expand_prologue (void)
7696 /* Registers to save. */
7697 unsigned long imask = 0;
7698 unsigned long fmask = 0;
7699 /* Stack space needed for pushing registers clobbered by us. */
7700 HOST_WIDE_INT sa_size, sa_bias;
7701 /* Complete stack size needed. */
7702 HOST_WIDE_INT frame_size;
7703 /* Probed stack size; it additionally includes the size of
7704 the "reserve region" if any. */
7705 HOST_WIDE_INT probed_size;
7706 /* Offset from base reg to register save area. */
7707 HOST_WIDE_INT reg_offset;
7711 sa_size = alpha_sa_size ();
7712 frame_size = compute_frame_size (get_frame_size (), sa_size);
7714 if (flag_stack_usage_info)
7715 current_function_static_stack_size = frame_size;
7717 if (TARGET_ABI_OPEN_VMS)
7718 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7720 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7722 alpha_sa_mask (&imask, &fmask);
7724 /* Emit an insn to reload GP, if needed. */
7727 alpha_function_needs_gp = alpha_does_function_need_gp ();
7728 if (alpha_function_needs_gp)
7729 emit_insn (gen_prologue_ldgp ());
7732 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7733 the call to mcount ourselves, rather than having the linker do it
7734 magically in response to -pg. Since _mcount has special linkage,
7735 don't represent the call as a call. */
7736 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7737 emit_insn (gen_prologue_mcount ());
7739 /* Adjust the stack by the frame size. If the frame size is > 4096
7740 bytes, we need to be sure we probe somewhere in the first and last
7741 4096 bytes (we can probably get away without the latter test) and
7742 every 8192 bytes in between. If the frame size is > 32768, we
7743 do this in a loop. Otherwise, we generate the explicit probe
7746 Note that we are only allowed to adjust sp once in the prologue. */
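/* Example (hypothetical): for a 20000-byte frame with no registers to
   save, the code below probes at sp-4096 and sp-12288; the loop then
   stops with probed == 20480, and since 20000 > 20480 - 4096 a final
   probe at sp-20000 is emitted before the single sp adjustment.  */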
7748 probed_size = frame_size;
7749 if (flag_stack_check)
7750 probed_size += STACK_CHECK_PROTECT;
7752 if (probed_size <= 32768)
7754 if (probed_size > 4096)
7758 for (probed = 4096; probed < probed_size; probed += 8192)
7759 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7761 /* We only have to do this probe if we aren't saving registers or
7762 if we are probing beyond the frame because of -fstack-check. */
7763 if ((sa_size == 0 && probed_size > probed - 4096)
7764 || flag_stack_check)
7765 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7768 if (frame_size != 0)
7769 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7770 GEN_INT (-frame_size))));
7774 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7775 number of 8192 byte blocks to probe. We then probe each block
7776 in the loop and then set SP to the proper location. If the
7777 amount remaining is > 4096, we have to do one more probe if we
7778 are not saving any registers or if we are probing beyond the
7779 frame because of -fstack-check. */
7781 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7782 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7783 rtx ptr = gen_rtx_REG (DImode, 22);
7784 rtx count = gen_rtx_REG (DImode, 23);
7787 emit_move_insn (count, GEN_INT (blocks));
7788 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7790 /* Because of the difficulty in emitting a new basic block this
7791 late in the compilation, generate the loop as a single insn. */
7792 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7794 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7796 rtx last = gen_rtx_MEM (DImode,
7797 plus_constant (Pmode, ptr, -leftover));
7798 MEM_VOLATILE_P (last) = 1;
7799 emit_move_insn (last, const0_rtx);
7802 if (flag_stack_check)
7804 /* If -fstack-check is specified we have to load the entire
7805 constant into a register and subtract from the sp in one go,
7806 because the probed stack size is not equal to the frame size. */
7807 HOST_WIDE_INT lo, hi;
7808 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7809 hi = frame_size - lo;
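/* The split sign-extends the low 16 bits so each half fits an lda/ldah
   operand: e.g. (illustrative) frame_size == 0x1234abcd gives
   lo == -0x5433 and hi == 0x12350000, with hi + lo == frame_size.  */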
7811 emit_move_insn (ptr, GEN_INT (hi));
7812 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7813 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7818 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7819 GEN_INT (-leftover)));
7822 /* This alternative is special, because the DWARF code cannot
7823 possibly intuit through the loop above. So we invent this
7824 note for it to look at instead.  */
7825 RTX_FRAME_RELATED_P (seq) = 1;
7826 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7827 gen_rtx_SET (stack_pointer_rtx,
7828 plus_constant (Pmode, stack_pointer_rtx,
7832 /* Cope with very large offsets to the register save area. */
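/* Worked example (hypothetical): reg_offset == 0x9000 with sa_size ==
   0x100 gives low == -0x7000; since -0x7000 + 0x100 <= 0x8000, the bias
   becomes 0x10000 and the saves are addressed at sa_reg - 0x7000 and up.  */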
7834 sa_reg = stack_pointer_rtx;
7835 if (reg_offset + sa_size > 0x8000)
7837 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7840 if (low + sa_size <= 0x8000)
7841 sa_bias = reg_offset - low, reg_offset = low;
7843 sa_bias = reg_offset, reg_offset = 0;
7845 sa_reg = gen_rtx_REG (DImode, 24);
7846 sa_bias_rtx = GEN_INT (sa_bias);
7848 if (add_operand (sa_bias_rtx, DImode))
7849 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7852 emit_move_insn (sa_reg, sa_bias_rtx);
7853 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7857 /* Save regs in stack order, beginning with the VMS PV.  */
7858 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7859 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7861 /* Save register RA next. */
7862 if (imask & (1UL << REG_RA))
7864 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7865 imask &= ~(1UL << REG_RA);
7869 /* Now save any other registers required to be saved. */
7870 for (i = 0; i < 31; i++)
7871 if (imask & (1UL << i))
7873 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7877 for (i = 0; i < 31; i++)
7878 if (fmask & (1UL << i))
7880 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7884 if (TARGET_ABI_OPEN_VMS)
7886 /* Register frame procedures save the fp. */
7887 if (alpha_procedure_type == PT_REGISTER)
7890 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7891 hard_frame_pointer_rtx);
7892 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7893 RTX_FRAME_RELATED_P (insn) = 1;
7896 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7897 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7898 gen_rtx_REG (DImode, REG_PV)));
7900 if (alpha_procedure_type != PT_NULL
7901 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7902 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7904 /* If we have to allocate space for outgoing args, do it now. */
7905 if (crtl->outgoing_args_size != 0)
7908 = emit_move_insn (stack_pointer_rtx,
7910 (Pmode, hard_frame_pointer_rtx,
7912 (crtl->outgoing_args_size))));
7914 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7915 if ! frame_pointer_needed. Setting the bit will change the CFA
7916 computation rule to use sp again, which would be wrong if we had
7917 frame_pointer_needed, as this means sp might move unpredictably
7921 frame_pointer_needed
7922 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7924 crtl->outgoing_args_size != 0
7925 => alpha_procedure_type != PT_NULL,
7927 so when we are not setting the bit here, we are guaranteed to
7928 have emitted an FRP frame pointer update just before. */
7929 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7934 /* If we need a frame pointer, set it from the stack pointer. */
7935 if (frame_pointer_needed)
7937 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7938 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7940 /* This must always be the last instruction in the
7941 prologue, thus we emit a special move + clobber. */
7942 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7943 stack_pointer_rtx, sa_reg)));
7947 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7948 the prologue, for exception handling reasons, we cannot do this for
7949 any insn that might fault. We could prevent this for mems with a
7950 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7951 have to prevent all such scheduling with a blockage.
7953 Linux, on the other hand, never bothered to implement OSF/1's
7954 exception handling, and so doesn't care about such things. Anyone
7955 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7957 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7958 emit_insn (gen_blockage ());
7961 /* Count the number of .file directives, so that .loc is up to date. */
7962 int num_source_filenames = 0;
7964 /* Output the textual info surrounding the prologue. */
7967 alpha_start_function (FILE *file, const char *fnname,
7968 tree decl ATTRIBUTE_UNUSED)
7970 unsigned long imask = 0;
7971 unsigned long fmask = 0;
7972 /* Stack space needed for pushing registers clobbered by us. */
7973 HOST_WIDE_INT sa_size;
7974 /* Complete stack size needed. */
7975 unsigned HOST_WIDE_INT frame_size;
7976 /* The maximum debuggable frame size. */
7977 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
7978 /* Offset from base reg to register save area. */
7979 HOST_WIDE_INT reg_offset;
7980 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7981 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7984 #if TARGET_ABI_OPEN_VMS
7985 vms_start_function (fnname);
7988 alpha_fnname = fnname;
7989 sa_size = alpha_sa_size ();
7990 frame_size = compute_frame_size (get_frame_size (), sa_size);
7992 if (TARGET_ABI_OPEN_VMS)
7993 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7995 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7997 alpha_sa_mask (&imask, &fmask);
7999 /* Issue function start and label. */
8000 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
8002 fputs ("\t.ent ", file);
8003 assemble_name (file, fnname);
8006 /* If the function needs GP, we'll write the "..ng" label there.
8007 Otherwise, do it here. */
8009 && ! alpha_function_needs_gp
8010 && ! cfun->is_thunk)
8013 assemble_name (file, fnname);
8014 fputs ("..ng:\n", file);
8017 /* Nested functions on VMS that are potentially called via trampoline
8018 get a special transfer entry point that loads the called function's
8019 procedure descriptor and static chain. */
8020 if (TARGET_ABI_OPEN_VMS
8021 && !TREE_PUBLIC (decl)
8022 && DECL_CONTEXT (decl)
8023 && !TYPE_P (DECL_CONTEXT (decl))
8024 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
8026 strcpy (tramp_label, fnname);
8027 strcat (tramp_label, "..tr");
8028 ASM_OUTPUT_LABEL (file, tramp_label);
8029 fprintf (file, "\tldq $1,24($27)\n");
8030 fprintf (file, "\tldq $27,16($27)\n");
8033 strcpy (entry_label, fnname);
8034 if (TARGET_ABI_OPEN_VMS)
8035 strcat (entry_label, "..en");
8037 ASM_OUTPUT_LABEL (file, entry_label);
8038 inside_function = TRUE;
8040 if (TARGET_ABI_OPEN_VMS)
8041 fprintf (file, "\t.base $%d\n", vms_base_regno);
8044 && TARGET_IEEE_CONFORMANT
8045 && !flag_inhibit_size_directive)
8047 /* Set flags in procedure descriptor to request IEEE-conformant
8048 math-library routines. The value we set it to is PDSC_EXC_IEEE
8049 (/usr/include/pdsc.h). */
8050 fputs ("\t.eflag 48\n", file);
8053 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8054 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8055 alpha_arg_offset = -frame_size + 48;
8057 /* Describe our frame.  If the frame size is too large for a signed
8058 32-bit integer, print it as zero to avoid an assembler error.  We won't be
8059 properly describing such a frame, but that's the best we can do. */
8060 if (TARGET_ABI_OPEN_VMS)
8061 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8062 HOST_WIDE_INT_PRINT_DEC "\n",
8064 frame_size >= (1UL << 31) ? 0 : frame_size,
8066 else if (!flag_inhibit_size_directive)
8067 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8068 (frame_pointer_needed
8069 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8070 frame_size >= max_frame_size ? 0 : frame_size,
8071 crtl->args.pretend_args_size);
8073 /* Describe which registers were spilled. */
8074 if (TARGET_ABI_OPEN_VMS)
8077 /* ??? Does VMS care if mask contains ra? The old code didn't
8078 set it, so I don't here. */
8079 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8081 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8082 if (alpha_procedure_type == PT_REGISTER)
8083 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8085 else if (!flag_inhibit_size_directive)
8089 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8090 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8092 for (i = 0; i < 32; ++i)
8093 if (imask & (1UL << i))
8098 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8099 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8102 #if TARGET_ABI_OPEN_VMS
8103 /* If a user condition handler has been installed at some point, emit
8104 the procedure descriptor bits to point the Condition Handling Facility
8105 at the indirection wrapper, and state the fp offset at which the user
8106 handler may be found. */
8107 if (cfun->machine->uses_condition_handler)
8109 fprintf (file, "\t.handler __gcc_shell_handler\n");
8110 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8113 #ifdef TARGET_VMS_CRASH_DEBUG
8114 /* Support for minimal traceback info.  */
8115 switch_to_section (readonly_data_section);
8116 fprintf (file, "\t.align 3\n");
8117 assemble_name (file, fnname); fputs ("..na:\n", file);
8118 fputs ("\t.ascii \"", file);
8119 assemble_name (file, fnname);
8120 fputs ("\\0\"\n", file);
8121 switch_to_section (text_section);
8123 #endif /* TARGET_ABI_OPEN_VMS */
8126 /* Emit the .prologue note at the scheduled end of the prologue. */
8129 alpha_output_function_end_prologue (FILE *file)
8131 if (TARGET_ABI_OPEN_VMS)
8132 fputs ("\t.prologue\n", file);
8133 else if (!flag_inhibit_size_directive)
8134 fprintf (file, "\t.prologue %d\n",
8135 alpha_function_needs_gp || cfun->is_thunk);
8138 /* Write function epilogue. */
8141 alpha_expand_epilogue (void)
8143 /* Registers to save. */
8144 unsigned long imask = 0;
8145 unsigned long fmask = 0;
8146 /* Stack space needed for pushing registers clobbered by us. */
8147 HOST_WIDE_INT sa_size;
8148 /* Complete stack size needed. */
8149 HOST_WIDE_INT frame_size;
8150 /* Offset from base reg to register save area. */
8151 HOST_WIDE_INT reg_offset;
8152 int fp_is_frame_pointer, fp_offset;
8153 rtx sa_reg, sa_reg_exp = NULL;
8154 rtx sp_adj1, sp_adj2, mem, reg, insn;
8156 rtx cfa_restores = NULL_RTX;
8159 sa_size = alpha_sa_size ();
8160 frame_size = compute_frame_size (get_frame_size (), sa_size);
8162 if (TARGET_ABI_OPEN_VMS)
8164 if (alpha_procedure_type == PT_STACK)
8165 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8170 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8172 alpha_sa_mask (&imask, &fmask);
8175 = (TARGET_ABI_OPEN_VMS
8176 ? alpha_procedure_type == PT_STACK
8177 : frame_pointer_needed);
8179 sa_reg = stack_pointer_rtx;
8181 if (crtl->calls_eh_return)
8182 eh_ofs = EH_RETURN_STACKADJ_RTX;
8188 /* If we have a frame pointer, restore SP from it. */
8189 if (TARGET_ABI_OPEN_VMS
8190 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8191 : frame_pointer_needed)
8192 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8194 /* Cope with very large offsets to the register save area. */
8195 if (reg_offset + sa_size > 0x8000)
8197 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8200 if (low + sa_size <= 0x8000)
8201 bias = reg_offset - low, reg_offset = low;
8203 bias = reg_offset, reg_offset = 0;
8205 sa_reg = gen_rtx_REG (DImode, 22);
8206 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
8208 emit_move_insn (sa_reg, sa_reg_exp);
8211 /* Restore registers in order, excepting a true frame pointer. */
8213 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8214 reg = gen_rtx_REG (DImode, REG_RA);
8215 emit_move_insn (reg, mem);
8216 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8219 imask &= ~(1UL << REG_RA);
8221 for (i = 0; i < 31; ++i)
8222 if (imask & (1UL << i))
8224 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8225 fp_offset = reg_offset;
8228 mem = gen_frame_mem (DImode,
8229 plus_constant (Pmode, sa_reg,
8231 reg = gen_rtx_REG (DImode, i);
8232 emit_move_insn (reg, mem);
8233 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8239 for (i = 0; i < 31; ++i)
8240 if (fmask & (1UL << i))
8242 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8244 reg = gen_rtx_REG (DFmode, i+32);
8245 emit_move_insn (reg, mem);
8246 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8251 if (frame_size || eh_ofs)
8253 sp_adj1 = stack_pointer_rtx;
8257 sp_adj1 = gen_rtx_REG (DImode, 23);
8258 emit_move_insn (sp_adj1,
8259 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8262 /* If the stack size is large, begin computation into a temporary
8263 register so as not to interfere with a potential fp restore,
8264 which must be consecutive with an SP restore. */
8265 if (frame_size < 32768 && !cfun->calls_alloca)
8266 sp_adj2 = GEN_INT (frame_size);
8267 else if (frame_size < 0x40007fffL)
8269 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8271 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8272 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8276 sp_adj1 = gen_rtx_REG (DImode, 23);
8277 emit_move_insn (sp_adj1, sp_adj2);
8279 sp_adj2 = GEN_INT (low);
8283 rtx tmp = gen_rtx_REG (DImode, 23);
8284 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8287 /* We can't drop new things to memory this late, afaik,
8288 so build it up by pieces. */
8289 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size);
8290 gcc_assert (sp_adj2);
8294 /* From now on, things must be in order. So emit blockages. */
8296 /* Restore the frame pointer. */
8297 if (fp_is_frame_pointer)
8299 emit_insn (gen_blockage ());
8300 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8302 emit_move_insn (hard_frame_pointer_rtx, mem);
8303 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8304 hard_frame_pointer_rtx, cfa_restores);
8306 else if (TARGET_ABI_OPEN_VMS)
8308 emit_insn (gen_blockage ());
8309 emit_move_insn (hard_frame_pointer_rtx,
8310 gen_rtx_REG (DImode, vms_save_fp_regno));
8311 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8312 hard_frame_pointer_rtx, cfa_restores);
8315 /* Restore the stack pointer. */
8316 emit_insn (gen_blockage ());
8317 if (sp_adj2 == const0_rtx)
8318 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8320 insn = emit_move_insn (stack_pointer_rtx,
8321 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8322 REG_NOTES (insn) = cfa_restores;
8323 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8324 RTX_FRAME_RELATED_P (insn) = 1;
8328 gcc_assert (cfa_restores == NULL);
8330 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8332 emit_insn (gen_blockage ());
8333 insn = emit_move_insn (hard_frame_pointer_rtx,
8334 gen_rtx_REG (DImode, vms_save_fp_regno));
8335 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8336 RTX_FRAME_RELATED_P (insn) = 1;
8341 /* Output the rest of the textual info surrounding the epilogue. */
8344 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8348 /* We output a nop after noreturn calls at the very end of the function to
8349 ensure that the return address always remains in the caller's code range,
8350 as not doing so might confuse unwinding engines. */
8351 insn = get_last_insn ();
8353 insn = prev_active_insn (insn);
8354 if (insn && CALL_P (insn))
8355 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8357 #if TARGET_ABI_OPEN_VMS
8358 /* Write the linkage entries. */
8359 alpha_write_linkage (file, fnname);
8362 /* End the function. */
8363 if (TARGET_ABI_OPEN_VMS
8364 || !flag_inhibit_size_directive)
8366 fputs ("\t.end ", file);
8367 assemble_name (file, fnname);
8370 inside_function = FALSE;
8374 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8376 In order to avoid the hordes of differences between generated code
8377 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8378 lots of code loading up large constants, generate rtl and emit it
8379 instead of going straight to text.
8381 Not sure why this idea hasn't been explored before... */
8384 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8385 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8388 HOST_WIDE_INT hi, lo;
8389 rtx this_rtx, funexp;
8392 /* We always require a valid GP. */
8393 emit_insn (gen_prologue_ldgp ());
8394 emit_note (NOTE_INSN_PROLOGUE_END);
8396 /* Find the "this" pointer. If the function returns a structure,
8397 the structure return pointer is in $16. */
8398 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8399 this_rtx = gen_rtx_REG (Pmode, 17);
8401 this_rtx = gen_rtx_REG (Pmode, 16);
8403 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8404 entire constant for the add. */
8405 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8406 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
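/* E.g. (illustrative): delta == 0x123456 splits as lo == 0x3456 and
   hi == 0x120000, so hi + lo == delta and the ldah+lda pair suffices;
   a delta outside the signed 32-bit range falls through to the long path.  */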
8407 if (hi + lo == delta)
8410 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8412 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8416 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0), delta);
8417 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8420 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8425 tmp = gen_rtx_REG (Pmode, 0);
8426 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8428 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8429 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8430 if (hi + lo == vcall_offset)
8433 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8437 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8439 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8443 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8446 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8448 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8451 /* Generate a tail call to the target function. */
8452 if (! TREE_USED (function))
8454 assemble_external (function);
8455 TREE_USED (function) = 1;
8457 funexp = XEXP (DECL_RTL (function), 0);
8458 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8459 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8460 SIBLING_CALL_P (insn) = 1;
8462 /* Run just enough of rest_of_compilation to get the insns emitted.
8463 There's not really enough bulk here to make other passes such as
8464 instruction scheduling worthwhile.  Note that use_thunk calls
8465 assemble_start_function and assemble_end_function. */
8466 insn = get_insns ();
8467 shorten_branches (insn);
8468 final_start_function (insn, file, 1);
8469 final (insn, file, 1);
8470 final_end_function ();
8472 #endif /* TARGET_ABI_OSF */
8474 /* Debugging support. */
8478 /* Name of the file containing the current function. */
8480 static const char *current_function_file = "";
8482 /* Offsets to alpha virtual arg/local debugging pointers. */
8484 long alpha_arg_offset;
8485 long alpha_auto_offset;
8487 /* Emit a new filename to a stream. */
8490 alpha_output_filename (FILE *stream, const char *name)
8492 static int first_time = TRUE;
8497 ++num_source_filenames;
8498 current_function_file = name;
8499 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8500 output_quoted_string (stream, name);
8501 fprintf (stream, "\n");
8504 else if (name != current_function_file
8505 && strcmp (name, current_function_file) != 0)
8507 ++num_source_filenames;
8508 current_function_file = name;
8509 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8511 output_quoted_string (stream, name);
8512 fprintf (stream, "\n");
8516 /* Structure to show the current status of registers and memory. */
8518 struct shadow_summary
8521 unsigned int i : 31; /* Mask of int regs */
8522 unsigned int fp : 31; /* Mask of fp regs */
8523 unsigned int mem : 1; /* mem == imem | fpmem */
8527 /* Summarize the effects of expression X on the machine.  Update SUM, a pointer
8528 to the summary structure. SET is nonzero if the insn is setting the
8529 object, otherwise zero. */
8532 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8534 const char *format_ptr;
8540 switch (GET_CODE (x))
8542 /* ??? Note that this case would be incorrect if the Alpha had a
8543 ZERO_EXTRACT in SET_DEST. */
8545 summarize_insn (SET_SRC (x), sum, 0);
8546 summarize_insn (SET_DEST (x), sum, 1);
8550 summarize_insn (XEXP (x, 0), sum, 1);
8554 summarize_insn (XEXP (x, 0), sum, 0);
8558 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8559 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8563 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8564 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8568 summarize_insn (SUBREG_REG (x), sum, 0);
8573 int regno = REGNO (x);
8574 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8576 if (regno == 31 || regno == 63)
8582 sum->defd.i |= mask;
8584 sum->defd.fp |= mask;
8589 sum->used.i |= mask;
8591 sum->used.fp |= mask;
8602 /* Find the regs used in memory address computation: */
8603 summarize_insn (XEXP (x, 0), sum, 0);
8606 case CONST_INT: case CONST_WIDE_INT: case CONST_DOUBLE:
8607 case SYMBOL_REF: case LABEL_REF: case CONST:
8608 case SCRATCH: case ASM_INPUT:
8611 /* Handle common unary and binary ops for efficiency. */
8612 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8613 case MOD: case UDIV: case UMOD: case AND: case IOR:
8614 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8615 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8616 case NE: case EQ: case GE: case GT: case LE:
8617 case LT: case GEU: case GTU: case LEU: case LTU:
8618 summarize_insn (XEXP (x, 0), sum, 0);
8619 summarize_insn (XEXP (x, 1), sum, 0);
8622 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8623 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8624 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8625 case SQRT: case FFS:
8626 summarize_insn (XEXP (x, 0), sum, 0);
8630 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8631 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8632 switch (format_ptr[i])
8635 summarize_insn (XEXP (x, i), sum, 0);
8639 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8640 summarize_insn (XVECEXP (x, i, j), sum, 0);
8652 /* Ensure a sufficient number of `trapb' insns are in the code when
8653 the user requests code with a trap precision of functions or instructions.
8656 In naive mode, when the user requests a trap-precision of
8657 "instruction", a trapb is needed after every instruction that may
8658 generate a trap.  This ensures that the code is resumption safe but it is also slow.
8661 When optimizations are turned on, we delay issuing a trapb as long
8662 as possible. In this context, a trap shadow is the sequence of
8663 instructions that starts with a (potentially) trap generating
8664 instruction and extends to the next trapb or call_pal instruction
8665 (but GCC never generates call_pal by itself). We can delay (and
8666 therefore sometimes omit) a trapb subject to the following conditions:
8669 (a) On entry to the trap shadow, if any Alpha register or memory
8670 location contains a value that is used as an operand value by some
8671 instruction in the trap shadow (live on entry), then no instruction
8672 in the trap shadow may modify the register or memory location.
8674 (b) Within the trap shadow, the computation of the base register
8675 for a memory load or store instruction may not involve using the
8676 result of an instruction that might generate an UNPREDICTABLE result.
8679 (c) Within the trap shadow, no register may be used more than once
8680 as a destination register.  (This is to make life easier for the trap handler.)
8683 (d) The trap shadow may not include any branch instructions. */
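/* A minimal illustration of rule (c), using hypothetical code:

       addt $f1,$f2,$f3    # may trap; opens a shadow
       mult $f4,$f5,$f3    # defines $f3 a second time -- (c) violated
       trapb               # must be issued between the two instead

   The pass below closes a pending shadow with a trapb before any insn
   that would otherwise break one of these rules.  */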
8686 alpha_handle_trap_shadows (void)
8688 struct shadow_summary shadow;
8689 int trap_pending, exception_nesting;
8693 exception_nesting = 0;
8696 shadow.used.mem = 0;
8697 shadow.defd = shadow.used;
8699 for (i = get_insns (); i ; i = NEXT_INSN (i))
8703 switch (NOTE_KIND (i))
8705 case NOTE_INSN_EH_REGION_BEG:
8706 exception_nesting++;
8711 case NOTE_INSN_EH_REGION_END:
8712 exception_nesting--;
8717 case NOTE_INSN_EPILOGUE_BEG:
8718 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8723 else if (trap_pending)
8725 if (alpha_tp == ALPHA_TP_FUNC)
8728 && GET_CODE (PATTERN (i)) == RETURN)
8731 else if (alpha_tp == ALPHA_TP_INSN)
8735 struct shadow_summary sum;
8740 sum.defd = sum.used;
8742 switch (GET_CODE (i))
8745 /* Annoyingly, get_attr_trap will die on these. */
8746 if (GET_CODE (PATTERN (i)) == USE
8747 || GET_CODE (PATTERN (i)) == CLOBBER)
8750 summarize_insn (PATTERN (i), &sum, 0);
8752 if ((sum.defd.i & shadow.defd.i)
8753 || (sum.defd.fp & shadow.defd.fp))
8755 /* (c) would be violated */
8759 /* Combine shadow with summary of current insn: */
8760 shadow.used.i |= sum.used.i;
8761 shadow.used.fp |= sum.used.fp;
8762 shadow.used.mem |= sum.used.mem;
8763 shadow.defd.i |= sum.defd.i;
8764 shadow.defd.fp |= sum.defd.fp;
8765 shadow.defd.mem |= sum.defd.mem;
8767 if ((sum.defd.i & shadow.used.i)
8768 || (sum.defd.fp & shadow.used.fp)
8769 || (sum.defd.mem & shadow.used.mem))
8771 /* (a) would be violated (also takes care of (b)) */
8772 gcc_assert (get_attr_trap (i) != TRAP_YES
8773 || (!(sum.defd.i & sum.used.i)
8774 && !(sum.defd.fp & sum.used.fp)));
8781 /* __builtin_unreachable can expand to no code at all,
8782 leaving (barrier) RTXes in the instruction stream. */
8783 goto close_shadow_notrapb;
8797 n = emit_insn_before (gen_trapb (), i);
8798 PUT_MODE (n, TImode);
8799 PUT_MODE (i, TImode);
8800 close_shadow_notrapb:
8804 shadow.used.mem = 0;
8805 shadow.defd = shadow.used;
8810 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8811 && NONJUMP_INSN_P (i)
8812 && GET_CODE (PATTERN (i)) != USE
8813 && GET_CODE (PATTERN (i)) != CLOBBER
8814 && get_attr_trap (i) == TRAP_YES)
8816 if (optimize && !trap_pending)
8817 summarize_insn (PATTERN (i), &shadow, 0);
8823 /* Alpha can only issue instruction groups simultaneously if they are
8824 suitably aligned. This is very processor-specific. */
8825 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8826 that are marked "fake". These instructions do not exist on that target,
8827 but it is possible to see these insns with deranged combinations of
8828 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8829 choose a result at random. */
8831 enum alphaev4_pipe {
8838 enum alphaev5_pipe {
8849 static enum alphaev4_pipe
8850 alphaev4_insn_pipe (rtx_insn *insn)
8852 if (recog_memoized (insn) < 0)
8854 if (get_attr_length (insn) != 4)
8857 switch (get_attr_type (insn))
8873 case TYPE_MVI: /* fake */
8888 case TYPE_FSQRT: /* fake */
8889 case TYPE_FTOI: /* fake */
8890 case TYPE_ITOF: /* fake */
8898 static enum alphaev5_pipe
8899 alphaev5_insn_pipe (rtx_insn *insn)
8901 if (recog_memoized (insn) < 0)
8903 if (get_attr_length (insn) != 4)
8906 switch (get_attr_type (insn))
8926 case TYPE_FTOI: /* fake */
8927 case TYPE_ITOF: /* fake */
8942 case TYPE_FSQRT: /* fake */
8953 /* IN_USE is a mask of the slots currently filled within the insn group.
8954 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8955 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8957 LEN is, of course, the length of the group in bytes. */
8960 alphaev4_next_group (rtx_insn *insn, int *pin_use, int *plen)
8967 || GET_CODE (PATTERN (insn)) == CLOBBER
8968 || GET_CODE (PATTERN (insn)) == USE)
8973 enum alphaev4_pipe pipe;
8975 pipe = alphaev4_insn_pipe (insn);
8979 /* Force complex instructions to start new groups. */
8983 /* If this is a completely unrecognized insn, it's an asm.
8984 We don't know how long it is, so record length as -1 to
8985 signal a needed realignment. */
8986 if (recog_memoized (insn) < 0)
8989 len = get_attr_length (insn);
8993 if (in_use & EV4_IB0)
8995 if (in_use & EV4_IB1)
9000 in_use |= EV4_IB0 | EV4_IBX;
9004 if (in_use & EV4_IB0)
9006 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9014 if (in_use & EV4_IB1)
9024 /* Haifa doesn't do well scheduling branches. */
9029 insn = next_nonnote_insn (insn);
9031 if (!insn || ! INSN_P (insn))
9034 /* Let Haifa tell us where it thinks insn group boundaries are. */
9035 if (GET_MODE (insn) == TImode)
9038 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9043 insn = next_nonnote_insn (insn);
9051 /* IN_USE is a mask of the slots currently filled within the insn group.
9052 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9053 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9055 LEN is, of course, the length of the group in bytes. */
9058 alphaev5_next_group (rtx_insn *insn, int *pin_use, int *plen)
9065 || GET_CODE (PATTERN (insn)) == CLOBBER
9066 || GET_CODE (PATTERN (insn)) == USE)
9071 enum alphaev5_pipe pipe;
9073 pipe = alphaev5_insn_pipe (insn);
9077 /* Force complex instructions to start new groups. */
9081 /* If this is a completely unrecognized insn, it's an asm.
9082 We don't know how long it is, so record length as -1 to
9083 signal a needed realignment. */
9084 if (recog_memoized (insn) < 0)
9087 len = get_attr_length (insn);
9090 /* ??? At most of the places below we would like to assert that this
9091 never happens, as it would indicate an error either in Haifa or
9092 in the scheduling description.  Unfortunately, Haifa never
9093 schedules the last instruction of the BB, so we don't have
9094 an accurate TI bit to go off. */
9096 if (in_use & EV5_E0)
9098 if (in_use & EV5_E1)
9103 in_use |= EV5_E0 | EV5_E01;
9107 if (in_use & EV5_E0)
9109 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9117 if (in_use & EV5_E1)
9123 if (in_use & EV5_FA)
9125 if (in_use & EV5_FM)
9130 in_use |= EV5_FA | EV5_FAM;
9134 if (in_use & EV5_FA)
9140 if (in_use & EV5_FM)
9153 /* Haifa doesn't do well scheduling branches. */
9154 /* ??? If this is predicted not-taken, slotting continues, except
9155 that no more IBR, FBR, or JSR insns may be slotted. */
9160 insn = next_nonnote_insn (insn);
9162 if (!insn || ! INSN_P (insn))
9165 /* Let Haifa tell us where it thinks insn group boundaries are. */
9166 if (GET_MODE (insn) == TImode)
9169 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9174 insn = next_nonnote_insn (insn);
9183 alphaev4_next_nop (int *pin_use)
9185 int in_use = *pin_use;
9188 if (!(in_use & EV4_IB0))
9193 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9198 else if (TARGET_FP && !(in_use & EV4_IB1))
9211 alphaev5_next_nop (int *pin_use)
9213 int in_use = *pin_use;
9216 if (!(in_use & EV5_E1))
9221 else if (TARGET_FP && !(in_use & EV5_FA))
9226 else if (TARGET_FP && !(in_use & EV5_FM))
9238 /* The instruction group alignment main loop. */
9241 alpha_align_insns_1 (unsigned int max_align,
9242 rtx_insn *(*next_group) (rtx_insn *, int *, int *),
9243 rtx (*next_nop) (int *))
9245 /* ALIGN is the known alignment for the insn group. */
9247 /* OFS is the offset of the current insn in the insn group. */
9249 int prev_in_use, in_use, len, ldgp;
9252 /* Let shorten_branches take care of assigning alignments to code labels.  */
9253 shorten_branches (get_insns ());
9255 if (align_functions < 4)
9257 else if ((unsigned int) align_functions < max_align)
9258 align = align_functions;
9262 ofs = prev_in_use = 0;
9265 i = next_nonnote_insn (i);
9267 ldgp = alpha_function_needs_gp ? 8 : 0;
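/* Assumption noted for clarity: the prologue ldgp expands to an ldah/lda
   pair, i.e. two 4-byte insns, hence the 8 bytes reserved here.  */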
9271 next = (*next_group) (i, &in_use, &len);
9273 /* When we see a label, resync alignment etc. */
9276 unsigned int new_align = 1 << label_to_alignment (i);
9278 if (new_align >= align)
9280 align = new_align < max_align ? new_align : max_align;
9284 else if (ofs & (new_align-1))
9285 ofs = (ofs | (new_align-1)) + 1;
9289 /* Handle complex instructions specially.  */
9290 else if (in_use == 0)
9292 /* Asms will have length < 0. This is a signal that we have
9293 lost alignment knowledge. Assume, however, that the asm
9294 will not mis-align instructions. */
9303 /* If the known alignment is smaller than the recognized insn group,
9304 realign the output. */
9305 else if ((int) align < len)
9307 unsigned int new_log_align = len > 8 ? 4 : 3;
9308 rtx_insn *prev, *where;
9310 where = prev = prev_nonnote_insn (i);
9311 if (!where || !LABEL_P (where))
9314 /* Can't realign between a call and its gp reload. */
9315 if (! (TARGET_EXPLICIT_RELOCS
9316 && prev && CALL_P (prev)))
9318 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9319 align = 1 << new_log_align;
9324 /* We may not insert padding inside the initial ldgp sequence. */
9328 /* If the group won't fit in the same INT16 as the previous,
9329 we need to add padding to keep the group together. Rather
9330 than simply leaving the insn filling to the assembler, we
9331 can make use of the knowledge of what sorts of instructions
9332 were issued in the previous group to make sure that all of
9333 the added nops are really free. */
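/* Example (made-up numbers): with align == 16 and ofs == 12, an 8-byte
   group would straddle the boundary; (16 - 12) / 4 == 1 nop pulls the
   group to the next aligned address at no issue cost.  */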
9334 else if (ofs + len > (int) align)
9336 int nop_count = (align - ofs) / 4;
9339 /* Insert nops before labels, branches, and calls to truly merge
9340 the execution of the nops with the previous instruction group. */
9341 where = prev_nonnote_insn (i);
9344 if (LABEL_P (where))
9346 rtx_insn *where2 = prev_nonnote_insn (where);
9347 if (where2 && JUMP_P (where2))
9350 else if (NONJUMP_INSN_P (where))
9357 emit_insn_before ((*next_nop)(&prev_in_use), where);
9358 while (--nop_count);
9362 ofs = (ofs + len) & (align - 1);
9363 prev_in_use = in_use;
9369 alpha_align_insns (void)
9371 if (alpha_tune == PROCESSOR_EV4)
9372 alpha_align_insns_1 (8, alphaev4_next_group, alphaev4_next_nop);
9373 else if (alpha_tune == PROCESSOR_EV5)
9374 alpha_align_insns_1 (16, alphaev5_next_group, alphaev5_next_nop);
9379 /* Insert an unop between a sibcall or noreturn function call and the GP load.  */
9382 alpha_pad_function_end (void)
9384 rtx_insn *insn, *next;
9386 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9389 || !(SIBLING_CALL_P (insn)
9390 || find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9393 /* Make sure we do not split a call and its corresponding
9394 CALL_ARG_LOCATION note. */
9395 next = NEXT_INSN (insn);
9398 if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9401 next = next_active_insn (insn);
9404 rtx pat = PATTERN (next);
9406 if (GET_CODE (pat) == SET
9407 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9408 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9409 emit_insn_after (gen_unop (), insn);
9414 /* Machine dependent reorg pass. */
9419 /* Workaround for a linker error that triggers when an exception
9420 handler immediately follows a sibcall or a noreturn function.
9422 In the sibcall case:
9424 The instruction stream from an object file:
9426 1d8: 00 00 fb 6b jmp (t12)
9427 1dc: 00 00 ba 27 ldah gp,0(ra)
9428 1e0: 00 00 bd 23 lda gp,0(gp)
9429 1e4: 00 00 7d a7 ldq t12,0(gp)
9430 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9432 was converted in the final link pass to:
9434 12003aa88: 67 fa ff c3 br 120039428 <...>
9435 12003aa8c: 00 00 fe 2f unop
9436 12003aa90: 00 00 fe 2f unop
9437 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9438 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9440 And in the noreturn case:
9442 The instruction stream from an object file:
9444 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9445 58: 00 00 ba 27 ldah gp,0(ra)
9446 5c: 00 00 bd 23 lda gp,0(gp)
9447 60: 00 00 7d a7 ldq t12,0(gp)
9448 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9450 was converted in the final link pass to:
9452 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9453 fdb28: 00 00 fe 2f unop
9454 fdb2c: 00 00 fe 2f unop
9455 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9456 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9458 GP load instructions were wrongly cleared by the linker relaxation
9459 pass. This workaround prevents removal of GP loads by inserting
9460 an unop instruction between a sibcall or noreturn function call and
9461 the exception handler prologue.  */
9463 if (current_function_has_exception_handlers ())
9464 alpha_pad_function_end ();
9466 /* The CALL_PAL that implements the trap insn updates the program counter
9467 to point after the insn.  If the trap is the last insn in the function,
9468 emit a NOP to guarantee that the PC remains inside the function boundaries.
9469 This workaround is needed to get reliable backtraces. */
9471 rtx_insn *insn = prev_active_insn (get_last_insn ());
9473 if (insn && NONJUMP_INSN_P (insn))
9475 rtx pat = PATTERN (insn);
9476 if (GET_CODE (pat) == PARALLEL)
9478 rtx vec = XVECEXP (pat, 0, 0);
9479 if (GET_CODE (vec) == TRAP_IF
9480 && XEXP (vec, 0) == const1_rtx)
9481 emit_insn_after (gen_unop (), insn);
9487 alpha_file_start (void)
9489 default_file_start ();
9491 fputs ("\t.set noreorder\n", asm_out_file);
9492 fputs ("\t.set volatile\n", asm_out_file);
9494 fputs ("\t.set noat\n", asm_out_file);
9495 if (TARGET_EXPLICIT_RELOCS)
9496 fputs ("\t.set nomacro\n", asm_out_file);
9497 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9501 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9503 else if (TARGET_MAX)
9505 else if (TARGET_BWX)
9507 else if (alpha_cpu == PROCESSOR_EV5)
9512 fprintf (asm_out_file, "\t.arch %s\n", arch);
9516 /* Since we don't have a .dynbss section, we should not allow global
9517 relocations in the .rodata section. */
9520 alpha_elf_reloc_rw_mask (void)
9522 return flag_pic ? 3 : 2;
9525 /* Return a section for X. The only special thing we do here is to
9526 honor small data. */
9529 alpha_elf_select_rtx_section (machine_mode mode, rtx x,
9530 unsigned HOST_WIDE_INT align)
9532 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9533 /* ??? Consider using mergeable sdata sections. */
9534 return sdata_section;
9536 return default_elf_select_rtx_section (mode, x, align);
9540 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9542 unsigned int flags = 0;
9544 if (strcmp (name, ".sdata") == 0
9545 || strncmp (name, ".sdata.", 7) == 0
9546 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9547 || strcmp (name, ".sbss") == 0
9548 || strncmp (name, ".sbss.", 6) == 0
9549 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9550 flags = SECTION_SMALL;
9552 flags |= default_section_type_flags (decl, name, reloc);
9556 /* Structure to collect function names for final output in link section. */
9557 /* Note that items marked with GTY can't be ifdef'ed out. */
9565 struct GTY(()) alpha_links
9569 enum reloc_kind rkind;
9572 #if TARGET_ABI_OPEN_VMS
9574 /* Return the VMS argument type corresponding to MODE. */
9577 alpha_arg_type (machine_mode mode)
9582 return TARGET_FLOAT_VAX ? FF : FS;
9584 return TARGET_FLOAT_VAX ? FD : FT;
9590 /* Return an rtx for an integer representing the VMS Argument Information register.  */
9594 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9596 unsigned HOST_WIDE_INT regval = cum.num_args;
9599 for (i = 0; i < 6; i++)
9600 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9602 return GEN_INT (regval);
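/* Illustrative encoding (hypothetical type codes): with num_args == 2 and
   both argument types encoding to 1, the AI value is
   2 | (1 << 8) | (1 << 11) == 0x902.  */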
9606 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9607 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9608 this is the reference to the linkage pointer value, 0 if this is the
9609 reference to the function entry value.  RFLAG is 1 if this is a reduced
9610 reference (code address only), 0 if this is a full reference. */
9613 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9615 struct alpha_links *al = NULL;
9616 const char *name = XSTR (func, 0);
9618 if (cfun->machine->links)
9620 /* Is this name already defined? */
9621 alpha_links **slot = cfun->machine->links->get (name);
9626 cfun->machine->links
9627 = hash_map<nofree_string_hash, alpha_links *>::create_ggc (64);
9638 /* Follow transparent alias, as this is used for CRTL translations. */
9639 id = maybe_get_identifier (name);
9642 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9643 id = TREE_CHAIN (id);
9644 name = IDENTIFIER_POINTER (id);
9647 buf_len = strlen (name) + 8 + 9;
9648 linksym = (char *) alloca (buf_len);
9649 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
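/* E.g. for funcdef_no == 4 and name == "FOO" this produces the local
   symbol "$4..FOO..lk" (illustrative).  */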
9651 al = ggc_alloc<alpha_links> ();
9653 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9655 cfun->machine->links->put (ggc_strdup (name), al);
9658 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9661 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
9667 alpha_write_one_linkage (const char *name, alpha_links *link, FILE *stream)
9669 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9670 if (link->rkind == KIND_CODEADDR)
9672 /* External and used, request code address. */
9673 fprintf (stream, "\t.code_address ");
9677 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9678 && SYMBOL_REF_LOCAL_P (link->func))
9680 /* Locally defined, build linkage pair. */
9681 fprintf (stream, "\t.quad %s..en\n", name);
9682 fprintf (stream, "\t.quad ");
9686 /* External, request linkage pair. */
9687 fprintf (stream, "\t.linkage ");
9690 assemble_name (stream, name);
9691 fputs ("\n", stream);
static void
alpha_write_linkage (FILE *stream, const char *funname)
{
  fprintf (stream, "\t.link\n");
  fprintf (stream, "\t.align 3\n");
  in_section = NULL;

#ifdef TARGET_VMS_CRASH_DEBUG
  fputs ("\t.name ", stream);
  assemble_name (stream, funname);
  fputs ("..na\n", stream);
#endif

  ASM_OUTPUT_LABEL (stream, funname);
  fprintf (stream, "\t.pdesc ");
  assemble_name (stream, funname);
  fprintf (stream, "..en,%s\n",
	   alpha_procedure_type == PT_STACK ? "stack"
	   : alpha_procedure_type == PT_REGISTER ? "reg" : "null");

  if (cfun->machine->links)
    {
      hash_map<nofree_string_hash, alpha_links *>::iterator iter
	= cfun->machine->links->begin ();
      for (; iter != cfun->machine->links->end (); ++iter)
	alpha_write_one_linkage ((*iter).first, (*iter).second, stream);
    }
}
/* Switch to an arbitrary section NAME with attributes as specified
   by FLAGS.  ALIGN specifies any known alignment requirements for
   the section; 0 if the default should be used.  */

static void
vms_asm_named_section (const char *name, unsigned int flags,
		       tree decl ATTRIBUTE_UNUSED)
{
  fputc ('\n', asm_out_file);
  fprintf (asm_out_file, ".section\t%s", name);

  if (flags & SECTION_DEBUG)
    fprintf (asm_out_file, ",NOWRT");

  fputc ('\n', asm_out_file);
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   Differs from default_ctors_section_asm_out_constructor in that the
   width of the .ctors entry is always 64 bits, rather than the 32 bits
   used by a normal pointer.  */

static void
vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (ctors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}

static void
vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  switch_to_section (dtors_section);
  assemble_align (BITS_PER_WORD);
  assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
}
#else
rtx
alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
		   bool lflag ATTRIBUTE_UNUSED,
		   bool rflag ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

#endif /* TARGET_ABI_OPEN_VMS */
static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}
/* On the Alpha, we use this to disable the floating-point registers
   when they don't exist.  */

static void
alpha_conditional_register_usage (void)
{
  int i;
  if (! TARGET_FPREGS)
    for (i = 32; i < 63; i++)
      fixed_regs[i] = call_used_regs[i] = 1;
}
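/* Hard registers 32..62 are the floating-point registers $f0-$f30.
   $f31 (hard register 63) is left alone: it always reads as zero and
   is already marked fixed.  */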
/* Canonicalize a comparison from one we don't have to one we do have.  */

static void
alpha_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			       bool op0_preserve_value)
{
  if (!op0_preserve_value
      && (*code == GE || *code == GT || *code == GEU || *code == GTU)
      && (REG_P (*op1) || *op1 == const0_rtx))
    {
      rtx tem = *op0;
      *op0 = *op1;
      *op1 = tem;
      *code = (int)swap_condition ((enum rtx_code)*code);
    }

  if ((*code == LT || *code == LTU)
      && CONST_INT_P (*op1) && INTVAL (*op1) == 256)
    {
      *code = *code == LT ? LE : LEU;
      *op1 = GEN_INT (255);
    }
}
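/* So "a > b" is rewritten as "b < a" to match the cmplt/cmple family,
   and "a < 256" becomes "a <= 255" because 255 fits in the 8-bit
   literal field of the compare instructions while 256 does not.  */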
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */

static void
alpha_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17);

  tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv;
  tree new_fenv_var, reload_fenv, restore_fnenv;
  tree update_call, atomic_feraiseexcept, hold_fnclex;

  /* Assume OSF/1 compatible interfaces.  */
  if (!TARGET_ABI_OSF)
    return;

  /* Generate the equivalent of :
       unsigned long fenv_var;
       fenv_var = __ieee_get_fp_control ();

       unsigned long masked_fenv;
       masked_fenv = fenv_var & mask;

       __ieee_set_fp_control (masked_fenv);  */

  fenv_var = create_tmp_var_raw (long_unsigned_type_node);
  get_fpscr
    = build_fn_decl ("__ieee_get_fp_control",
		     build_function_type_list (long_unsigned_type_node, NULL));
  set_fpscr
    = build_fn_decl ("__ieee_set_fp_control",
		     build_function_type_list (void_type_node, NULL));
  mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK);
  ld_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node,
		    fenv_var, build_call_expr (get_fpscr, 0));
  masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask);
  hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv);
  *hold = build2 (COMPOUND_EXPR, void_type_node,
		  build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
		  hold_fnclex);

  /* Store the value of masked_fenv to clear the exceptions:
     __ieee_set_fp_control (masked_fenv);  */

  *clear = build_call_expr (set_fpscr, 1, masked_fenv);

  /* Generate the equivalent of :
       unsigned long new_fenv_var;
       new_fenv_var = __ieee_get_fp_control ();

       __ieee_set_fp_control (fenv_var);

       __atomic_feraiseexcept (new_fenv_var);  */

  new_fenv_var = create_tmp_var_raw (long_unsigned_type_node);
  reload_fenv = build2 (MODIFY_EXPR, long_unsigned_type_node, new_fenv_var,
			build_call_expr (get_fpscr, 0));
  restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var);
  atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  update_call
    = build_call_expr (atomic_feraiseexcept, 1,
		       fold_convert (integer_type_node, new_fenv_var));
  *update = build2 (COMPOUND_EXPR, void_type_node,
		    build2 (COMPOUND_EXPR, void_type_node,
			    reload_fenv, restore_fnenv), update_call);
}
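/* The HOLD, CLEAR and UPDATE sequences built above play the roles of
   feholdexcept, feclearexcept and feupdateenv around the
   compare-and-exchange loop of a C11 atomic compound assignment, so
   that only the iteration whose result is actually stored raises FP
   exceptions.  */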
/* Implement TARGET_HARD_REGNO_MODE_OK.  On Alpha, the integer registers
   can hold any mode.  The floating-point registers can hold 64-bit
   integers as well, but not smaller values.  */

static bool
alpha_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (IN_RANGE (regno, 32, 62))
    return (mode == SFmode
	    || mode == DFmode
	    || mode == DImode
	    || mode == SCmode
	    || mode == DCmode);
  return true;
}
/* Implement TARGET_MODES_TIEABLE_P.  This asymmetric test is true when
   MODE1 could be put in an FP register but MODE2 could not.  */

static bool
alpha_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  return (alpha_hard_regno_mode_ok (32, mode1)
	  ? alpha_hard_regno_mode_ok (32, mode2)
	  : true);
}
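/* For instance, alpha_modes_tieable_p (DImode, SImode) is false:
   DImode can live in a floating-point register but SImode cannot, so
   an SImode value must not be assumed accessible in every register
   that can hold the DImode value.  */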
/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif
#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL alpha_builtin_decl
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN alpha_gimple_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND alpha_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS alpha_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P alpha_print_operand_punct_valid_p
/* Use 16-bit anchors.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
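/* This +/-0x8000 window matches the signed 16-bit displacement field
   of the Alpha memory-format instructions, so any anchored address
   can be reached in a single load or store.  */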
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST alpha_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST alpha_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE alpha_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE alpha_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P alpha_function_value_regno_p
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE alpha_secondary_memory_needed_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
  alpha_override_options_after_change
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif
#undef TARGET_LRA_P
#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON alpha_canonicalize_comparison

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV alpha_atomic_assign_expand_fenv

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK alpha_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P alpha_modes_tieable_p

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"