/* Subroutines used for code generation on the DEC Alpha.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "splay-tree.h"
#include "tree-flow.h"
#include "tree-stdarg.h"
#include "tm-constrs.h"
/* Specify which cpu to schedule for.  */
enum processor_type alpha_tune;

/* Which cpu we're generating code for.  */
enum processor_type alpha_cpu;

static const char * const alpha_cpu_name[] =
{
  "ev4", "ev5", "ev6"
};
/* Specify how accurate floating-point traps need to be.  */
enum alpha_trap_precision alpha_tp;

/* Specify the floating-point rounding mode.  */
enum alpha_fp_rounding_mode alpha_fprm;

/* Specify which things cause traps.  */
enum alpha_fp_trap_mode alpha_fptm;

/* Nonzero if inside of a function, because the Alpha asm can't
   handle .files inside of functions.  */
static int inside_function = FALSE;

/* The number of cycles of latency we should assume on memory reads.  */
int alpha_memory_latency = 3;
/* Whether the function needs the GP.  */
static int alpha_function_needs_gp;

/* The assembler name of the current function.  */
static const char *alpha_fnname;

/* The next explicit relocation sequence number.  */
extern GTY(()) int alpha_next_sequence_number;
int alpha_next_sequence_number = 1;

/* The literal and gpdisp sequence numbers for this insn, as printed
   by %# and %* respectively.  */
extern GTY(()) int alpha_this_literal_sequence_number;
extern GTY(()) int alpha_this_gpdisp_sequence_number;
int alpha_this_literal_sequence_number;
int alpha_this_gpdisp_sequence_number;
/* Costs of various operations on the different architectures.  */

struct alpha_rtx_cost_data
{
  unsigned char fp_add;
  unsigned char fp_mult;
  unsigned char fp_div_sf;
  unsigned char fp_div_df;
  unsigned char int_mult_si;
  unsigned char int_mult_di;
  unsigned char int_shift;
  unsigned char int_cmov;
  unsigned short int_div;
};
static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
{
  { /* EV4 */
    COSTS_N_INSNS (6),		/* fp_add */
    COSTS_N_INSNS (6),		/* fp_mult */
    COSTS_N_INSNS (34),		/* fp_div_sf */
    COSTS_N_INSNS (63),		/* fp_div_df */
    COSTS_N_INSNS (23),		/* int_mult_si */
    COSTS_N_INSNS (23),		/* int_mult_di */
    COSTS_N_INSNS (2),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (97),		/* int_div */
  },
  { /* EV5 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (15),		/* fp_div_sf */
    COSTS_N_INSNS (22),		/* fp_div_df */
    COSTS_N_INSNS (8),		/* int_mult_si */
    COSTS_N_INSNS (12),		/* int_mult_di */
    COSTS_N_INSNS (1) + 1,	/* int_shift */
    COSTS_N_INSNS (1),		/* int_cmov */
    COSTS_N_INSNS (83),		/* int_div */
  },
  { /* EV6 */
    COSTS_N_INSNS (4),		/* fp_add */
    COSTS_N_INSNS (4),		/* fp_mult */
    COSTS_N_INSNS (12),		/* fp_div_sf */
    COSTS_N_INSNS (15),		/* fp_div_df */
    COSTS_N_INSNS (7),		/* int_mult_si */
    COSTS_N_INSNS (7),		/* int_mult_di */
    COSTS_N_INSNS (1),		/* int_shift */
    COSTS_N_INSNS (2),		/* int_cmov */
    COSTS_N_INSNS (86),		/* int_div */
  },
};
/* Similar but tuned for code size instead of execution latency.  The
   extra +N is fractional cost tuning based on latency.  It's used to
   encourage use of cheaper insns like shift, but only if there's just
   one of them.  */

static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
{
  COSTS_N_INSNS (1),		/* fp_add */
  COSTS_N_INSNS (1),		/* fp_mult */
  COSTS_N_INSNS (1),		/* fp_div_sf */
  COSTS_N_INSNS (1) + 1,	/* fp_div_df */
  COSTS_N_INSNS (1) + 1,	/* int_mult_si */
  COSTS_N_INSNS (1) + 2,	/* int_mult_di */
  COSTS_N_INSNS (1),		/* int_shift */
  COSTS_N_INSNS (1),		/* int_cmov */
  COSTS_N_INSNS (6),		/* int_div */
};
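/* Editorial note, not from the original source: the "+N" entries above
   rely on COSTS_N_INSNS (1) being larger than 1 (it expands to N * 4 in
   rtl.h), so e.g. int_mult_di == COSTS_N_INSNS (1) + 2 makes a multiply
   slightly more expensive than a single shift (COSTS_N_INSNS (1)) while
   staying cheaper than a two-insn shift/add replacement
   (COSTS_N_INSNS (2)) -- exactly the "only if there's just one of them"
   bias described above.  */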
/* Get the number of args of a function in one of two ways.  */
#if TARGET_ABI_OPEN_VMS
#define NUM_ARGS crtl->args.info.num_args
#else
#define NUM_ARGS crtl->args.info
#endif
/* Declarations of static functions.  */
static struct machine_function *alpha_init_machine_status (void);
static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);

#if TARGET_ABI_OPEN_VMS
static void alpha_write_linkage (FILE *, const char *);
static bool vms_valid_pointer_mode (enum machine_mode);
#else
#define vms_patch_builtins()  gcc_unreachable()
#endif
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
alpha_mangle_type (const_tree type)
{
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* Parse target option strings.  */

static void
alpha_option_override (void)
{
  static const struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int flags;
    const unsigned short line_size; /* in bytes */
    const unsigned short l1_size;   /* in kb.  */
    const unsigned short l2_size;   /* in kb.  */
  } cpu_table[] = {
    /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
       EV4/EV45 had 128k to 16M 32-byte direct Bcache.  LCA45
       had 64k to 8M 8-byte direct Bcache.  */
    { "ev4",    PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "21064",  PROCESSOR_EV4, 0, 32, 8, 8*1024 },
    { "ev45",   PROCESSOR_EV4, 0, 32, 16, 16*1024 },

    /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
       and 1M to 16M 64 byte L3 (not modeled).
       PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
       PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache.  */
    { "ev5",    PROCESSOR_EV5, 0, 32, 8, 96 },
    { "21164",  PROCESSOR_EV5, 0, 32, 8, 96 },
    { "ev56",   PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
    { "pca56",  PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
    { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },

    /* EV6 had 64k 64 byte L1, 1M to 16M Bcache.  */
    { "ev6",    PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "21264",  PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
    { "ev67",   PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 },
    { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
      64, 64, 16*1024 },
  };
  int const ct_size = ARRAY_SIZE (cpu_table);
  int line_size = 0, l1_size = 0, l2_size = 0;
  int i;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
  /* Default to full IEEE compliance mode for Go language.  */
  if (strcmp (lang_hooks.name, "GNU Go") == 0
      && !(target_flags_explicit & MASK_IEEE))
    target_flags |= MASK_IEEE;
  alpha_fprm = ALPHA_FPRM_NORM;
  alpha_tp = ALPHA_TP_PROG;
  alpha_fptm = ALPHA_FPTM_N;

  if (TARGET_IEEE)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SU;
    }
  if (TARGET_IEEE_WITH_INEXACT)
    {
      alpha_tp = ALPHA_TP_INSN;
      alpha_fptm = ALPHA_FPTM_SUI;
    }

  if (alpha_tp_string)
    {
      if (! strcmp (alpha_tp_string, "p"))
	alpha_tp = ALPHA_TP_PROG;
      else if (! strcmp (alpha_tp_string, "f"))
	alpha_tp = ALPHA_TP_FUNC;
      else if (! strcmp (alpha_tp_string, "i"))
	alpha_tp = ALPHA_TP_INSN;
      else
	error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
    }
  if (alpha_fprm_string)
    {
      if (! strcmp (alpha_fprm_string, "n"))
	alpha_fprm = ALPHA_FPRM_NORM;
      else if (! strcmp (alpha_fprm_string, "m"))
	alpha_fprm = ALPHA_FPRM_MINF;
      else if (! strcmp (alpha_fprm_string, "c"))
	alpha_fprm = ALPHA_FPRM_CHOP;
      else if (! strcmp (alpha_fprm_string, "d"))
	alpha_fprm = ALPHA_FPRM_DYN;
      else
	error ("bad value %qs for -mfp-rounding-mode switch",
	       alpha_fprm_string);
    }
  if (alpha_fptm_string)
    {
      if (strcmp (alpha_fptm_string, "n") == 0)
	alpha_fptm = ALPHA_FPTM_N;
      else if (strcmp (alpha_fptm_string, "u") == 0)
	alpha_fptm = ALPHA_FPTM_U;
      else if (strcmp (alpha_fptm_string, "su") == 0)
	alpha_fptm = ALPHA_FPTM_SU;
      else if (strcmp (alpha_fptm_string, "sui") == 0)
	alpha_fptm = ALPHA_FPTM_SUI;
      else
	error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
    }
  if (alpha_cpu_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_cpu_string, cpu_table [i].name))
	  {
	    alpha_tune = alpha_cpu = cpu_table[i].processor;
	    line_size = cpu_table[i].line_size;
	    l1_size = cpu_table[i].l1_size;
	    l2_size = cpu_table[i].l2_size;
	    target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
	    target_flags |= cpu_table[i].flags;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mcpu switch", alpha_cpu_string);
    }
  if (alpha_tune_string)
    {
      for (i = 0; i < ct_size; i++)
	if (! strcmp (alpha_tune_string, cpu_table [i].name))
	  {
	    alpha_tune = cpu_table[i].processor;
	    line_size = cpu_table[i].line_size;
	    l1_size = cpu_table[i].l1_size;
	    l2_size = cpu_table[i].l2_size;
	    break;
	  }
      if (i == ct_size)
	error ("bad value %qs for -mtune switch", alpha_tune_string);
    }
  if (line_size)
    maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);
  if (l1_size)
    maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);
  if (l2_size)
    maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
			   global_options.x_param_values,
			   global_options_set.x_param_values);
  /* Do some sanity checks on the above options.  */

  if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
      && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
    {
      warning (0, "fp software completion requires -mtrap-precision=i");
      alpha_tp = ALPHA_TP_INSN;
    }

  if (alpha_cpu == PROCESSOR_EV6)
    {
      /* Except for EV6 pass 1 (not released), we always have precise
	 arithmetic traps.  Which means we can do software completion
	 without minding trap shadows.  */
      alpha_tp = ALPHA_TP_PROG;
    }
  if (TARGET_FLOAT_VAX)
    {
      if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
	{
	  warning (0, "rounding mode not supported for VAX floats");
	  alpha_fprm = ALPHA_FPRM_NORM;
	}
      if (alpha_fptm == ALPHA_FPTM_SUI)
	{
	  warning (0, "trap mode not supported for VAX floats");
	  alpha_fptm = ALPHA_FPTM_SU;
	}
      if (target_flags_explicit & MASK_LONG_DOUBLE_128)
	warning (0, "128-bit long double not supported for VAX floats");
      target_flags &= ~MASK_LONG_DOUBLE_128;
    }
  {
    char *end;
    int lat;

    if (!alpha_mlat_string)
      alpha_mlat_string = "L1";

    if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
	&& (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
      ;
    else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
	     && ISDIGIT ((unsigned char)alpha_mlat_string[1])
	     && alpha_mlat_string[2] == '\0')
      {
	static int const cache_latency[][4] =
	{
	  { 3, 30, -1 },	/* ev4 -- Bcache is a guess */
	  { 2, 12, 38 },	/* ev5 -- Bcache from PC164 LMbench numbers */
	  { 3, 12, 30 },	/* ev6 -- Bcache from DS20 LMbench.  */
	};

	lat = alpha_mlat_string[1] - '0';
	if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
	  {
	    warning (0, "L%d cache latency unknown for %s",
		     lat, alpha_cpu_name[alpha_tune]);
	    lat = 3;
	  }
	else
	  lat = cache_latency[alpha_tune][lat-1];
      }
    else if (! strcmp (alpha_mlat_string, "main"))
      {
	/* Most current memories have about 370ns latency.  This is
	   a reasonable guess for a fast cpu.  */
	lat = 150;
      }
    else
      {
	warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
	lat = 3;
      }

    alpha_memory_latency = lat;
  }
  /* Default the definition of "small data" to 8 bytes.  */
  if (!global_options_set.x_g_switch_value)
    g_switch_value = 8;

  /* Infer TARGET_SMALL_DATA from -fpic/-fPIC.  */
  if (flag_pic == 1)
    target_flags |= MASK_SMALL_DATA;
  else if (flag_pic == 2)
    target_flags &= ~MASK_SMALL_DATA;

  /* Align labels and loops for optimal branching.  */
  /* ??? Kludge these by not doing anything if we don't optimize.  */
  if (optimize > 0)
    {
      if (align_loops <= 0)
	align_loops = 16;
      if (align_jumps <= 0)
	align_jumps = 16;
    }
  if (align_functions <= 0)
    align_functions = 16;
  /* Register variables and functions with the garbage collector.  */

  /* Set up function hooks.  */
  init_machine_status = alpha_init_machine_status;

  /* Tell the compiler when we're using VAX floating point.  */
  if (TARGET_FLOAT_VAX)
    {
      REAL_MODE_FORMAT (SFmode) = &vax_f_format;
      REAL_MODE_FORMAT (DFmode) = &vax_g_format;
      REAL_MODE_FORMAT (TFmode) = NULL;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones.  */

int
zap_mask (HOST_WIDE_INT value)
{
  int i;

  for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
       i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;

  return 1;
}
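/* Illustrative examples (editorial addition): 0x00000000ffff0000 is a
   zap mask, since every one of its eight bytes is either 0x00 or 0xff;
   0x00000000ffff00f0 is not, because its low byte 0xf0 is only
   partially set.  Masks accepted here map directly onto the
   byte-select immediate of the zap/zapnot instructions.  */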
/* Return true if OP is valid for a particular TLS relocation.
   We are already guaranteed that OP is a CONST.  */

static int
tls_symbolic_operand_1 (rtx op, int size, int unspec)
{
  op = XEXP (op, 0);

  if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
    return 0;
  op = XVECEXP (op, 0, 0);

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  switch (SYMBOL_REF_TLS_MODEL (op))
    {
    case TLS_MODEL_LOCAL_DYNAMIC:
      return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
    case TLS_MODEL_INITIAL_EXEC:
      return unspec == UNSPEC_TPREL && size == 64;
    case TLS_MODEL_LOCAL_EXEC:
      return unspec == UNSPEC_TPREL && size == alpha_tls_size;
    default:
      return 0;
    }
}
/* Used by aligned_memory_operand and unaligned_memory_operand to
   resolve what reload is going to do with OP if it's a register.  */

rtx
resolve_reload_operand (rtx op)
{
  if (reload_in_progress)
    {
      rtx tmp = op;
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp)
	  && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
	{
	  op = reg_equiv_memory_loc (REGNO (tmp));
	  if (op == 0)
	    return 0;
	}
    }
  return op;
}
/* The set of scalar modes supported differs from the default
   check-what-c-supports version in that sometimes TFmode is available
   even when long double indicates only DFmode.  */

static bool
alpha_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode: /* via optabs.c */
      return true;

    case SFmode:
    case DFmode:
      return true;

    case TFmode:
      return TARGET_HAS_XFLOATING_LIBS;

    default:
      return false;
    }
}
/* Alpha implements a couple of integer vector mode operations when
   TARGET_MAX is enabled.  We do not check TARGET_MAX here, however,
   which allows the vectorizer to operate on e.g. move instructions,
   or when expand_vector_operations can do something useful.  */

static bool
alpha_vector_mode_supported_p (enum machine_mode mode)
{
  return mode == V8QImode || mode == V4HImode || mode == V2SImode;
}
/* Return 1 if this function can directly return via $26.  */

int
direct_return (void)
{
  return (TARGET_ABI_OSF
	  && reload_completed
	  && alpha_sa_size () == 0
	  && get_frame_size () == 0
	  && crtl->outgoing_args_size == 0
	  && crtl->args.pretend_args_size == 0);
}
/* Return the TLS model to use for SYMBOL.  */

static enum tls_model
tls_symbolic_operand_type (rtx symbol)
{
  enum tls_model model;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  model = SYMBOL_REF_TLS_MODEL (symbol);

  /* Local-exec with a 64-bit size is the same code as initial-exec.  */
  if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
    model = TLS_MODEL_INITIAL_EXEC;

  return model;
}
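/* Editorial summary, added as an annotation and believed accurate for
   the Alpha TLS ABI: the models correspond to the code sequences
   emitted in alpha_legitimize_address_1 below -- global-dynamic uses a
   !tlsgd sequence plus a __tls_get_addr call, local-dynamic one !tlsldm
   call plus !dtprel offsets, initial-exec a thread-pointer-relative
   offset loaded from the GOT, and local-exec !tprel immediates, with
   the 16/32/64-bit offset forms selected by alpha_tls_size.  A 64-bit
   local-exec offset needs the same code shape as initial-exec, hence
   the fold-down above.  */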
/* Return true if the function DECL will share the same GP as any
   function in the current unit of translation.  */

static bool
decl_has_samegp (const_tree decl)
{
  /* Functions that are not local can be overridden, and thus may
     not share the same gp.  */
  if (!(*targetm.binds_local_p) (decl))
    return false;

  /* If -msmall-data is in effect, assume that there is only one GP
     for the module, and so any local symbol has this property.  We
     need explicit relocations to be able to enforce this for symbols
     not defined in this unit of translation, however.  */
  if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
    return true;

  /* Functions that are not external are defined in this UoT.  */
  /* ??? Irritatingly, static functions not yet emitted are still
     marked "external".  Apply this to non-static functions only.  */
  return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
}
/* Return true if EXP should be placed in the small data section.  */

static bool
alpha_in_small_data_p (const_tree exp)
{
  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (exp) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  Duh.  */
  if (TREE_CODE (exp) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && size <= g_switch_value)
	return true;
    }

  return false;
}
#if TARGET_ABI_OPEN_VMS
static bool
vms_valid_pointer_mode (enum machine_mode mode)
{
  return (mode == SImode || mode == DImode);
}

static bool
alpha_linkage_symbol_p (const char *symname)
{
  int symlen = strlen (symname);

  if (symlen > 4)
    return strcmp (&symname [symlen - 4], "..lk") == 0;

  return false;
}

#define LINKAGE_SYMBOL_REF_P(X) \
  ((GET_CODE (X) == SYMBOL_REF		\
    && alpha_linkage_symbol_p (XSTR (X, 0))) \
   || (GET_CODE (X) == CONST		\
       && GET_CODE (XEXP (X, 0)) == PLUS	\
       && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
       && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
#endif
/* legitimate_address_p recognizes an RTL expression that is a valid
   memory address for an instruction.  The MODE argument is the
   machine mode for the MEM expression that wants to use this address.

   For Alpha, we have either a constant address or the sum of a
   register and a constant address, or just a register.  For DImode,
   any of those forms can be surrounded with an AND that clears the
   low-order three bits; this is an "unaligned" access.  */

static bool
alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  /* If this is an ldq_u type address, discard the outer AND.  */
  if (mode == DImode
      && GET_CODE (x) == AND
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == -8)
    x = XEXP (x, 0);
  /* Discard non-paradoxical subregs.  */
  if (GET_CODE (x) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (x))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    x = SUBREG_REG (x);

  /* Unadorned general registers are valid.  */
  if (REG_P (x))
    return (strict
	    ? STRICT_REG_OK_FOR_BASE_P (x)
	    : NONSTRICT_REG_OK_FOR_BASE_P (x));

  /* Constant addresses (i.e. +/- 32k) are valid.  */
  if (CONSTANT_ADDRESS_P (x))
    return true;

#if TARGET_ABI_OPEN_VMS
  if (LINKAGE_SYMBOL_REF_P (x))
    return true;
#endif

  /* Register plus a small constant offset is valid.  */
  if (GET_CODE (x) == PLUS)
    {
      rtx ofs = XEXP (x, 1);
      x = XEXP (x, 0);

      /* Discard non-paradoxical subregs.  */
      if (GET_CODE (x) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (x))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	x = SUBREG_REG (x);

      if (REG_P (x))
	{
	  if (! strict
	      && NONSTRICT_REG_OK_FP_BASE_P (x)
	      && CONST_INT_P (ofs))
	    return true;
	  if ((strict
	       ? STRICT_REG_OK_FOR_BASE_P (x)
	       : NONSTRICT_REG_OK_FOR_BASE_P (x))
	      && CONSTANT_ADDRESS_P (ofs))
	    return true;
	}
    }
  /* If we're managing explicit relocations, LO_SUM is valid, as are small
     data symbols.  Avoid explicit relocations of modes larger than word
     mode since e.g. $LC0+8($1) can fold around +/- 32k offset.  */
  else if (TARGET_EXPLICIT_RELOCS
	   && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      if (small_symbolic_operand (x, Pmode))
	return true;

      if (GET_CODE (x) == LO_SUM)
	{
	  rtx ofs = XEXP (x, 1);
	  x = XEXP (x, 0);

	  /* Discard non-paradoxical subregs.  */
	  if (GET_CODE (x) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (x))
		  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
	    x = SUBREG_REG (x);

	  /* Must have a valid base register.  */
	  if (!(REG_P (x)
		&& (strict
		    ? STRICT_REG_OK_FOR_BASE_P (x)
		    : NONSTRICT_REG_OK_FOR_BASE_P (x))))
	    return false;

	  /* The symbol must be local.  */
	  if (local_symbolic_operand (ofs, Pmode)
	      || dtp32_symbolic_operand (ofs, Pmode)
	      || tp32_symbolic_operand (ofs, Pmode))
	    return true;
	}
    }

  return false;
}
/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */

static rtx
alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
{
  HOST_WIDE_INT addend;

  /* If the address is (plus reg const_int) and the CONST_INT is not a
     valid offset, compute the high part of the constant and add it to
     the register.  Then our address is (plus temp low-part-const).  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
    {
      addend = INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
      goto split_addend;
    }

  /* If the address is (const (plus FOO const_int)), find the low-order
     part of the CONST_INT.  Then load FOO plus any high-order part of the
     CONST_INT into a register.  Our address is (plus reg low-part-const).
     This is done to reduce the number of GOT entries.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (x, 0), 1));
      x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
      goto split_addend;
    }

  /* If we have a (plus reg const), emit the load as in (2), then add
     the two registers, and finally generate (plus reg low-part-const) as
     our address.  */
  if (can_create_pseudo_p ()
      && GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
    {
      addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
      x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
			       XEXP (XEXP (XEXP (x, 1), 0), 0),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      goto split_addend;
    }
  /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
     Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
     around +/- 32k offset.  */
  if (TARGET_EXPLICIT_RELOCS
      && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
      && symbolic_operand (x, Pmode))
    {
      rtx r0, r16, eqv, tga, tp, insn, dest, seq;

      switch (tls_symbolic_operand_type (x))
	{
	case TLS_MODEL_NONE:
	  break;

	case TLS_MODEL_GLOBAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  dest = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
	  insn = gen_call_value_osf_tlsgd (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  emit_libcall_block (insn, dest, r0, x);
	  return dest;

	case TLS_MODEL_LOCAL_DYNAMIC:
	  start_sequence ();

	  r0 = gen_rtx_REG (Pmode, 0);
	  r16 = gen_rtx_REG (Pmode, 16);
	  tga = get_tls_get_addr ();
	  scratch = gen_reg_rtx (Pmode);
	  seq = GEN_INT (alpha_next_sequence_number++);

	  emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
	  insn = gen_call_value_osf_tlsldm (r0, tga, seq);
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);

	  insn = get_insns ();
	  end_sequence ();

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLDM_CALL);
	  emit_libcall_block (insn, scratch, r0, eqv);

	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);

	  if (alpha_tls_size == 64)
	    {
	      dest = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
	      emit_insn (gen_adddi3 (dest, dest, scratch));
	      return dest;
	    }
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, scratch, insn);
	      scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, scratch, eqv);

	case TLS_MODEL_INITIAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);
	  scratch = gen_reg_rtx (Pmode);
	  dest = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
	  emit_insn (gen_adddi3 (dest, tp, scratch));
	  return dest;

	case TLS_MODEL_LOCAL_EXEC:
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
	  eqv = gen_rtx_CONST (Pmode, eqv);
	  tp = gen_reg_rtx (Pmode);

	  emit_insn (gen_load_tp (tp));
	  if (alpha_tls_size == 32)
	    {
	      insn = gen_rtx_HIGH (Pmode, eqv);
	      insn = gen_rtx_PLUS (Pmode, tp, insn);
	      tp = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
	    }
	  return gen_rtx_LO_SUM (Pmode, tp, eqv);

	default:
	  gcc_unreachable ();
	}
      if (local_symbolic_operand (x, Pmode))
	{
	  if (small_symbolic_operand (x, Pmode))
	    return x;
	  else
	    {
	      if (can_create_pseudo_p ())
		scratch = gen_reg_rtx (Pmode);
	      emit_insn (gen_rtx_SET (VOIDmode, scratch,
				      gen_rtx_HIGH (Pmode, x)));
	      return gen_rtx_LO_SUM (Pmode, scratch, x);
	    }
	}
    }

  return NULL;

 split_addend:
  {
    HOST_WIDE_INT low, high;

    low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
    addend -= low;
    high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
    addend -= high;

    if (addend)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);
    if (high)
      x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
			       (!can_create_pseudo_p () ? scratch : NULL_RTX),
			       1, OPTAB_LIB_WIDEN);

    return plus_constant (Pmode, x, low);
  }
}
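/* Worked example (editorial addition): ADDEND = 0x1234abcd splits as
   low = ((0xabcd ^ 0x8000) - 0x8000) = -0x5433 and high = 0x12350000;
   the borrow from the negative low part bumps the ldah displacement
   from 0x1234 to 0x1235, addend - high - low is 0, and the final
   address is (plus (plus reg 0x12350000) -0x5433), i.e. one ldah with
   the 16-bit remainder folded into the memory displacement.  */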
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  Return X or the new, valid address.  */

static rtx
alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			  enum machine_mode mode)
{
  rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  return new_x ? new_x : x;
}

/* Return true if ADDR has an effect that depends on the machine mode it
   is used for.  On the Alpha this is true only for the unaligned modes.
   We can simplify the test since we know that the address must be valid.  */

static bool
alpha_mode_dependent_address_p (const_rtx addr)
{
  return GET_CODE (addr) == AND;
}
/* Primarily this is required for TLS symbols, but given that our move
   patterns *ought* to be able to handle any symbol at any time, we
   should never be spilling symbolic operands to the constant pool, ever.  */

static bool
alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
}

/* We do not allow indirect calls to be optimized into sibling calls, nor
   can we allow a call to a function with a different GP to be optimized
   into a sibcall.  */

static bool
alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  /* Can't do indirect tail calls, since we don't know if the target
     uses the same GP.  */
  if (!decl)
    return false;

  /* Otherwise, we can make a tail call if the target function shares
     the same GP.  */
  return decl_has_samegp (decl);
}
static int
some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  return small_symbolic_operand (x, Pmode) != 0;
}

static int
split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  /* Don't re-split.  */
  if (GET_CODE (x) == LO_SUM)
    return -1;

  if (small_symbolic_operand (x, Pmode))
    {
      x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
      *px = x;
      return -1;
    }

  return 0;
}

rtx
split_small_symbolic_operand (rtx x)
{
  x = copy_insn (x);
  for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  return x;
}
/* Indicate that INSN cannot be duplicated.  This is true for any insn
   that we've marked with gpdisp relocs, since those have to stay in
   1-1 correspondence with one another.

   Technically we could copy them if we could set up a mapping from one
   sequence number to another, across the set of insns to be duplicated.
   This seems overly complicated and error-prone since interblock motion
   from sched-ebb could move one of the pair of insns to a different block.

   Also cannot allow jsr insns to be duplicated.  If they throw exceptions,
   then they'll be in a different block from their ldgp.  Which could lead
   the bb reorder code to think that it would be ok to copy just the block
   containing the call and branch to the block containing the ldgp.  */

static bool
alpha_cannot_copy_insn_p (rtx insn)
{
  if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
    return false;
  if (recog_memoized (insn) >= 0)
    return get_attr_cannot_copy (insn);
  else
    return false;
}
/* Try a machine-dependent way of reloading an illegitimate address
   operand.  If we find one, push the reload and return the new rtx.  */

rtx
alpha_legitimize_reload_address (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum, int type,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && CONST_INT_P (XEXP (XEXP (x, 0), 1))
      && CONST_INT_P (XEXP (x, 1)))
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  /* We wish to handle large displacements off a base register by
     splitting the addend across an ldah and the mem insn.  This cuts
     the number of extra insns needed from 3 to 1.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	return NULL_RTX;

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return x;
    }

  return NULL_RTX;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
		 bool speed)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);
  const struct alpha_rtx_cost_data *cost_data;

  if (!speed)
    cost_data = &alpha_rtx_cost_size;
  else
    cost_data = &alpha_rtx_cost_data[alpha_tune];

  switch (code)
    {
    case CONST_INT:
      /* If this is an 8-bit constant, return zero since it can be used
	 nearly anywhere with no cost.  If it is a valid operand for an
	 ADD or AND, likewise return 0 if we know it will be used in that
	 context.  Otherwise, return 2 since it might be used there later.
	 All other constants take at least two insns.  */
      if (INTVAL (x) >= 0 && INTVAL (x) < 256)
	{
	  *total = 0;
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	*total = 0;
      else if ((outer_code == PLUS && add_operand (x, VOIDmode))
	       || (outer_code == AND && and_operand (x, VOIDmode)))
	*total = 0;
      else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
	*total = 2;
      else
	*total = COSTS_N_INSNS (2);
      return true;

    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (outer_code != MEM);
      else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
	*total = COSTS_N_INSNS (1 + (outer_code != MEM));
      else if (tls_symbolic_operand_type (x))
	/* Estimate of cost for call_pal rduniq.  */
	/* ??? How many insns do we emit here?  More than one...  */
	*total = COSTS_N_INSNS (15);
      else
	/* Otherwise we do a load from the GOT.  */
	*total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case HIGH:
      /* This is effectively an add_operand.  */
      *total = 2;
      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
	       && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
	{
	  *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
			      (enum rtx_code) outer_code, opno, speed)
		    + rtx_cost (XEXP (x, 1),
				(enum rtx_code) outer_code, opno, speed)
		    + COSTS_N_INSNS (1));
	  return true;
	}
      break;

    case MULT:
      if (float_mode_p)
	*total = cost_data->fp_mult;
      else if (mode == DImode)
	*total = cost_data->int_mult_di;
      else
	*total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) <= 3)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
	*total = cost_data->fp_add;
      else
	*total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (! float_mode_p)
	*total = cost_data->int_div;
      else if (mode == SFmode)
	*total = cost_data->fp_div_sf;
      else
	*total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
	{
	  *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    case FLOAT_EXTEND:
      if (MEM_P (XEXP (x, 0)))
	*total = 0;
      else
	*total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}
/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT disp, offset;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));
      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  else
    disp = 0;

  /* Find the byte offset within an aligned word.  If the memory itself is
     claimed to be aligned, believe it.  Otherwise, aligned_memory_operand
     will have examined the base register and determined it is aligned, and
     thus displacements from it are naturally alignable.  */
  if (MEM_ALIGN (ref) >= 32)
    offset = 0;
  else
    offset = disp & 3;

  /* The location should not cross aligned word boundary.  */
  gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
	      <= GET_MODE_SIZE (SImode));

  /* Access the entire aligned word.  */
  *paligned_mem = widen_memory_access (ref, SImode, -offset);

  /* Convert the byte offset within the word to a bit offset.  */
  offset *= BITS_PER_UNIT;
  *pbitnum = GEN_INT (offset);
}
/* Similar, but just get the address.  Handle the two reload cases.  */

rtx
get_unaligned_address (rtx ref)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  gcc_assert (MEM_P (ref));

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      gcc_assert (memory_address_p (GET_MODE (ref), base));
    }
  else
    base = XEXP (ref, 0);

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (Pmode, base, offset);
}
/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
   X is always returned in a register.  */

rtx
get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
{
  if (GET_CODE (addr) == PLUS)
    {
      ofs += INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
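/* Example (editorial addition): for ADDR == (plus $9 13) and OFS == 2
   the PLUS is folded to give ofs = 15, and the returned value is
   $9 + (15 & 7) = $9 + 7, which is congruent to $9 + 15 modulo 8 --
   only the low three bits of the sum matter for the unaligned access
   sequence.  */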
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of RCLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class (rtx x, enum reg_class rclass)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return rclass;

  /* These sorts of constants we can easily drop to memory.  */
  if (CONST_INT_P (x)
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (rclass == FLOAT_REGS)
	return NO_REGS;
      if (rclass == ALL_REGS)
	return GENERAL_REGS;
      return rclass;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (rclass == ALL_REGS ? GENERAL_REGS : rclass);

  return rclass;
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.  */

static reg_class_t
alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  /* Loading and storing HImode or QImode values to and from memory
     usually requires a scratch register.  */
  if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
    {
      if (any_memory_operand (x, mode))
	{
	  if (in_p)
	    {
	      if (!aligned_memory_operand (x, mode))
		sri->icode = direct_optab_handler (reload_in_optab, mode);
	    }
	  else
	    sri->icode = direct_optab_handler (reload_out_optab, mode);
	  return NO_REGS;
	}
    }

  /* We also cannot do integral arithmetic into FP regs, as might result
     from register elimination into a DImode fp register.  */
  if (rclass == FLOAT_REGS)
    {
      if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
	return GENERAL_REGS;
      if (in_p && INTEGRAL_MODE_P (mode)
	  && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
	return GENERAL_REGS;
    }

  return NO_REGS;
}
/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (!MEM_P (x))
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
/* Given SEQ, which is an INSN list, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx seq, rtx ref)
{
  rtx insn;

  if (!MEM_P (ref))
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
    else
      gcc_unreachable ();
}
static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
				 int, bool);

/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */
static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
			HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new_const;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (no_output)
	    return pc_rtx;
	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  if (no_output)
	    return pc_rtx;
	  if (!can_create_pseudo_p ())
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
	      temp = target;
	    }
	  else
	    temp = copy_to_suggested_reg (GEN_INT (high << 16),
					  subtarget, mode);

	  /* As of 2002-02-23, addsi3 is only available when not optimizing.
	     This means that if we go through expand_binop, we'll try to
	     generate extensions, etc, which will require new pseudos, which
	     will fail during some split phases.  The SImode add patterns
	     still exist, but are not named.  So build the insns by hand.  */

	  if (extra != 0)
	    {
	      if (! subtarget)
		subtarget = gen_reg_rtx (mode);
	      insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
	      insn = gen_rtx_SET (VOIDmode, subtarget, insn);
	      emit_insn (insn);
	      temp = subtarget;
	    }

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	  insn = gen_rtx_SET (VOIDmode, target, insn);
	  emit_insn (insn);
	  return target;
	}
    }
  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
	 high bits.  */

      new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new_const != 0)
	{
	  temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
	  if (temp)
	    {
	      if (no_output)
		return temp;
	      return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
				   target, 0, OPTAB_WIDEN);
	    }
	}

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_unop (mode, one_cmpl_optab, temp, target, 0);
	}

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c >> bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (unsigned HOST_WIDE_INT)c >> bits;
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				     target, 1, OPTAB_WIDEN);
	      }
	  }

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
	      - floor_log2 (~ c) - 2);
      if (bits > 0)
	for (; bits > 0; bits--)
	  {
	    new_const = c << bits;
	    temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
	    if (!temp)
	      {
		new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
		temp = alpha_emit_set_const (subtarget, mode, new_const,
					     i, no_output);
	      }
	    if (temp)
	      {
		if (no_output)
		  return temp;
		return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				     target, 0, OPTAB_WIDEN);
	      }
	  }
    }
#if HOST_BITS_PER_WIDE_INT == 64
  /* Finally, see if we can load a value into the target that is the same
     as the constant except that all bytes that are 0 are changed to be
     0xff.  If we can, then we can do a ZAPNOT to obtain the desired
     constant.  */

  new_const = c;
  for (i = 0; i < 64; i += 8)
    if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new_const |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;

  if (new_const != c)
    {
      temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
      if (temp)
	{
	  if (no_output)
	    return temp;
	  return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
			       target, 0, OPTAB_WIDEN);
	}
    }
#endif

  return 0;
}
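/* Worked example (editorial addition): for c = 0x000000009a00cd00 the
   loop above builds new_const = 0xffffffff9affcdff, a sign-extended
   32-bit value loadable as ldah -0x6500 / lda -0x3201; the AND mask
   c | ~new_const is then 0x00000000ff00ff00, a zap mask, so the whole
   constant is reached with an ldah, an lda, and a single zapnot.  */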
/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the place
   where the output has been placed if it can be done and the insns have been
   emitted.  If it would take more than N insns, zero is returned and no
   insns are emitted.  */

static rtx
alpha_emit_set_const (rtx target, enum machine_mode mode,
		      HOST_WIDE_INT c, int n, bool no_output)
{
  enum machine_mode orig_mode = mode;
  rtx orig_target = target;
  rtx result = 0;
  int i;

  /* If we can't make any pseudos, TARGET is an SImode hard register, and
     we can't load this constant in one insn, do this in DImode.  */
  if (!can_create_pseudo_p () && mode == SImode
      && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
    {
      result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
      if (result)
	return result;

      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }
  else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
    {
      target = no_output ? NULL : gen_lowpart (DImode, target);
      mode = DImode;
    }

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    {
      result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
      if (result)
	{
	  rtx insn, set;

	  if (!no_output)
	    {
	      insn = get_last_insn ();
	      set = single_set (insn);
	      if (! CONSTANT_P (SET_SRC (set)))
		set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
	    }
	  break;
	}
    }

  /* Allow for the case where we changed the mode of TARGET.  */
  if (result)
    {
      if (result == target)
	result = orig_target;
      else if (mode != orig_mode)
	result = gen_lowpart (orig_mode, result);
    }

  return result;
}
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

static rtx
alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */
#if HOST_BITS_PER_WIDE_INT >= 64
  gcc_assert (c2 == -(c1 < 0));
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d4);
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c1 == d2);
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  gcc_assert (c2 == d4);
#endif

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
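/* Worked example (editorial addition): c1 = 0x1234567890abcdef
   decomposes into d1 = -0x3211, d2 = -0x6f540000, d3 = 0x5679 and
   d4 = 0x12340000, so the emitted sequence corresponds to

	ldah $r,0x1234 ; lda $r,0x5679($r) ; sll $r,32,$r ;
	ldah $r,-0x6f54($r) ; lda $r,-0x3211($r)

   since ((0x12340000 + 0x5679) << 32) - 0x6f540000 - 0x3211 recreates
   the original constant.  */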
/* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
   the value as a pair of words (low part in *P0, high part in *P1).  */

static void
alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
{
  HOST_WIDE_INT i0, i1;

  if (GET_CODE (x) == CONST_VECTOR)
    x = simplify_subreg (DImode, x, GET_MODE (x), 0);

  if (CONST_INT_P (x))
    {
      i0 = INTVAL (x);
      i1 = -(i0 < 0);
    }
  else if (HOST_BITS_PER_WIDE_INT >= 64)
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = -(i0 < 0);
    }
  else
    {
      i0 = CONST_DOUBLE_LOW (x);
      i1 = CONST_DOUBLE_HIGH (x);
    }

  *p0 = i0;
  *p1 = i1;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  This is all constants for which
   we are willing to load the value into a register via a move pattern.
   Normally this is all symbolic constants, integral constants that
   take three or fewer instructions, and floating-point zero.  */

bool
alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  HOST_WIDE_INT i0, i1;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case HIGH:
      return true;

    case CONST:
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
	x = XEXP (XEXP (x, 0), 0);
      else
	return true;

      if (GET_CODE (x) != SYMBOL_REF)
	return true;

      /* FALLTHRU */

    case SYMBOL_REF:
      /* TLS symbols are never valid.  */
      return SYMBOL_REF_TLS_MODEL (x) == 0;

    case CONST_DOUBLE:
      if (x == CONST0_RTX (mode))
	return true;
      if (FLOAT_MODE_P (mode))
	return false;
      goto do_integer;

    case CONST_VECTOR:
      if (x == CONST0_RTX (mode))
	return true;
      if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
	return false;
      if (GET_MODE_SIZE (mode) != 8)
	return false;
      /* FALLTHRU */

    case CONST_INT:
    do_integer:
      if (TARGET_BUILD_CONSTANTS)
	return true;
      alpha_extract_integer (x, &i0, &i1);
      if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
	return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
      return false;

    default:
      return false;
    }
}
/* Operand 1 is known to be a constant, and should require more than one
   instruction to load.  Emit that multi-part load.  */

bool
alpha_split_const_mov (enum machine_mode mode, rtx *operands)
{
  HOST_WIDE_INT i0, i1;
  rtx temp = NULL_RTX;

  alpha_extract_integer (operands[1], &i0, &i1);

  if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
    temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);

  if (!temp && TARGET_BUILD_CONSTANTS)
    temp = alpha_emit_set_long_const (operands[0], i0, i1);

  if (temp)
    {
      if (!rtx_equal_p (operands[0], temp))
	emit_move_insn (operands[0], temp);
      return true;
    }

  return false;
}
/* Expand a move instruction; return true if all work is done.
   We don't handle non-bwx subword loads here.  */

bool
alpha_expand_mov (enum machine_mode mode, rtx *operands)
{
  rtx tmp;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0])
      && ! reg_or_0_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Allow legitimize_address to perform some simplifications.  */
  if (mode == Pmode && symbolic_operand (operands[1], mode))
    {
      tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
      if (tmp)
	{
	  if (tmp == operands[0])
	    return true;
	  operands[1] = tmp;
	  return false;
	}
    }

  /* Early out for non-constants and valid constants.  */
  if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
    return false;

  /* Split large integers.  */
  if (CONST_INT_P (operands[1])
      || GET_CODE (operands[1]) == CONST_DOUBLE
      || GET_CODE (operands[1]) == CONST_VECTOR)
    {
      if (alpha_split_const_mov (mode, operands))
	return true;
    }

  /* Otherwise we've nothing left but to drop the thing to memory.  */
  tmp = force_const_mem (mode, operands[1]);

  if (tmp == NULL_RTX)
    return false;

  if (reload_in_progress)
    {
      emit_move_insn (operands[0], XEXP (tmp, 0));
      operands[1] = replace_equiv_address (tmp, operands[0]);
    }
  else
    operands[1] = validize_mem (tmp);
  return false;
}
/* Expand a non-bwx QImode or HImode move instruction;
   return true if all work is done.  */

bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
  rtx seq;

  /* If the output is not a register, the input must be.  */
  if (MEM_P (operands[0]))
    operands[1] = force_reg (mode, operands[1]);

  /* Handle four memory cases, unaligned and aligned for either the input
     or the output.  The only case where we can be called during reload is
     for aligned loads; all other cases require temporaries.  */

  if (any_memory_operand (operands[1], mode))
    {
      if (aligned_memory_operand (operands[1], mode))
	{
	  if (reload_in_progress)
	    {
	      if (mode == QImode)
		seq = gen_reload_inqi_aligned (operands[0], operands[1]);
	      else
		seq = gen_reload_inhi_aligned (operands[0], operands[1]);
	      emit_insn (seq);
	    }
	  else
	    {
	      rtx aligned_mem, bitnum;
	      rtx scratch = gen_reg_rtx (SImode);
	      rtx subtarget;
	      bool copyout;

	      get_aligned_mem (operands[1], &aligned_mem, &bitnum);

	      subtarget = operands[0];
	      if (REG_P (subtarget))
		subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	      else
		subtarget = gen_reg_rtx (DImode), copyout = true;

	      if (mode == QImode)
		seq = gen_aligned_loadqi (subtarget, aligned_mem,
					  bitnum, scratch);
	      else
		seq = gen_aligned_loadhi (subtarget, aligned_mem,
					  bitnum, scratch);
	      emit_insn (seq);

	      if (copyout)
		emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	    }
	}
      else
	{
	  /* Don't pass these as parameters since that makes the generated
	     code depend on parameter evaluation order which will cause
	     bootstrap failures.  */

	  rtx temp1, temp2, subtarget, ua;
	  bool copyout;

	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);

	  subtarget = operands[0];
	  if (REG_P (subtarget))
	    subtarget = gen_lowpart (DImode, subtarget), copyout = false;
	  else
	    subtarget = gen_reg_rtx (DImode), copyout = true;

	  ua = get_unaligned_address (operands[1]);
	  if (mode == QImode)
	    seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
	  else
	    seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);

	  alpha_set_memflags (seq, operands[1]);
	  emit_insn (seq);

	  if (copyout)
	    emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
	}
      return true;
    }
  if (any_memory_operand (operands[0], mode))
    {
      if (aligned_memory_operand (operands[0], mode))
	{
	  rtx aligned_mem, bitnum;
	  rtx temp1 = gen_reg_rtx (SImode);
	  rtx temp2 = gen_reg_rtx (SImode);

	  get_aligned_mem (operands[0], &aligned_mem, &bitnum);

	  emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
					temp1, temp2));
	}
      else
	{
	  rtx temp1 = gen_reg_rtx (DImode);
	  rtx temp2 = gen_reg_rtx (DImode);
	  rtx temp3 = gen_reg_rtx (DImode);
	  rtx ua = get_unaligned_address (operands[0]);

	  if (mode == QImode)
	    seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
	  else
	    seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);

	  alpha_set_memflags (seq, operands[0]);
	  emit_insn (seq);
	}
      return true;
    }

  return false;
}
/* Implement the movmisalign patterns.  One of the operands is a memory
   that is not naturally aligned.  Emit instructions to load it.  */

void
alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
{
  /* Honor misaligned loads, for those we promised to do so.  */
  if (MEM_P (operands[1]))
    {
      rtx tmp;

      if (register_operand (operands[0], mode))
	tmp = operands[0];
      else
	tmp = gen_reg_rtx (mode);

      alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
      if (tmp != operands[0])
	emit_move_insn (operands[0], tmp);
    }
  else if (MEM_P (operands[0]))
    {
      if (!reg_or_0_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);
      alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
    }
  else
    gcc_unreachable ();
}
2276 /* Generate an unsigned DImode to FP conversion. This is the same code
2277 optabs would emit if we didn't have TFmode patterns.
2279 For SFmode, this is the only construction I've found that can pass
2280 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2281 intermediates will work, because you'll get intermediate rounding
2282 that ruins the end result. Some of this could be fixed by turning
2283 on round-to-positive-infinity, but that requires diddling the fpsr,
2284 which kills performance. I tried turning this around and converting
2285 to a negative number, so that I could turn on /m, but either I did
2286 it wrong or there's something else cause I wound up with the exact
2287 same single-bit error. There is a branch-less form of this same code:
2298 fcmoveq $f10,$f11,$f0
2300 I'm not using it because it's the same number of instructions as
2301 this branch-full form, and it has more serialized long latency
2302 instructions on the critical path.
2304 For DFmode, we can avoid rounding errors by breaking up the word
2305 into two pieces, converting them separately, and adding them back:
2307 LC0: .long 0,0x5f800000
2312 cpyse $f11,$f31,$f10
2313 cpyse $f31,$f11,$f11
2321 This doesn't seem to be a clear-cut win over the optabs form.
2322 It probably all depends on the distribution of numbers being
2323 converted -- in the optabs form, everything but the high-bit-set case
2324 has a much lower minimum execution time. */
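/* An editorial illustration of the shift-and-double path below (not
   from the original sources): for in = 0xffffffffffffffff we form
   i0 = (in >> 1) | (in & 1) = 0x7fffffffffffffff; converting i0
   rounds to 2^63, and doubling gives 2^64, the correctly rounded
   result for 2^64-1.  The OR-ed in low bit acts as a sticky bit so
   that the single rounding step comes out right.  */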
2327 alpha_emit_floatuns (rtx operands[2])
2329 rtx neglab, donelab, i0, i1, f0, in, out;
2330 enum machine_mode mode;
2333 in = force_reg (DImode, operands[1]);
2334 mode = GET_MODE (out);
2335 neglab = gen_label_rtx ();
2336 donelab = gen_label_rtx ();
2337 i0 = gen_reg_rtx (DImode);
2338 i1 = gen_reg_rtx (DImode);
2339 f0 = gen_reg_rtx (mode);
2341 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2343 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2344 emit_jump_insn (gen_jump (donelab));
2347 emit_label (neglab);
2349 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2350 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2351 emit_insn (gen_iordi3 (i0, i0, i1));
2352 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2353 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2355 emit_label (donelab);
2358 /* Generate the comparison for a conditional branch. */
2361 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2363 enum rtx_code cmp_code, branch_code;
2364 enum machine_mode branch_mode = VOIDmode;
2365 enum rtx_code code = GET_CODE (operands[0]);
2366 rtx op0 = operands[1], op1 = operands[2];
2369 if (cmp_mode == TFmode)
2371 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2376 /* The general case: fold the comparison code to the types of compares
2377 that we have, choosing the branch as necessary. */
2380 case EQ: case LE: case LT: case LEU: case LTU:
2382 /* We have these compares. */
2383 cmp_code = code, branch_code = NE;
2388 /* These must be reversed. */
2389 cmp_code = reverse_condition (code), branch_code = EQ;
2392 case GE: case GT: case GEU: case GTU:
2393 /* For FP, we swap them; for INT, we reverse them. */
2394 if (cmp_mode == DFmode)
2396 cmp_code = swap_condition (code);
2398 tem = op0, op0 = op1, op1 = tem;
2402 cmp_code = reverse_condition (code);
2411 if (cmp_mode == DFmode)
2413 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2415 /* When we are not as concerned about non-finite values, and we
2416 are comparing against zero, we can branch directly. */
2417 if (op1 == CONST0_RTX (DFmode))
2418 cmp_code = UNKNOWN, branch_code = code;
2419 else if (op0 == CONST0_RTX (DFmode))
2421 /* Undo the swap we probably did just above. */
2422 tem = op0, op0 = op1, op1 = tem;
2423 branch_code = swap_condition (cmp_code);
2429 /* ??? We mark the branch mode to be CCmode to prevent the
2430 compare and branch from being combined, since the compare
2431 insn follows IEEE rules that the branch does not. */
2432 branch_mode = CCmode;
2437 /* The following optimizations are only for signed compares. */
2438 if (code != LEU && code != LTU && code != GEU && code != GTU)
2440 /* Whee. Compare and branch against 0 directly. */
2441 if (op1 == const0_rtx)
2442 cmp_code = UNKNOWN, branch_code = code;
2444 /* If the constant doesn't fit into an immediate, but can
2445 be generated by lda/ldah, we adjust the argument and
2446 compare against zero, so we can use beq/bne directly. */
2447 /* ??? Don't do this when comparing against symbols, otherwise
2448 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2449 be declared false out of hand (at least for non-weak). */
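/* Illustrative example (editorial; the constant is hypothetical):
   for x == 0x12340000 the constant fits no 8-bit literal, but its
   negation loads with a single ldah, so "ldah t,-0x1234(x)" forms
   x - 0x12340000 and the branch becomes a beq/bne of t against
   zero.  */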
2450 else if (CONST_INT_P (op1)
2451 && (code == EQ || code == NE)
2452 && !(symbolic_operand (op0, VOIDmode)
2453 || (REG_P (op0) && REG_POINTER (op0))))
2455 rtx n_op1 = GEN_INT (-INTVAL (op1));
2457 if (! satisfies_constraint_I (op1)
2458 && (satisfies_constraint_K (n_op1)
2459 || satisfies_constraint_L (n_op1)))
2460 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2464 if (!reg_or_0_operand (op0, DImode))
2465 op0 = force_reg (DImode, op0);
2466 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2467 op1 = force_reg (DImode, op1);
2470 /* Emit an initial compare instruction, if necessary. */
2472 if (cmp_code != UNKNOWN)
2474 tem = gen_reg_rtx (cmp_mode);
2475 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2478 /* Emit the branch instruction. */
2479 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2480 gen_rtx_IF_THEN_ELSE (VOIDmode,
2481 gen_rtx_fmt_ee (branch_code,
2483 CONST0_RTX (cmp_mode)),
2484 gen_rtx_LABEL_REF (VOIDmode,
2487 emit_jump_insn (tem);
2490 /* Certain simplifications can be done to make invalid setcc operations
2491 valid. Return the final comparison, or NULL if we can't work. */
2494 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2496 enum rtx_code cmp_code;
2497 enum rtx_code code = GET_CODE (operands[1]);
2498 rtx op0 = operands[2], op1 = operands[3];
2501 if (cmp_mode == TFmode)
2503 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2508 if (cmp_mode == DFmode && !TARGET_FIX)
2511 /* The general case: fold the comparison code to the types of compares
2512 that we have, choosing the branch as necessary. */
2517 case EQ: case LE: case LT: case LEU: case LTU:
2519 /* We have these compares. */
2520 if (cmp_mode == DFmode)
2521 cmp_code = code, code = NE;
2525 if (cmp_mode == DImode && op1 == const0_rtx)
2530 cmp_code = reverse_condition (code);
2534 case GE: case GT: case GEU: case GTU:
2535 /* These normally need swapping, but for integer zero we have
2536 special patterns that recognize swapped operands. */
2537 if (cmp_mode == DImode && op1 == const0_rtx)
2539 code = swap_condition (code);
2540 if (cmp_mode == DFmode)
2541 cmp_code = code, code = NE;
2542 tmp = op0, op0 = op1, op1 = tmp;
2549 if (cmp_mode == DImode)
2551 if (!register_operand (op0, DImode))
2552 op0 = force_reg (DImode, op0);
2553 if (!reg_or_8bit_operand (op1, DImode))
2554 op1 = force_reg (DImode, op1);
2557 /* Emit an initial compare instruction, if necessary. */
2558 if (cmp_code != UNKNOWN)
2560 tmp = gen_reg_rtx (cmp_mode);
2561 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2562 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2564 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2568 /* Emit the setcc instruction. */
2569 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2570 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2575 /* Rewrite a comparison against zero CMP of the form
2576 (CODE (cc0) (const_int 0)) so it can be written validly in
2577 a conditional move (if_then_else CMP ...).
2578 If both of the operands that set cc0 are nonzero we must emit
2579 an insn to perform the compare (it can't be done within
2580 the conditional move). */
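/* For instance (editorial sketch): given (lt op0 op1) with both
   operands nonzero, we emit a separate "tem = (lt op0 op1)" compare
   and hand back (ne tem 0) for use inside the conditional move.  */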
2583 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2585 enum rtx_code code = GET_CODE (cmp);
2586 enum rtx_code cmov_code = NE;
2587 rtx op0 = XEXP (cmp, 0);
2588 rtx op1 = XEXP (cmp, 1);
2589 enum machine_mode cmp_mode
2590 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2591 enum machine_mode cmov_mode = VOIDmode;
2592 int local_fast_math = flag_unsafe_math_optimizations;
2595 if (cmp_mode == TFmode)
2597 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2602 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2604 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2606 enum rtx_code cmp_code;
2611 /* If we have fp<->int register move instructions, do a cmov by
2612 performing the comparison in fp registers, and move the
2613 zero/nonzero value to integer registers, where we can then
2614 use a normal cmov, or vice-versa. */
2618 case EQ: case LE: case LT: case LEU: case LTU:
2620 /* We have these compares. */
2621 cmp_code = code, code = NE;
2626 /* These must be reversed. */
2627 cmp_code = reverse_condition (code), code = EQ;
2630 case GE: case GT: case GEU: case GTU:
2631 /* These normally need swapping, but for integer zero we have
2632 special patterns that recognize swapped operands. */
2633 if (cmp_mode == DImode && op1 == const0_rtx)
2634 cmp_code = code, code = NE;
2637 cmp_code = swap_condition (code);
2639 tem = op0, op0 = op1, op1 = tem;
2647 if (cmp_mode == DImode)
2649 if (!reg_or_0_operand (op0, DImode))
2650 op0 = force_reg (DImode, op0);
2651 if (!reg_or_8bit_operand (op1, DImode))
2652 op1 = force_reg (DImode, op1);
2655 tem = gen_reg_rtx (cmp_mode);
2656 emit_insn (gen_rtx_SET (VOIDmode, tem,
2657 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2660 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2661 op0 = gen_lowpart (cmp_mode, tem);
2662 op1 = CONST0_RTX (cmp_mode);
2663 local_fast_math = 1;
2666 if (cmp_mode == DImode)
2668 if (!reg_or_0_operand (op0, DImode))
2669 op0 = force_reg (DImode, op0);
2670 if (!reg_or_8bit_operand (op1, DImode))
2671 op1 = force_reg (DImode, op1);
2674 /* We may be able to use a conditional move directly.
2675 This avoids emitting spurious compares. */
2676 if (signed_comparison_operator (cmp, VOIDmode)
2677 && (cmp_mode == DImode || local_fast_math)
2678 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2679 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2681 /* We can't put the comparison inside the conditional move;
2682 emit a compare instruction and put that inside the
2683 conditional move. Make sure we emit only comparisons we have;
2684 swap or reverse as necessary. */
2686 if (!can_create_pseudo_p ())
2691 case EQ: case LE: case LT: case LEU: case LTU:
2693 /* We have these compares: */
2698 /* These must be reversed. */
2699 code = reverse_condition (code);
2703 case GE: case GT: case GEU: case GTU:
2704 /* These must be swapped. */
2705 if (op1 != CONST0_RTX (cmp_mode))
2707 code = swap_condition (code);
2708 tem = op0, op0 = op1, op1 = tem;
2716 if (cmp_mode == DImode)
2718 if (!reg_or_0_operand (op0, DImode))
2719 op0 = force_reg (DImode, op0);
2720 if (!reg_or_8bit_operand (op1, DImode))
2721 op1 = force_reg (DImode, op1);
2724 /* ??? We mark the branch mode to be CCmode to prevent the compare
2725 and cmov from being combined, since the compare insn follows IEEE
2726 rules that the cmov does not. */
2727 if (cmp_mode == DFmode && !local_fast_math)
2730 tem = gen_reg_rtx (cmp_mode);
2731 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2732 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2735 /* Simplify a conditional move of two constants into a setcc with
2736 arithmetic. This is done with a splitter since combine would
2737 just undo the work if done during code generation. It also catches
2738 cases we wouldn't have before cse. */
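/* Editorial sketch of the transformation: for "dest = cond ? 8 : 0"
   we have t = 8, f = 0, diff = 8, so the splitter emits a setcc
   "subtarget = (code cond 0)" followed by "dest = subtarget << 3",
   replacing the conditional move with shift arithmetic.  */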
2741 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2742 rtx t_rtx, rtx f_rtx)
2744 HOST_WIDE_INT t, f, diff;
2745 enum machine_mode mode;
2746 rtx target, subtarget, tmp;
2748 mode = GET_MODE (dest);
2753 if (((code == NE || code == EQ) && diff < 0)
2754 || (code == GE || code == GT))
2756 code = reverse_condition (code);
2757 diff = t, t = f, f = diff;
2761 subtarget = target = dest;
2764 target = gen_lowpart (DImode, dest);
2765 if (can_create_pseudo_p ())
2766 subtarget = gen_reg_rtx (DImode);
2770 /* Below, we must be careful to use copy_rtx on target and subtarget
2771 in intermediate insns, as they may be a subreg rtx, which may not
2772 be considered valid to share. */
2774 if (f == 0 && exact_log2 (diff) > 0
2775 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2776 viable over a longer latency cmove. On EV5, the E0 slot is a
2777 scarce resource, and on EV4 shift has the same latency as a cmove. */
2778 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2780 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2781 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2783 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2784 GEN_INT (exact_log2 (t)));
2785 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2787 else if (f == 0 && t == -1)
2789 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2790 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2792 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2794 else if (diff == 1 || diff == 4 || diff == 8)
2798 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2799 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2802 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2805 add_op = GEN_INT (f);
2806 if (sext_add_operand (add_op, mode))
2808 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2810 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2811 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2823 /* Look up the X_floating library function name for the
2824 given operation. */
2826 struct GTY(()) xfloating_op
2828 const enum rtx_code code;
2829 const char *const GTY((skip)) osf_func;
2830 const char *const GTY((skip)) vms_func;
2834 static GTY(()) struct xfloating_op xfloating_ops[] =
2836 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2837 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2838 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2839 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2840 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2841 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2842 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2843 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2844 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2845 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2846 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2847 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2848 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2849 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2850 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2853 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2855 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2856 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2860 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2862 struct xfloating_op *ops = xfloating_ops;
2863 long n = ARRAY_SIZE (xfloating_ops);
2866 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2868 /* How irritating. Nothing to key off for the main table. */
2869 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2872 n = ARRAY_SIZE (vax_cvt_ops);
2875 for (i = 0; i < n; ++i, ++ops)
2876 if (ops->code == code)
2878 rtx func = ops->libcall;
2881 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2882 ? ops->vms_func : ops->osf_func);
2883 ops->libcall = func;
2891 /* Most X_floating operations take the rounding mode as an argument.
2892 Compute that here. */
2895 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2896 enum alpha_fp_rounding_mode round)
2902 case ALPHA_FPRM_NORM:
2905 case ALPHA_FPRM_MINF:
2908 case ALPHA_FPRM_CHOP:
2911 case ALPHA_FPRM_DYN:
2917 /* XXX For reference, round to +inf is mode = 3. */
2920 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2926 /* Emit an X_floating library function call.
2928 Note that these functions do not follow normal calling conventions:
2929 TFmode arguments are passed in two integer registers (as opposed to
2930 indirect); TFmode return values appear in R16+R17.
2932 FUNC is the function to call.
2933 TARGET is where the output belongs.
2934 OPERANDS are the inputs.
2935 NOPERANDS is the count of inputs.
2936 EQUIV is the expression equivalent for the function.
2940 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2941 int noperands, rtx equiv)
2943 rtx usage = NULL_RTX, tmp, reg;
2948 for (i = 0; i < noperands; ++i)
2950 switch (GET_MODE (operands[i]))
2953 reg = gen_rtx_REG (TFmode, regno);
2958 reg = gen_rtx_REG (DFmode, regno + 32);
2963 gcc_assert (CONST_INT_P (operands[i]));
2966 reg = gen_rtx_REG (DImode, regno);
2974 emit_move_insn (reg, operands[i]);
2975 use_reg (&usage, reg);
2978 switch (GET_MODE (target))
2981 reg = gen_rtx_REG (TFmode, 16);
2984 reg = gen_rtx_REG (DFmode, 32);
2987 reg = gen_rtx_REG (DImode, 0);
2993 tmp = gen_rtx_MEM (QImode, func);
2994 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
2995 const0_rtx, const0_rtx));
2996 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
2997 RTL_CONST_CALL_P (tmp) = 1;
3002 emit_libcall_block (tmp, target, reg, equiv);
3005 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3008 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3012 rtx out_operands[3];
3014 func = alpha_lookup_xfloating_lib_func (code);
3015 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3017 out_operands[0] = operands[1];
3018 out_operands[1] = operands[2];
3019 out_operands[2] = GEN_INT (mode);
3020 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3021 gen_rtx_fmt_ee (code, TFmode, operands[1],
3025 /* Emit an X_floating library function call for a comparison. */
3028 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3030 enum rtx_code cmp_code, res_code;
3031 rtx func, out, operands[2], note;
3033 /* X_floating library comparison functions return
3034 -1 unordered
3035 0 false
3036 1 true
3037 Convert the compare against the raw return value. */
3065 func = alpha_lookup_xfloating_lib_func (cmp_code);
3069 out = gen_reg_rtx (DImode);
3071 /* What's actually returned is -1,0,1, not a proper boolean value,
3072 so use an EXPR_LIST as with a generic libcall instead of a
3073 comparison type expression. */
3074 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3075 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3076 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3077 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3082 /* Emit an X_floating library function call for a conversion. */
3085 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3087 int noperands = 1, mode;
3088 rtx out_operands[2];
3090 enum rtx_code code = orig_code;
3092 if (code == UNSIGNED_FIX)
3095 func = alpha_lookup_xfloating_lib_func (code);
3097 out_operands[0] = operands[1];
3102 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3103 out_operands[1] = GEN_INT (mode);
3106 case FLOAT_TRUNCATE:
3107 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3108 out_operands[1] = GEN_INT (mode);
3115 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3116 gen_rtx_fmt_e (orig_code,
3117 GET_MODE (operands[0]),
3121 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3122 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3123 guarantee that the sequence
3124 set (OP[0] OP[2])
3125 set (OP[1] OP[3])
3126 is valid. Naturally, output operand ordering is little-endian.
3127 This is used by *movtf_internal and *movti_internal. */
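/* E.g. (editorial): a TImode move between register pairs splits
   register N into the DImode pair (N, N+1); with FIXUP_OVERLAP set,
   if the first destination word overlaps the second source word,
   both operand pairs are swapped so the two moves stay independent.  */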
3130 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3133 switch (GET_CODE (operands[1]))
3136 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3137 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3141 operands[3] = adjust_address (operands[1], DImode, 8);
3142 operands[2] = adjust_address (operands[1], DImode, 0);
3147 gcc_assert (operands[1] == CONST0_RTX (mode));
3148 operands[2] = operands[3] = const0_rtx;
3155 switch (GET_CODE (operands[0]))
3158 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3159 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3163 operands[1] = adjust_address (operands[0], DImode, 8);
3164 operands[0] = adjust_address (operands[0], DImode, 0);
3171 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3174 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3175 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3179 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3180 op2 is a register containing the sign bit, operation is the
3181 logical operation to be performed. */
3184 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3186 rtx high_bit = operands[2];
3190 alpha_split_tmode_pair (operands, TFmode, false);
3192 /* Detect three flavors of operand overlap. */
3194 if (rtx_equal_p (operands[0], operands[2]))
3196 else if (rtx_equal_p (operands[1], operands[2]))
3198 if (rtx_equal_p (operands[0], high_bit))
3205 emit_move_insn (operands[0], operands[2]);
3207 /* ??? If the destination overlaps both source tf and high_bit, then
3208 assume source tf is dead in its entirety and use the other half
3209 for a scratch register. Otherwise "scratch" is just the proper
3210 destination register. */
3211 scratch = operands[move < 2 ? 1 : 3];
3213 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3217 emit_move_insn (operands[0], operands[2]);
3219 emit_move_insn (operands[1], scratch);
3223 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3224 unaligned data:
3226 unsigned: signed:
3227 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3228 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3229 lda r3,X(r11) lda r3,X+2(r11)
3230 extwl r1,r3,r1 extql r1,r3,r1
3231 extwh r2,r3,r2 extqh r2,r3,r2
3232 or r1,r2,r1 or r1,r2,r1
3235 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3236 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3237 lda r3,X(r11) lda r3,X(r11)
3238 extll r1,r3,r1 extll r1,r3,r1
3239 extlh r2,r3,r2 extlh r2,r3,r2
3240 or r1,r2,r1 addl r1,r2,r1
3242 quad: ldq_u r1,X(r11)
3251 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3252 HOST_WIDE_INT ofs, int sign)
3254 rtx meml, memh, addr, extl, exth, tmp, mema;
3255 enum machine_mode mode;
3257 if (TARGET_BWX && size == 2)
3259 meml = adjust_address (mem, QImode, ofs);
3260 memh = adjust_address (mem, QImode, ofs+1);
3261 extl = gen_reg_rtx (DImode);
3262 exth = gen_reg_rtx (DImode);
3263 emit_insn (gen_zero_extendqidi2 (extl, meml));
3264 emit_insn (gen_zero_extendqidi2 (exth, memh));
3265 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3266 NULL, 1, OPTAB_LIB_WIDEN);
3267 addr = expand_simple_binop (DImode, IOR, extl, exth,
3268 NULL, 1, OPTAB_LIB_WIDEN);
3270 if (sign && GET_MODE (tgt) != HImode)
3272 addr = gen_lowpart (HImode, addr);
3273 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3277 if (GET_MODE (tgt) != DImode)
3278 addr = gen_lowpart (GET_MODE (tgt), addr);
3279 emit_move_insn (tgt, addr);
3284 meml = gen_reg_rtx (DImode);
3285 memh = gen_reg_rtx (DImode);
3286 addr = gen_reg_rtx (DImode);
3287 extl = gen_reg_rtx (DImode);
3288 exth = gen_reg_rtx (DImode);
3290 mema = XEXP (mem, 0);
3291 if (GET_CODE (mema) == LO_SUM)
3292 mema = force_reg (Pmode, mema);
3294 /* AND addresses cannot be in any alias set, since they may implicitly
3295 alias surrounding code. Ideally we'd have some alias set that
3296 covered all types except those with alignment 8 or higher. */
3298 tmp = change_address (mem, DImode,
3299 gen_rtx_AND (DImode,
3300 plus_constant (DImode, mema, ofs),
3302 set_mem_alias_set (tmp, 0);
3303 emit_move_insn (meml, tmp);
3305 tmp = change_address (mem, DImode,
3306 gen_rtx_AND (DImode,
3307 plus_constant (DImode, mema,
3310 set_mem_alias_set (tmp, 0);
3311 emit_move_insn (memh, tmp);
3313 if (sign && size == 2)
3315 emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
3317 emit_insn (gen_extql (extl, meml, addr));
3318 emit_insn (gen_extqh (exth, memh, addr));
3320 /* We must use tgt here for the target. Alpha-vms port fails if we use
3321 addr for the target, because addr is marked as a pointer and combine
3322 knows that pointers are always sign-extended 32-bit values. */
3323 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3324 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3325 addr, 1, OPTAB_WIDEN);
3329 emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
3330 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3334 emit_insn (gen_extwh (exth, memh, addr));
3338 emit_insn (gen_extlh (exth, memh, addr));
3342 emit_insn (gen_extqh (exth, memh, addr));
3349 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3350 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3355 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3358 /* Similarly, use ins and msk instructions to perform unaligned stores. */
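/* Roughly, for an unaligned word store (an editorial sketch after
   the Architecture Handbook pattern, not from the original sources):

	ldq_u	r1,X+1(r11)	# quadword covering the high end
	ldq_u	r2,X(r11)	# quadword covering the low end
	lda	r3,X(r11)	# low bits of r3 select the byte position
	inswh	r5,r3,r6	# position the data, high piece
	inswl	r5,r3,r4	# position the data, low piece
	mskwh	r1,r3,r1	# clear the destination bytes, high
	mskwl	r2,r3,r2	# clear the destination bytes, low
	or	r1,r6,r1
	or	r2,r4,r2
	stq_u	r1,X+1(r11)	# store high first; see below
	stq_u	r2,X(r11)  */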
3361 alpha_expand_unaligned_store (rtx dst, rtx src,
3362 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3364 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3366 if (TARGET_BWX && size == 2)
3368 if (src != const0_rtx)
3370 dstl = gen_lowpart (QImode, src);
3371 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3372 NULL, 1, OPTAB_LIB_WIDEN);
3373 dsth = gen_lowpart (QImode, dsth);
3376 dstl = dsth = const0_rtx;
3378 meml = adjust_address (dst, QImode, ofs);
3379 memh = adjust_address (dst, QImode, ofs+1);
3381 emit_move_insn (meml, dstl);
3382 emit_move_insn (memh, dsth);
3386 dstl = gen_reg_rtx (DImode);
3387 dsth = gen_reg_rtx (DImode);
3388 insl = gen_reg_rtx (DImode);
3389 insh = gen_reg_rtx (DImode);
3391 dsta = XEXP (dst, 0);
3392 if (GET_CODE (dsta) == LO_SUM)
3393 dsta = force_reg (Pmode, dsta);
3395 /* AND addresses cannot be in any alias set, since they may implicitly
3396 alias surrounding code. Ideally we'd have some alias set that
3397 covered all types except those with alignment 8 or higher. */
3399 meml = change_address (dst, DImode,
3400 gen_rtx_AND (DImode,
3401 plus_constant (DImode, dsta, ofs),
3403 set_mem_alias_set (meml, 0);
3405 memh = change_address (dst, DImode,
3406 gen_rtx_AND (DImode,
3407 plus_constant (DImode, dsta,
3410 set_mem_alias_set (memh, 0);
3412 emit_move_insn (dsth, memh);
3413 emit_move_insn (dstl, meml);
3415 addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
3417 if (src != CONST0_RTX (GET_MODE (src)))
3419 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3420 GEN_INT (size*8), addr));
3425 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3428 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3431 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3438 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3443 emit_insn (gen_mskwl (dstl, dstl, addr));
3446 emit_insn (gen_mskll (dstl, dstl, addr));
3449 emit_insn (gen_mskql (dstl, dstl, addr));
3455 if (src != CONST0_RTX (GET_MODE (src)))
3457 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3458 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3461 /* Must store high before low for the degenerate case of an aligned address. */
3462 emit_move_insn (memh, dsth);
3463 emit_move_insn (meml, dstl);
3466 /* The block move code tries to maximize speed by separating loads and
3467 stores at the expense of register pressure: we load all of the data
3468 before we store it back out. There are two secondary effects worth
3469 mentioning, that this speeds copying to/from aligned and unaligned
3470 buffers, and that it makes the code significantly easier to write. */
3472 #define MAX_MOVE_WORDS 8
3474 /* Load an integral number of consecutive unaligned quadwords. */
3477 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3478 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3480 rtx const im8 = GEN_INT (-8);
3481 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3482 rtx sreg, areg, tmp, smema;
3485 smema = XEXP (smem, 0);
3486 if (GET_CODE (smema) == LO_SUM)
3487 smema = force_reg (Pmode, smema);
3489 /* Generate all the tmp registers we need. */
3490 for (i = 0; i < words; ++i)
3492 data_regs[i] = out_regs[i];
3493 ext_tmps[i] = gen_reg_rtx (DImode);
3495 data_regs[words] = gen_reg_rtx (DImode);
3498 smem = adjust_address (smem, GET_MODE (smem), ofs);
3500 /* Load up all of the source data. */
3501 for (i = 0; i < words; ++i)
3503 tmp = change_address (smem, DImode,
3504 gen_rtx_AND (DImode,
3505 plus_constant (DImode, smema, 8*i),
3507 set_mem_alias_set (tmp, 0);
3508 emit_move_insn (data_regs[i], tmp);
3511 tmp = change_address (smem, DImode,
3512 gen_rtx_AND (DImode,
3513 plus_constant (DImode, smema,
3516 set_mem_alias_set (tmp, 0);
3517 emit_move_insn (data_regs[words], tmp);
3519 /* Extract the half-word fragments. Unfortunately DEC decided to make
3520 extxh with offset zero a noop instead of zeroing the register, so
3521 we must take care of that edge condition ourselves with cmov. */
3523 sreg = copy_addr_to_reg (smema);
3524 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3526 for (i = 0; i < words; ++i)
3528 emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
3529 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3530 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3531 gen_rtx_IF_THEN_ELSE (DImode,
3532 gen_rtx_EQ (DImode, areg,
3534 const0_rtx, ext_tmps[i])));
3537 /* Merge the half-words into whole words. */
3538 for (i = 0; i < words; ++i)
3540 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3541 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3545 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3546 may be NULL to store zeros. */
3549 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3550 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3552 rtx const im8 = GEN_INT (-8);
3553 rtx ins_tmps[MAX_MOVE_WORDS];
3554 rtx st_tmp_1, st_tmp_2, dreg;
3555 rtx st_addr_1, st_addr_2, dmema;
3558 dmema = XEXP (dmem, 0);
3559 if (GET_CODE (dmema) == LO_SUM)
3560 dmema = force_reg (Pmode, dmema);
3562 /* Generate all the tmp registers we need. */
3563 if (data_regs != NULL)
3564 for (i = 0; i < words; ++i)
3565 ins_tmps[i] = gen_reg_rtx (DImode);
3566 st_tmp_1 = gen_reg_rtx (DImode);
3567 st_tmp_2 = gen_reg_rtx (DImode);
3570 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3572 st_addr_2 = change_address (dmem, DImode,
3573 gen_rtx_AND (DImode,
3574 plus_constant (DImode, dmema,
3577 set_mem_alias_set (st_addr_2, 0);
3579 st_addr_1 = change_address (dmem, DImode,
3580 gen_rtx_AND (DImode, dmema, im8));
3581 set_mem_alias_set (st_addr_1, 0);
3583 /* Load up the destination end bits. */
3584 emit_move_insn (st_tmp_2, st_addr_2);
3585 emit_move_insn (st_tmp_1, st_addr_1);
3587 /* Shift the input data into place. */
3588 dreg = copy_addr_to_reg (dmema);
3589 if (data_regs != NULL)
3591 for (i = words-1; i >= 0; --i)
3593 emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
3594 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3596 for (i = words-1; i > 0; --i)
3598 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3599 ins_tmps[i-1], ins_tmps[i-1], 1,
3604 /* Split and merge the ends with the destination data. */
3605 emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
3606 emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
3608 if (data_regs != NULL)
3610 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3611 st_tmp_2, 1, OPTAB_WIDEN);
3612 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3613 st_tmp_1, 1, OPTAB_WIDEN);
3617 emit_move_insn (st_addr_2, st_tmp_2);
3618 for (i = words-1; i > 0; --i)
3620 rtx tmp = change_address (dmem, DImode,
3621 gen_rtx_AND (DImode,
3622 plus_constant (DImode,
3625 set_mem_alias_set (tmp, 0);
3626 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3628 emit_move_insn (st_addr_1, st_tmp_1);
3632 /* Expand string/block move operations.
3634 operands[0] is the pointer to the destination.
3635 operands[1] is the pointer to the source.
3636 operands[2] is the number of bytes to move.
3637 operands[3] is the alignment. */
3640 alpha_expand_block_move (rtx operands[])
3642 rtx bytes_rtx = operands[2];
3643 rtx align_rtx = operands[3];
3644 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3645 HOST_WIDE_INT bytes = orig_bytes;
3646 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3647 HOST_WIDE_INT dst_align = src_align;
3648 rtx orig_src = operands[1];
3649 rtx orig_dst = operands[0];
3650 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3652 unsigned int i, words, ofs, nregs = 0;
3654 if (orig_bytes <= 0)
3656 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3659 /* Look for additional alignment information from recorded register info. */
3661 tmp = XEXP (orig_src, 0);
3663 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3664 else if (GET_CODE (tmp) == PLUS
3665 && REG_P (XEXP (tmp, 0))
3666 && CONST_INT_P (XEXP (tmp, 1)))
3668 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3669 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3673 if (a >= 64 && c % 8 == 0)
3675 else if (a >= 32 && c % 4 == 0)
3677 else if (a >= 16 && c % 2 == 0)
3682 tmp = XEXP (orig_dst, 0);
3684 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3685 else if (GET_CODE (tmp) == PLUS
3686 && REG_P (XEXP (tmp, 0))
3687 && CONST_INT_P (XEXP (tmp, 1)))
3689 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3690 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3694 if (a >= 64 && c % 8 == 0)
3696 else if (a >= 32 && c % 4 == 0)
3698 else if (a >= 16 && c % 2 == 0)
3704 if (src_align >= 64 && bytes >= 8)
3708 for (i = 0; i < words; ++i)
3709 data_regs[nregs + i] = gen_reg_rtx (DImode);
3711 for (i = 0; i < words; ++i)
3712 emit_move_insn (data_regs[nregs + i],
3713 adjust_address (orig_src, DImode, ofs + i * 8));
3720 if (src_align >= 32 && bytes >= 4)
3724 for (i = 0; i < words; ++i)
3725 data_regs[nregs + i] = gen_reg_rtx (SImode);
3727 for (i = 0; i < words; ++i)
3728 emit_move_insn (data_regs[nregs + i],
3729 adjust_address (orig_src, SImode, ofs + i * 4));
3740 for (i = 0; i < words+1; ++i)
3741 data_regs[nregs + i] = gen_reg_rtx (DImode);
3743 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3751 if (! TARGET_BWX && bytes >= 4)
3753 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3754 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3761 if (src_align >= 16)
3764 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3765 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3768 } while (bytes >= 2);
3770 else if (! TARGET_BWX)
3772 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3773 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3781 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3782 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3787 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3789 /* Now save it back out again. */
3793 /* Write out the data in whatever chunks reading the source allowed. */
3794 if (dst_align >= 64)
3796 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3798 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3805 if (dst_align >= 32)
3807 /* If the source has remaining DImode regs, write them out in
3809 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3811 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3812 NULL_RTX, 1, OPTAB_WIDEN);
3814 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3815 gen_lowpart (SImode, data_regs[i]));
3816 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3817 gen_lowpart (SImode, tmp));
3822 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3824 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3831 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3833 /* Write out a remaining block of words using unaligned methods. */
3835 for (words = 1; i + words < nregs; words++)
3836 if (GET_MODE (data_regs[i + words]) != DImode)
3840 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3842 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3849 /* Due to the above, this won't be aligned. */
3850 /* ??? If we have more than one of these, consider constructing full
3851 words in registers and using alpha_expand_unaligned_store_words. */
3852 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3854 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3859 if (dst_align >= 16)
3860 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3862 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3867 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3869 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3874 /* The remainder must be byte copies. */
3877 gcc_assert (GET_MODE (data_regs[i]) == QImode);
3878 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
3887 alpha_expand_block_clear (rtx operands[])
3889 rtx bytes_rtx = operands[1];
3890 rtx align_rtx = operands[3];
3891 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3892 HOST_WIDE_INT bytes = orig_bytes;
3893 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3894 HOST_WIDE_INT alignofs = 0;
3895 rtx orig_dst = operands[0];
3897 int i, words, ofs = 0;
3899 if (orig_bytes <= 0)
3901 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3904 /* Look for stricter alignment. */
3905 tmp = XEXP (orig_dst, 0);
3907 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3908 else if (GET_CODE (tmp) == PLUS
3909 && REG_P (XEXP (tmp, 0))
3910 && CONST_INT_P (XEXP (tmp, 1)))
3912 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3913 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3918 align = a, alignofs = 8 - c % 8;
3920 align = a, alignofs = 4 - c % 4;
3922 align = a, alignofs = 2 - c % 2;
3926 /* Handle an unaligned prefix first. */
3930 #if HOST_BITS_PER_WIDE_INT >= 64
3931 /* Given that alignofs is bounded by align, the only time BWX could
3932 generate three stores is for a 7 byte fill. Prefer two individual
3933 stores over a load/mask/store sequence. */
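/* (Editorial note: with BWX every other leading count needs at most
   two stores -- 3 = 1+2, 5 = 1+4, 6 = 2+4 -- while 7 = 1+2+4 would
   take three, hence the masked store is preferred only there.)  */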
3934 if ((!TARGET_BWX || alignofs == 7)
3936 && !(alignofs == 4 && bytes >= 4))
3938 enum machine_mode mode = (align >= 64 ? DImode : SImode);
3939 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3943 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3944 set_mem_alias_set (mem, 0);
3946 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3947 if (bytes < alignofs)
3949 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3960 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3961 NULL_RTX, 1, OPTAB_WIDEN);
3963 emit_move_insn (mem, tmp);
3967 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
3969 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
3974 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
3976 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
3981 if (alignofs == 4 && bytes >= 4)
3983 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
3989 /* If we've not used the extra lead alignment information by now,
3990 we won't be able to. Downgrade align to match what's left over. */
3993 alignofs = alignofs & -alignofs;
3994 align = MIN (align, alignofs * BITS_PER_UNIT);
3998 /* Handle a block of contiguous long-words. */
4000 if (align >= 64 && bytes >= 8)
4004 for (i = 0; i < words; ++i)
4005 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4012 /* If the block is large and appropriately aligned, emit a single
4013 store followed by a sequence of stq_u insns. */
4015 if (align >= 32 && bytes > 16)
4019 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4023 orig_dsta = XEXP (orig_dst, 0);
4024 if (GET_CODE (orig_dsta) == LO_SUM)
4025 orig_dsta = force_reg (Pmode, orig_dsta);
4028 for (i = 0; i < words; ++i)
4031 = change_address (orig_dst, DImode,
4032 gen_rtx_AND (DImode,
4033 plus_constant (DImode, orig_dsta,
4036 set_mem_alias_set (mem, 0);
4037 emit_move_insn (mem, const0_rtx);
4040 /* Depending on the alignment, the first stq_u may have overlapped
4041 with the initial stl, which means that the last stq_u didn't
4042 write as much as it would appear. Leave those questionable bytes
4043 unaccounted for. */
4044 bytes -= words * 8 - 4;
4045 ofs += words * 8 - 4;
4048 /* Handle a smaller block of aligned words. */
4050 if ((align >= 64 && bytes == 4)
4051 || (align == 32 && bytes >= 4))
4055 for (i = 0; i < words; ++i)
4056 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4063 /* An unaligned block uses stq_u stores for as many as possible. */
4069 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4075 /* Next clean up any trailing pieces. */
4077 #if HOST_BITS_PER_WIDE_INT >= 64
4078 /* Count the number of bits in BYTES for which aligned stores could
4081 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4085 /* If we have appropriate alignment (and it wouldn't take too many
4086 instructions otherwise), mask out the bytes we need. */
4087 if (TARGET_BWX ? words > 2 : bytes > 0)
4094 mem = adjust_address (orig_dst, DImode, ofs);
4095 set_mem_alias_set (mem, 0);
4097 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4099 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4100 NULL_RTX, 1, OPTAB_WIDEN);
4102 emit_move_insn (mem, tmp);
4105 else if (align >= 32 && bytes < 4)
4110 mem = adjust_address (orig_dst, SImode, ofs);
4111 set_mem_alias_set (mem, 0);
4113 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4115 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4116 NULL_RTX, 1, OPTAB_WIDEN);
4118 emit_move_insn (mem, tmp);
4124 if (!TARGET_BWX && bytes >= 4)
4126 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4136 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4140 } while (bytes >= 2);
4142 else if (! TARGET_BWX)
4144 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4152 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4160 /* Returns a mask so that zap(x, value) == x & mask. */
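/* E.g. (editorial): zap clears byte i of x when bit i of VALUE is
   set, so alpha_expand_zap_mask (0x0f) returns 0xffffffff00000000:
   the low four bytes are zapped, the high four survive the AND.  */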
4163 alpha_expand_zap_mask (HOST_WIDE_INT value)
4168 if (HOST_BITS_PER_WIDE_INT >= 64)
4170 HOST_WIDE_INT mask = 0;
4172 for (i = 7; i >= 0; --i)
4175 if (!((value >> i) & 1))
4179 result = gen_int_mode (mask, DImode);
4183 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4185 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4187 for (i = 7; i >= 4; --i)
4190 if (!((value >> i) & 1))
4194 for (i = 3; i >= 0; --i)
4197 if (!((value >> i) & 1))
4201 result = immed_double_const (mask_lo, mask_hi, DImode);
4208 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4209 enum machine_mode mode,
4210 rtx op0, rtx op1, rtx op2)
4212 op0 = gen_lowpart (mode, op0);
4214 if (op1 == const0_rtx)
4215 op1 = CONST0_RTX (mode);
4217 op1 = gen_lowpart (mode, op1);
4219 if (op2 == const0_rtx)
4220 op2 = CONST0_RTX (mode);
4222 op2 = gen_lowpart (mode, op2);
4224 emit_insn ((*gen) (op0, op1, op2));
4227 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4228 COND is true. Mark the jump as unlikely to be taken. */
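/* (Editorial note: the probability attached below is
   REG_BR_PROB_BASE / 100 - 1, i.e. just under 1%.)  */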
4231 emit_unlikely_jump (rtx cond, rtx label)
4233 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4236 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4237 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4238 add_reg_note (x, REG_BR_PROB, very_unlikely);
4241 /* A subroutine of the atomic operation splitters. Emit a load-locked
4242 instruction in MODE. */
4245 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4247 rtx (*fn) (rtx, rtx) = NULL;
4249 fn = gen_load_locked_si;
4250 else if (mode == DImode)
4251 fn = gen_load_locked_di;
4252 emit_insn (fn (reg, mem));
4255 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4256 instruction in MODE. */
4259 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4261 rtx (*fn) (rtx, rtx, rtx) = NULL;
4263 fn = gen_store_conditional_si;
4264 else if (mode == DImode)
4265 fn = gen_store_conditional_di;
4266 emit_insn (fn (res, mem, val));
4269 /* Subroutines of the atomic operation splitters. Emit barriers
4270 as needed for the memory MODEL. */
4273 alpha_pre_atomic_barrier (enum memmodel model)
4275 if (need_atomic_barrier_p (model, true))
4276 emit_insn (gen_memory_barrier ());
4280 alpha_post_atomic_barrier (enum memmodel model)
4282 if (need_atomic_barrier_p (model, false))
4283 emit_insn (gen_memory_barrier ());
4286 /* A subroutine of the atomic operation splitters. Emit an insxl
4287 instruction in MODE. */
4290 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4292 rtx ret = gen_reg_rtx (DImode);
4293 rtx (*fn) (rtx, rtx, rtx);
4313 op1 = force_reg (mode, op1);
4314 emit_insn (fn (ret, op1, op2));
4319 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4320 to perform. MEM is the memory on which to operate. VAL is the second
4321 operand of the binary operator. BEFORE and AFTER are optional locations to
4322 return the value of MEM either before or after the operation. SCRATCH is
4323 a scratch register. */
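/* The result is the classic load-locked/store-conditional retry loop;
   for a quadword atomic add it is roughly (editorial sketch):

   retry:
	ldq_l	t0,0(a0)	# BEFORE value
	addq	t0,a1,t1	# AFTER value, held in SCRATCH
	stq_c	t1,0(a0)	# t1 becomes 0 if the lock was lost
	beq	t1,retry	# unlikely backward branch
 */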
4326 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
4327 rtx after, rtx scratch, enum memmodel model)
4329 enum machine_mode mode = GET_MODE (mem);
4330 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4332 alpha_pre_atomic_barrier (model);
4334 label = gen_label_rtx ();
4336 label = gen_rtx_LABEL_REF (DImode, label);
4340 emit_load_locked (mode, before, mem);
4344 x = gen_rtx_AND (mode, before, val);
4345 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4347 x = gen_rtx_NOT (mode, val);
4350 x = gen_rtx_fmt_ee (code, mode, before, val);
4352 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4353 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4355 emit_store_conditional (mode, cond, mem, scratch);
4357 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4358 emit_unlikely_jump (x, label);
4360 alpha_post_atomic_barrier (model);
4363 /* Expand a compare and swap operation. */
4366 alpha_split_compare_and_swap (rtx operands[])
4368 rtx cond, retval, mem, oldval, newval;
4370 enum memmodel mod_s, mod_f;
4371 enum machine_mode mode;
4372 rtx label1, label2, x;
4375 retval = operands[1];
4377 oldval = operands[3];
4378 newval = operands[4];
4379 is_weak = (operands[5] != const0_rtx);
4380 mod_s = (enum memmodel) INTVAL (operands[6]);
4381 mod_f = (enum memmodel) INTVAL (operands[7]);
4382 mode = GET_MODE (mem);
4384 alpha_pre_atomic_barrier (mod_s);
4389 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4390 emit_label (XEXP (label1, 0));
4392 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4394 emit_load_locked (mode, retval, mem);
4396 x = gen_lowpart (DImode, retval);
4397 if (oldval == const0_rtx)
4399 emit_move_insn (cond, const0_rtx);
4400 x = gen_rtx_NE (DImode, x, const0_rtx);
4404 x = gen_rtx_EQ (DImode, x, oldval);
4405 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4406 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4408 emit_unlikely_jump (x, label2);
4410 emit_move_insn (cond, newval);
4411 emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
4415 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4416 emit_unlikely_jump (x, label1);
4419 if (mod_f != MEMMODEL_RELAXED)
4420 emit_label (XEXP (label2, 0));
4422 alpha_post_atomic_barrier (mod_s);
4424 if (mod_f == MEMMODEL_RELAXED)
4425 emit_label (XEXP (label2, 0));
4429 alpha_expand_compare_and_swap_12 (rtx operands[])
4431 rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
4432 enum machine_mode mode;
4433 rtx addr, align, wdst;
4434 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
4439 oldval = operands[3];
4440 newval = operands[4];
4441 is_weak = operands[5];
4442 mod_s = operands[6];
4443 mod_f = operands[7];
4444 mode = GET_MODE (mem);
4446 /* We forced the address into a register via mem_noofs_operand. */
4447 addr = XEXP (mem, 0);
4448 gcc_assert (register_operand (addr, DImode));
4450 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4451 NULL_RTX, 1, OPTAB_DIRECT);
4453 oldval = convert_modes (DImode, mode, oldval, 1);
4455 if (newval != const0_rtx)
4456 newval = emit_insxl (mode, newval, addr);
4458 wdst = gen_reg_rtx (DImode);
4460 gen = gen_atomic_compare_and_swapqi_1;
4462 gen = gen_atomic_compare_and_swaphi_1;
4463 emit_insn (gen (cond, wdst, mem, oldval, newval, align,
4464 is_weak, mod_s, mod_f));
4466 emit_move_insn (dst, gen_lowpart (mode, wdst));
4470 alpha_split_compare_and_swap_12 (rtx operands[])
4472 rtx cond, dest, orig_mem, oldval, newval, align, scratch;
4473 enum machine_mode mode;
4475 enum memmodel mod_s, mod_f;
4476 rtx label1, label2, mem, addr, width, mask, x;
4480 orig_mem = operands[2];
4481 oldval = operands[3];
4482 newval = operands[4];
4483 align = operands[5];
4484 is_weak = (operands[6] != const0_rtx);
4485 mod_s = (enum memmodel) INTVAL (operands[7]);
4486 mod_f = (enum memmodel) INTVAL (operands[8]);
4487 scratch = operands[9];
4488 mode = GET_MODE (orig_mem);
4489 addr = XEXP (orig_mem, 0);
4491 mem = gen_rtx_MEM (DImode, align);
4492 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4493 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4494 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4496 alpha_pre_atomic_barrier (mod_s);
4501 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4502 emit_label (XEXP (label1, 0));
4504 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4506 emit_load_locked (DImode, scratch, mem);
4508 width = GEN_INT (GET_MODE_BITSIZE (mode));
4509 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4510 emit_insn (gen_extxl (dest, scratch, width, addr));
4512 if (oldval == const0_rtx)
4514 emit_move_insn (cond, const0_rtx);
4515 x = gen_rtx_NE (DImode, dest, const0_rtx);
4519 x = gen_rtx_EQ (DImode, dest, oldval);
4520 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4521 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4523 emit_unlikely_jump (x, label2);
4525 emit_insn (gen_mskxl (cond, scratch, mask, addr));
4527 if (newval != const0_rtx)
4528 emit_insn (gen_iordi3 (cond, cond, newval));
4530 emit_store_conditional (DImode, cond, mem, cond);
4534 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4535 emit_unlikely_jump (x, label1);
4538 if (mod_f != MEMMODEL_RELAXED)
4539 emit_label (XEXP (label2, 0));
4541 alpha_post_atomic_barrier (mod_s);
4543 if (mod_f == MEMMODEL_RELAXED)
4544 emit_label (XEXP (label2, 0));
4547 /* Expand an atomic exchange operation. */
4550 alpha_split_atomic_exchange (rtx operands[])
4552 rtx retval, mem, val, scratch;
4553 enum memmodel model;
4554 enum machine_mode mode;
4557 retval = operands[0];
4560 model = (enum memmodel) INTVAL (operands[3]);
4561 scratch = operands[4];
4562 mode = GET_MODE (mem);
4563 cond = gen_lowpart (DImode, scratch);
4565 alpha_pre_atomic_barrier (model);
4567 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4568 emit_label (XEXP (label, 0));
4570 emit_load_locked (mode, retval, mem);
4571 emit_move_insn (scratch, val);
4572 emit_store_conditional (mode, cond, mem, scratch);
4574 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4575 emit_unlikely_jump (x, label);
4577 alpha_post_atomic_barrier (model);
4581 alpha_expand_atomic_exchange_12 (rtx operands[])
4583 rtx dst, mem, val, model;
4584 enum machine_mode mode;
4585 rtx addr, align, wdst;
4586 rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
4591 model = operands[3];
4592 mode = GET_MODE (mem);
4594 /* We forced the address into a register via mem_noofs_operand. */
4595 addr = XEXP (mem, 0);
4596 gcc_assert (register_operand (addr, DImode));
4598 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4599 NULL_RTX, 1, OPTAB_DIRECT);
4601 /* Insert val into the correct byte location within the word. */
4602 if (val != const0_rtx)
4603 val = emit_insxl (mode, val, addr);
4605 wdst = gen_reg_rtx (DImode);
4607 gen = gen_atomic_exchangeqi_1;
4609 gen = gen_atomic_exchangehi_1;
4610 emit_insn (gen (wdst, mem, val, align, model));
4612 emit_move_insn (dst, gen_lowpart (mode, wdst));
4616 alpha_split_atomic_exchange_12 (rtx operands[])
4618 rtx dest, orig_mem, addr, val, align, scratch;
4619 rtx label, mem, width, mask, x;
4620 enum machine_mode mode;
4621 enum memmodel model;
4624 orig_mem = operands[1];
4626 align = operands[3];
4627 model = (enum memmodel) INTVAL (operands[4]);
4628 scratch = operands[5];
4629 mode = GET_MODE (orig_mem);
4630 addr = XEXP (orig_mem, 0);
4632 mem = gen_rtx_MEM (DImode, align);
4633 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
4634 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
4635 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
4637 alpha_pre_atomic_barrier (model);
4639 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4640 emit_label (XEXP (label, 0));
4642 emit_load_locked (DImode, scratch, mem);
4644 width = GEN_INT (GET_MODE_BITSIZE (mode));
4645 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4646 emit_insn (gen_extxl (dest, scratch, width, addr));
4647 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4648 if (val != const0_rtx)
4649 emit_insn (gen_iordi3 (scratch, scratch, val));
4651 emit_store_conditional (DImode, scratch, mem, scratch);
4653 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4654 emit_unlikely_jump (x, label);
4656 alpha_post_atomic_barrier (model);
4659 /* Adjust the cost of a scheduling dependency. Return the new cost of
4660 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4663 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4665 enum attr_type dep_insn_type;
4667 /* If the dependence is an anti-dependence, there is no cost. For an
4668 output dependence, there is sometimes a cost, but it doesn't seem
4669 worth handling those few cases. */
4670 if (REG_NOTE_KIND (link) != 0)
4673 /* If we can't recognize the insns, we can't really do anything. */
4674 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4677 dep_insn_type = get_attr_type (dep_insn);
4679 /* Bring in the user-defined memory latency. */
4680 if (dep_insn_type == TYPE_ILD
4681 || dep_insn_type == TYPE_FLD
4682 || dep_insn_type == TYPE_LDSYM)
4683 cost += alpha_memory_latency - 1;
4685 /* Everything else handled in DFA bypasses now. */
4690 /* The number of instructions that can be issued per cycle. */
4693 alpha_issue_rate (void)
4695 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4698 /* How many alternative schedules to try. This should be as wide as the
4699 scheduling freedom in the DFA, but no wider. Making this value too
4700 large results in extra work for the scheduler.
4702 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4703 alternative schedules. For EV5, we can choose between E0/E1 and
4704 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4707 alpha_multipass_dfa_lookahead (void)
4709 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4712 /* Machine-specific function data. */
4714 struct GTY(()) alpha_links;
4716 struct GTY(()) machine_function
4719 const char *some_ld_name;
4721 /* For flag_reorder_blocks_and_partition. */
4724 /* For VMS condition handlers. */
4725 bool uses_condition_handler;
4727 /* Linkage entries. */
4728 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
4732 /* How to allocate a 'struct machine_function'. */
4734 static struct machine_function *
4735 alpha_init_machine_status (void)
4737 return ggc_alloc_cleared_machine_function ();
4740 /* Support for frame based VMS condition handlers. */
4742 /* A VMS condition handler may be established for a function with a call to
4743 __builtin_establish_vms_condition_handler, and cancelled with a call to
4744 __builtin_revert_vms_condition_handler.
4746 The VMS Condition Handling Facility knows about the existence of a handler
4747 from the procedure descriptor .handler field. As the VMS native compilers do,
4748 we store the user-specified handler's address at a fixed location in the
4749 stack frame and point the procedure descriptor at a common wrapper which
4750 fetches the real handler's address and issues an indirect call.
4752 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4754 We force the procedure kind to PT_STACK, and the fixed frame location is
4755 fp+8, just before the register save area. We use the handler_data field in
4756 the procedure descriptor to state the fp offset at which the installed
4757 handler address can be found. */
4759 #define VMS_COND_HANDLER_FP_OFFSET 8
4761 /* Expand code to store the currently installed user VMS condition handler
4762 into TARGET and install HANDLER as the new condition handler. */
4765 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4767 rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
4768 VMS_COND_HANDLER_FP_OFFSET);
4771 = gen_rtx_MEM (DImode, handler_slot_address);
4773 emit_move_insn (target, handler_slot);
4774 emit_move_insn (handler_slot, handler);
4776 /* Notify the start/prologue/epilogue emitters that the condition handler
4777 slot is needed. In addition to reserving the slot space, this will force
4778 the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4779 use above is correct. */
4780 cfun->machine->uses_condition_handler = true;
4783 /* Expand code to store the current VMS condition handler into TARGET and
4787 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4789 /* We implement this by establishing a null condition handler, with the tiny
4790 side effect of setting uses_condition_handler. This is a little bit
4791 pessimistic if no actual builtin_establish call is ever issued, which is
4792 not a real problem and is expected never to happen anyway. */
4794 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4797 /* Functions to save and restore alpha_return_addr_rtx. */
4799 /* Start the ball rolling with RETURN_ADDR_RTX. */
4802 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4807 return get_hard_reg_initial_val (Pmode, REG_RA);
4810 /* Return or create a memory slot containing the gp value for the current
4811 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4814 alpha_gp_save_rtx (void)
4816 rtx seq, m = cfun->machine->gp_save_rtx;
4822 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4823 m = validize_mem (m);
4824 emit_move_insn (m, pic_offset_table_rtx);
4829 /* We used to simply emit the sequence after entry_of_function.
4830 However this breaks the CFG if the first instruction in the
4831 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4832 label. Emit the sequence properly on the edge. We are only
4833 invoked from dw2_build_landing_pads and finish_eh_generation
4834 will call commit_edge_insertions thanks to a kludge. */
4835 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4837 cfun->machine->gp_save_rtx = m;
4844 alpha_instantiate_decls (void)
4846 if (cfun->machine->gp_save_rtx != NULL_RTX)
4847 instantiate_decl_rtl (cfun->machine->gp_save_rtx);
4851 alpha_ra_ever_killed (void)
4855 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4856 return (int)df_regs_ever_live_p (REG_RA);
4858 push_topmost_sequence ();
4860 pop_topmost_sequence ();
4862 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4866 /* Return the trap mode suffix applicable to the current
4867 instruction, or NULL. */
4870 get_trap_mode_suffix (void)
4872 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4876 case TRAP_SUFFIX_NONE:
4879 case TRAP_SUFFIX_SU:
4880 if (alpha_fptm >= ALPHA_FPTM_SU)
4884 case TRAP_SUFFIX_SUI:
4885 if (alpha_fptm >= ALPHA_FPTM_SUI)
4889 case TRAP_SUFFIX_V_SV:
4897 case ALPHA_FPTM_SUI:
4903 case TRAP_SUFFIX_V_SV_SVI:
4912 case ALPHA_FPTM_SUI:
4919 case TRAP_SUFFIX_U_SU_SUI:
4928 case ALPHA_FPTM_SUI:
4941 /* Return the rounding mode suffix applicable to the current
4942 instruction, or NULL. */
4945 get_round_mode_suffix (void)
4947 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4951 case ROUND_SUFFIX_NONE:
4953 case ROUND_SUFFIX_NORMAL:
4956 case ALPHA_FPRM_NORM:
4958 case ALPHA_FPRM_MINF:
4960 case ALPHA_FPRM_CHOP:
4962 case ALPHA_FPRM_DYN:
4969 case ROUND_SUFFIX_C:
4978 /* Locate some local-dynamic symbol still in use by this function
4979 so that we can print its name in some movdi_er_tlsldm pattern. */
4982 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4986 if (GET_CODE (x) == SYMBOL_REF
4987 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4989 cfun->machine->some_ld_name = XSTR (x, 0);
4997 get_some_local_dynamic_name (void)
5001 if (cfun->machine->some_ld_name)
5002 return cfun->machine->some_ld_name;
5004 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5006 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5007 return cfun->machine->some_ld_name;
5012 /* Print an operand. Recognize special options, documented below. */
5015 print_operand (FILE *file, rtx x, int code)
5022 /* Print the assembler name of the current function. */
5023 assemble_name (file, alpha_fnname);
5027 assemble_name (file, get_some_local_dynamic_name ());
5032 const char *trap = get_trap_mode_suffix ();
5033 const char *round = get_round_mode_suffix ();
5036 fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
5041 /* Generates the single-precision instruction suffix. */
5042 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5046 /* Generates the double-precision instruction suffix. */
5047 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5051 if (alpha_this_literal_sequence_number == 0)
5052 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5053 fprintf (file, "%d", alpha_this_literal_sequence_number);
5057 if (alpha_this_gpdisp_sequence_number == 0)
5058 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5059 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5063 if (GET_CODE (x) == HIGH)
5064 output_addr_const (file, XEXP (x, 0));
5066 output_operand_lossage ("invalid %%H value");
5073 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5075 x = XVECEXP (x, 0, 0);
5076 lituse = "lituse_tlsgd";
5078 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5080 x = XVECEXP (x, 0, 0);
5081 lituse = "lituse_tlsldm";
5083 else if (CONST_INT_P (x))
5084 lituse = "lituse_jsr";
5087 output_operand_lossage ("invalid %%J value");
5091 if (x != const0_rtx)
5092 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5100 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5101 lituse = "lituse_jsrdirect";
5103 lituse = "lituse_jsr";
5106 gcc_assert (INTVAL (x) != 0);
5107 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
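/* As a hedged illustration, these annotations pair up in the emitted
   assembly roughly as follows for a call through the GOT:

	ldq $27,foo($29)	!literal!2
	jsr $26,($27),foo	!lituse_jsr!2
	ldah $29,0($26)		!gpdisp!3
	lda $29,0($29)		!gpdisp!3

   where the shared sequence numbers tie each lituse to its literal and
   the two halves of the gp reload together.  */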
5111 /* If this operand is the constant zero, write it as "$31". */
5113 fprintf (file, "%s", reg_names[REGNO (x)]);
5114 else if (x == CONST0_RTX (GET_MODE (x)))
5115 fprintf (file, "$31");
5117 output_operand_lossage ("invalid %%r value");
5121 /* Similar, but for floating-point. */
5123 fprintf (file, "%s", reg_names[REGNO (x)]);
5124 else if (x == CONST0_RTX (GET_MODE (x)))
5125 fprintf (file, "$f31");
5127 output_operand_lossage ("invalid %%R value");
5131 /* Write the 1's complement of a constant. */
5132 if (!CONST_INT_P (x))
5133 output_operand_lossage ("invalid %%N value");
5135 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5139 /* Write 1 << C, for a constant C. */
5140 if (!CONST_INT_P (x))
5141 output_operand_lossage ("invalid %%P value");
5143 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5147 /* Write the high-order 16 bits of a constant, sign-extended. */
5148 if (!CONST_INT_P (x))
5149 output_operand_lossage ("invalid %%h value");
5151 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5155 /* Write the low-order 16 bits of a constant, sign-extended. */
5156 if (!CONST_INT_P (x))
5157 output_operand_lossage ("invalid %%L value");
5159 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5160 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
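/* Worked example: for INTVAL 0x12348000 this prints 0x8000 - 2*0x8000
   == -32768, i.e. the low 16 bits taken as a signed quantity, which is
   what the 16-bit displacement of an lda expects.  */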
5164 /* Write mask for ZAP insn. */
5165 if (GET_CODE (x) == CONST_DOUBLE)
5167 HOST_WIDE_INT mask = 0;
5168 HOST_WIDE_INT value;
5170 value = CONST_DOUBLE_LOW (x);
5171 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5176 value = CONST_DOUBLE_HIGH (x);
5177 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5180 mask |= (1 << (i + sizeof (int)));
5182 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5185 else if (CONST_INT_P (x))
5187 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5189 for (i = 0; i < 8; i++, value >>= 8)
5193 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5196 output_operand_lossage ("invalid %%m value");
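/* Worked example for the CONST_INT path above: INTVAL
   0x00ff00ff00ff00ff has bytes 0, 2, 4 and 6 nonzero, so the printed
   zap mask is 0x55.  */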
5200 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5201 if (!CONST_INT_P (x)
5202 || (INTVAL (x) != 8 && INTVAL (x) != 16
5203 && INTVAL (x) != 32 && INTVAL (x) != 64))
5204 output_operand_lossage ("invalid %%M value");
5206 fprintf (file, "%s",
5207 (INTVAL (x) == 8 ? "b"
5208 : INTVAL (x) == 16 ? "w"
5209 : INTVAL (x) == 32 ? "l"
5214 /* Similar, except do it from the mask. */
5215 if (CONST_INT_P (x))
5217 HOST_WIDE_INT value = INTVAL (x);
5224 if (value == 0xffff)
5229 if (value == 0xffffffff)
5240 else if (HOST_BITS_PER_WIDE_INT == 32
5241 && GET_CODE (x) == CONST_DOUBLE
5242 && CONST_DOUBLE_LOW (x) == 0xffffffff
5243 && CONST_DOUBLE_HIGH (x) == 0)
5248 output_operand_lossage ("invalid %%U value");
5252 /* Write the constant value divided by 8. */
5253 if (!CONST_INT_P (x)
5254 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5255 || (INTVAL (x) & 7) != 0)
5256 output_operand_lossage ("invalid %%s value");
5258 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5262 /* Same, except compute (64 - c) / 8. */
5264 if (!CONST_INT_P (x)
5265 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5266 || (INTVAL (x) & 7) != 0)
5267 output_operand_lossage ("invalid %%s value");
5269 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5272 case 'C': case 'D': case 'c': case 'd':
5273 /* Write out comparison name. */
5275 enum rtx_code c = GET_CODE (x);
5277 if (!COMPARISON_P (x))
5278 output_operand_lossage ("invalid %%C value");
5280 else if (code == 'D')
5281 c = reverse_condition (c);
5282 else if (code == 'c')
5283 c = swap_condition (c);
5284 else if (code == 'd')
5285 c = swap_condition (reverse_condition (c));
5288 fprintf (file, "ule");
5290 fprintf (file, "ult");
5291 else if (c == UNORDERED)
5292 fprintf (file, "un");
5294 fprintf (file, "%s", GET_RTX_NAME (c));
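/* Example: given an LE comparison, %C prints "le", %D (reversed)
   prints "gt", %c (swapped) prints "ge" and %d (swapped and reversed)
   prints "lt".  */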
5299 /* Write the divide or modulus operator. */
5300 switch (GET_CODE (x))
5303 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5306 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5309 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5312 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5315 output_operand_lossage ("invalid %%E value");
5321 /* Write "_u" for unaligned access. */
5322 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5323 fprintf (file, "_u");
5328 fprintf (file, "%s", reg_names[REGNO (x)]);
5330 output_address (XEXP (x, 0));
5331 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5333 switch (XINT (XEXP (x, 0), 1))
5337 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5340 output_operand_lossage ("unknown relocation unspec");
5345 output_addr_const (file, x);
5349 output_operand_lossage ("invalid %%xn code");
5354 print_operand_address (FILE *file, rtx addr)
5357 HOST_WIDE_INT offset = 0;
5359 if (GET_CODE (addr) == AND)
5360 addr = XEXP (addr, 0);
5362 if (GET_CODE (addr) == PLUS
5363 && CONST_INT_P (XEXP (addr, 1)))
5365 offset = INTVAL (XEXP (addr, 1));
5366 addr = XEXP (addr, 0);
5369 if (GET_CODE (addr) == LO_SUM)
5371 const char *reloc16, *reloclo;
5372 rtx op1 = XEXP (addr, 1);
5374 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5376 op1 = XEXP (op1, 0);
5377 switch (XINT (op1, 1))
5381 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5385 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5388 output_operand_lossage ("unknown relocation unspec");
5392 output_addr_const (file, XVECEXP (op1, 0, 0));
5397 reloclo = "gprellow";
5398 output_addr_const (file, op1);
5402 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5404 addr = XEXP (addr, 0);
5405 switch (GET_CODE (addr))
5408 basereg = REGNO (addr);
5412 basereg = subreg_regno (addr);
5419 fprintf (file, "($%d)\t\t!%s", basereg,
5420 (basereg == 29 ? reloc16 : reloclo));
5424 switch (GET_CODE (addr))
5427 basereg = REGNO (addr);
5431 basereg = subreg_regno (addr);
5435 offset = INTVAL (addr);
5438 #if TARGET_ABI_OPEN_VMS
5440 fprintf (file, "%s", XSTR (addr, 0));
5444 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5445 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5446 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5447 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5448 INTVAL (XEXP (XEXP (addr, 0), 1)));
5456 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5459 /* Emit RTL insns to initialize the variable parts of a trampoline at
5460 M_TRAMP. FNDECL is the target function's decl. CHAIN_VALUE is an rtx
5461 for the static chain value for the function. */
5464 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5466 rtx fnaddr, mem, word1, word2;
5468 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5470 #ifdef POINTERS_EXTEND_UNSIGNED
5471 fnaddr = convert_memory_address (Pmode, fnaddr);
5472 chain_value = convert_memory_address (Pmode, chain_value);
5475 if (TARGET_ABI_OPEN_VMS)
5480 /* Construct the name of the trampoline entry point. */
5481 fnname = XSTR (fnaddr, 0);
5482 trname = (char *) alloca (strlen (fnname) + 5);
5483 strcpy (trname, fnname);
5484 strcat (trname, "..tr");
5485 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5486 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5488 /* Trampoline (or "bounded") procedure descriptor is constructed from
5489 the function's procedure descriptor with certain fields zeroed in
5490 accordance with the VMS calling standard. This is stored in the first quadword. */
5491 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5492 word1 = expand_and (DImode, word1,
5493 GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
5498 /* These 4 instructions are:
5499 	ldq $1,24($27)
5500 	ldq $27,16($27)
5501 	jmp $31,($27),0
5502 	nop
5503 We don't bother setting the HINT field of the jump; the nop
5504 is merely there for padding. */
5505 word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
5506 word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
5509 /* Store the first two words, as computed above. */
5510 mem = adjust_address (m_tramp, DImode, 0);
5511 emit_move_insn (mem, word1);
5512 mem = adjust_address (m_tramp, DImode, 8);
5513 emit_move_insn (mem, word2);
5515 /* Store function address and static chain value. */
5516 mem = adjust_address (m_tramp, Pmode, 16);
5517 emit_move_insn (mem, fnaddr);
5518 mem = adjust_address (m_tramp, Pmode, 24);
5519 emit_move_insn (mem, chain_value);
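/* The resulting OSF trampoline image is thus (a sketch):

      0:  ldq $1,24($27)       packed little-endian into the
	  ldq $27,16($27)      two code words stored above
      8:  jmp $31,($27),0
	  nop
     16:  target function address
     24:  static chain value  */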
5523 emit_insn (gen_imb ());
5524 #ifdef HAVE_ENABLE_EXECUTE_STACK
5525 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5526 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5531 /* Determine where to put an argument to a function.
5532 Value is zero to push the argument on the stack,
5533 or a hard register in which to store the argument.
5535 MODE is the argument's machine mode.
5536 TYPE is the data type of the argument (as a tree).
5537 This is null for libcalls where that information may not be available.
5539 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5540 the preceding args and about the function being called.
5541 NAMED is nonzero if this argument is a named parameter
5542 (otherwise it is an extra parameter matching an ellipsis).
5544 On Alpha the first 6 words of args are normally in registers
5545 and the rest are pushed. */
5548 alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
5549 const_tree type, bool named ATTRIBUTE_UNUSED)
5551 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5555 /* Don't get confused and pass small structures in FP registers. */
5556 if (type && AGGREGATE_TYPE_P (type))
5560 #ifdef ENABLE_CHECKING
5561 /* With alpha_split_complex_arg, we shouldn't see any raw complex args. */
5563 gcc_assert (!COMPLEX_MODE_P (mode));
5566 /* Set up defaults for FP operands passed in FP registers, and
5567 integral operands passed in integer registers. */
5568 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5574 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5575 the two platforms, so we can't avoid conditional compilation. */
5576 #if TARGET_ABI_OPEN_VMS
5578 if (mode == VOIDmode)
5579 return alpha_arg_info_reg_val (*cum);
5581 num_args = cum->num_args;
5583 || targetm.calls.must_pass_in_stack (mode, type))
5586 #elif TARGET_ABI_OSF
5592 /* VOID is passed as a special flag for "last argument". */
5593 if (type == void_type_node)
5595 else if (targetm.calls.must_pass_in_stack (mode, type))
5599 #error Unhandled ABI
5602 return gen_rtx_REG (mode, num_args + basereg);
5605 /* Update the data in CUM to advance over an argument
5606 of mode MODE and data type TYPE.
5607 (TYPE is null for libcalls where that information may not be available.) */
5610 alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
5611 const_tree type, bool named ATTRIBUTE_UNUSED)
5613 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5614 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5615 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5620 if (!onstack && cum->num_args < 6)
5621 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5622 cum->num_args += increment;
5627 alpha_arg_partial_bytes (cumulative_args_t cum_v,
5628 enum machine_mode mode ATTRIBUTE_UNUSED,
5629 tree type ATTRIBUTE_UNUSED,
5630 bool named ATTRIBUTE_UNUSED)
5633 CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
5635 #if TARGET_ABI_OPEN_VMS
5636 if (cum->num_args < 6
5637 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5638 words = 6 - cum->num_args;
5639 #elif TARGET_ABI_OSF
5640 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5643 #error Unhandled ABI
5646 return words * UNITS_PER_WORD;
5650 /* Return true if TYPE must be returned in memory, instead of in registers. */
5653 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5655 enum machine_mode mode = VOIDmode;
5660 mode = TYPE_MODE (type);
5662 /* All aggregates are returned in memory, except on OpenVMS where
5663 records that fit 64 bits should be returned by immediate value
5664 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5665 if (TARGET_ABI_OPEN_VMS
5666 && TREE_CODE (type) != ARRAY_TYPE
5667 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
5670 if (AGGREGATE_TYPE_P (type))
5674 size = GET_MODE_SIZE (mode);
5675 switch (GET_MODE_CLASS (mode))
5677 case MODE_VECTOR_FLOAT:
5678 /* Pass all float vectors in memory, like an aggregate. */
5681 case MODE_COMPLEX_FLOAT:
5682 /* We judge complex floats on the size of their element,
5683 not the size of the whole type. */
5684 size = GET_MODE_UNIT_SIZE (mode);
5689 case MODE_COMPLEX_INT:
5690 case MODE_VECTOR_INT:
5694 /* ??? We get called on all sorts of random stuff from
5695 aggregate_value_p. We must return something, but it's not
5696 clear what's safe to return. Pretend it's a struct I know nothing about. */
5701 /* Otherwise types must fit in one register. */
5702 return size > UNITS_PER_WORD;
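/* A few illustrative outcomes (a sketch): a 16-byte struct is returned
   in memory on OSF (on VMS only records of at most 8 bytes come back in
   registers), a float vector is always returned in memory, and a
   complex double is judged on its 8-byte element, so it comes back in
   a floating-point register pair.  */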
5705 /* Return true if TYPE should be passed by invisible reference. */
5708 alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
5709 enum machine_mode mode,
5710 const_tree type ATTRIBUTE_UNUSED,
5711 bool named ATTRIBUTE_UNUSED)
5713 return mode == TFmode || mode == TCmode;
5716 /* Define how to find the value returned by a function. VALTYPE is the
5717 data type of the value (as a tree). If the precise function being
5718 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5719 MODE is set instead of VALTYPE for libcalls.
5721 On Alpha the value is found in $0 for integer functions and
5722 $f0 for floating-point functions. */
5725 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5726 enum machine_mode mode)
5728 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5729 enum mode_class mclass;
5731 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5734 mode = TYPE_MODE (valtype);
5736 mclass = GET_MODE_CLASS (mode);
5740 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5741 where we have them returning both SImode and DImode. */
5742 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5743 PROMOTE_MODE (mode, dummy, valtype);
5746 case MODE_COMPLEX_INT:
5747 case MODE_VECTOR_INT:
5755 case MODE_COMPLEX_FLOAT:
5757 enum machine_mode cmode = GET_MODE_INNER (mode);
5759 return gen_rtx_PARALLEL
5762 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5764 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5765 GEN_INT (GET_MODE_SIZE (cmode)))));
5769 /* We should only reach here for BLKmode on VMS. */
5770 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5778 return gen_rtx_REG (mode, regnum);
5781 /* TCmode complex values are passed by invisible reference. We
5782 should not split these values. */
5785 alpha_split_complex_arg (const_tree type)
5787 return TYPE_MODE (type) != TCmode;
5791 alpha_build_builtin_va_list (void)
5793 tree base, ofs, space, record, type_decl;
5795 if (TARGET_ABI_OPEN_VMS)
5796 return ptr_type_node;
5798 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5799 type_decl = build_decl (BUILTINS_LOCATION,
5800 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5801 TYPE_STUB_DECL (record) = type_decl;
5802 TYPE_NAME (record) = type_decl;
5804 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5806 /* Dummy field to prevent alignment warnings. */
5807 space = build_decl (BUILTINS_LOCATION,
5808 FIELD_DECL, NULL_TREE, integer_type_node);
5809 DECL_FIELD_CONTEXT (space) = record;
5810 DECL_ARTIFICIAL (space) = 1;
5811 DECL_IGNORED_P (space) = 1;
5813 ofs = build_decl (BUILTINS_LOCATION,
5814 FIELD_DECL, get_identifier ("__offset"),
5816 DECL_FIELD_CONTEXT (ofs) = record;
5817 DECL_CHAIN (ofs) = space;
5818 /* ??? This is a hack, __offset is marked volatile to prevent
5819 DCE that confuses stdarg optimization and results in
5820 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5821 TREE_THIS_VOLATILE (ofs) = 1;
5823 base = build_decl (BUILTINS_LOCATION,
5824 FIELD_DECL, get_identifier ("__base"),
5826 DECL_FIELD_CONTEXT (base) = record;
5827 DECL_CHAIN (base) = ofs;
5829 TYPE_FIELDS (record) = base;
5830 layout_type (record);
5832 va_list_gpr_counter_field = ofs;
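/* The record built above corresponds roughly to the following C
   declaration (a sketch; the exact field types are the ones chosen by
   the code above):

     struct __va_list_tag {
       void *__base;            -- start of the argument save area
       volatile int __offset;   -- byte offset of the next argument
     };
*/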
5837 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5838 and constant additions. */
5841 va_list_skip_additions (tree lhs)
5847 enum tree_code code;
5849 stmt = SSA_NAME_DEF_STMT (lhs);
5851 if (gimple_code (stmt) == GIMPLE_PHI)
5854 if (!is_gimple_assign (stmt)
5855 || gimple_assign_lhs (stmt) != lhs)
5858 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5860 code = gimple_assign_rhs_code (stmt);
5861 if (!CONVERT_EXPR_CODE_P (code)
5862 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5863 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5864 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5867 lhs = gimple_assign_rhs1 (stmt);
5871 /* Check if LHS = RHS statement is
5872 LHS = *(ap.__base + ap.__offset + cst)
5873 or
5874 LHS = *(ap.__base
5875 + ((ap.__offset + cst <= 47)
5876 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5877 If the former, indicate that GPR registers are needed,
5878 if the latter, indicate that FPR registers are needed.
5880 Also look for LHS = (*ptr).field, where ptr is one of the forms listed above.
5883 On alpha, cfun->va_list_gpr_size is used as size of the needed
5884 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5885 registers are needed and bit 1 set if FPR registers are needed.
5886 Return true if va_list references should not be scanned for the
5887 current statement. */
5890 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5892 tree base, offset, rhs;
5896 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5897 != GIMPLE_SINGLE_RHS)
5900 rhs = gimple_assign_rhs1 (stmt);
5901 while (handled_component_p (rhs))
5902 rhs = TREE_OPERAND (rhs, 0);
5903 if (TREE_CODE (rhs) != MEM_REF
5904 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5907 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5909 || !is_gimple_assign (stmt)
5910 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5913 base = gimple_assign_rhs1 (stmt);
5914 if (TREE_CODE (base) == SSA_NAME)
5916 base_stmt = va_list_skip_additions (base);
5918 && is_gimple_assign (base_stmt)
5919 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5920 base = gimple_assign_rhs1 (base_stmt);
5923 if (TREE_CODE (base) != COMPONENT_REF
5924 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5926 base = gimple_assign_rhs2 (stmt);
5927 if (TREE_CODE (base) == SSA_NAME)
5929 base_stmt = va_list_skip_additions (base);
5931 && is_gimple_assign (base_stmt)
5932 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5933 base = gimple_assign_rhs1 (base_stmt);
5936 if (TREE_CODE (base) != COMPONENT_REF
5937 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5943 base = get_base_address (base);
5944 if (TREE_CODE (base) != VAR_DECL
5945 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names))
5948 offset = gimple_op (stmt, 1 + offset_arg);
5949 if (TREE_CODE (offset) == SSA_NAME)
5951 gimple offset_stmt = va_list_skip_additions (offset);
5954 && gimple_code (offset_stmt) == GIMPLE_PHI)
5957 gimple arg1_stmt, arg2_stmt;
5959 enum tree_code code1, code2;
5961 if (gimple_phi_num_args (offset_stmt) != 2)
5965 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5967 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5968 if (arg1_stmt == NULL
5969 || !is_gimple_assign (arg1_stmt)
5970 || arg2_stmt == NULL
5971 || !is_gimple_assign (arg2_stmt))
5974 code1 = gimple_assign_rhs_code (arg1_stmt);
5975 code2 = gimple_assign_rhs_code (arg2_stmt);
5976 if (code1 == COMPONENT_REF
5977 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5979 else if (code2 == COMPONENT_REF
5980 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5982 gimple tem = arg1_stmt;
5984 arg1_stmt = arg2_stmt;
5990 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5993 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5994 if (code2 == MINUS_EXPR)
5996 if (sub < -48 || sub > -32)
5999 arg1 = gimple_assign_rhs1 (arg1_stmt);
6000 arg2 = gimple_assign_rhs1 (arg2_stmt);
6001 if (TREE_CODE (arg2) == SSA_NAME)
6003 arg2_stmt = va_list_skip_additions (arg2);
6004 if (arg2_stmt == NULL
6005 || !is_gimple_assign (arg2_stmt)
6006 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6008 arg2 = gimple_assign_rhs1 (arg2_stmt);
6013 if (TREE_CODE (arg1) != COMPONENT_REF
6014 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6015 || get_base_address (arg1) != base)
6018 /* Need floating point regs. */
6019 cfun->va_list_fpr_size |= 2;
6023 && is_gimple_assign (offset_stmt)
6024 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6025 offset = gimple_assign_rhs1 (offset_stmt);
6027 if (TREE_CODE (offset) != COMPONENT_REF
6028 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6029 || get_base_address (offset) != base)
6032 /* Need general regs. */
6033 cfun->va_list_fpr_size |= 1;
6037 si->va_list_escapes = true;
6042 /* Perform any actions needed for a function that is receiving a
6043 variable number of arguments. */
6046 alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
6047 tree type, int *pretend_size, int no_rtl)
6049 CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
6051 /* Skip the current argument. */
6052 targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
6055 #if TARGET_ABI_OPEN_VMS
6056 /* For VMS, we allocate space for all 6 arg registers plus a count.
6058 However, if NO registers need to be saved, don't allocate any space.
6059 This is not only because we won't need the space, but because AP
6060 includes the current_pretend_args_size and we don't want to mess up
6061 any ap-relative addresses already made. */
6062 if (cum.num_args < 6)
6066 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6067 emit_insn (gen_arg_home ());
6069 *pretend_size = 7 * UNITS_PER_WORD;
6072 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6073 only push those that are remaining. However, if NO registers need to
6074 be saved, don't allocate any space. This is not only because we won't
6075 need the space, but because AP includes the current_pretend_args_size
6076 and we don't want to mess up any ap-relative addresses already made.
6078 If we are not to use the floating-point registers, save the integer
6079 registers where we would put the floating-point registers. This is
6080 not the most efficient way to implement varargs with just one register
6081 class, but it isn't worth doing anything more efficient in this rare case. */
6089 alias_set_type set = get_varargs_alias_set ();
6092 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6093 if (count > 6 - cum)
6096 /* Detect whether integer registers or floating-point registers
6097 are needed by the detected va_arg statements. See above for
6098 how these values are computed. Note that the "escape" value
6099 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of these bits set. */
6101 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6103 if (cfun->va_list_fpr_size & 1)
6105 tmp = gen_rtx_MEM (BLKmode,
6106 plus_constant (Pmode, virtual_incoming_args_rtx,
6107 (cum + 6) * UNITS_PER_WORD));
6108 MEM_NOTRAP_P (tmp) = 1;
6109 set_mem_alias_set (tmp, set);
6110 move_block_from_reg (16 + cum, tmp, count);
6113 if (cfun->va_list_fpr_size & 2)
6115 tmp = gen_rtx_MEM (BLKmode,
6116 plus_constant (Pmode, virtual_incoming_args_rtx,
6117 cum * UNITS_PER_WORD));
6118 MEM_NOTRAP_P (tmp) = 1;
6119 set_mem_alias_set (tmp, set);
6120 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6123 *pretend_size = 12 * UNITS_PER_WORD;
6128 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6130 HOST_WIDE_INT offset;
6131 tree t, offset_field, base_field;
6133 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6136 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6137 up by 48, storing fp arg registers in the first 48 bytes, and the
6138 integer arg registers in the next 48 bytes. This is only done,
6139 however, if any integer registers need to be stored.
6141 If no integer registers need be stored, then we must subtract 48
6142 in order to account for the integer arg registers which are counted
6143 in argsize above, but which are not actually stored on the stack.
6144 Must further be careful here about structures straddling the last
6145 integer argument register; that futzes with pretend_args_size,
6146 which changes the meaning of AP. */
6149 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6151 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6153 if (TARGET_ABI_OPEN_VMS)
6155 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6156 t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
6157 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6158 TREE_SIDE_EFFECTS (t) = 1;
6159 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6163 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6164 offset_field = DECL_CHAIN (base_field);
6166 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6167 valist, base_field, NULL_TREE);
6168 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6169 valist, offset_field, NULL_TREE);
6171 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6172 t = fold_build_pointer_plus_hwi (t, offset);
6173 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6174 TREE_SIDE_EFFECTS (t) = 1;
6175 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6177 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6178 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6179 TREE_SIDE_EFFECTS (t) = 1;
6180 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
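/* Net effect on OSF (a sketch): after va_start,

     ap.__base   == incoming-args pointer + pretend_args_size - 48
     ap.__offset == NUM_ARGS * 8

   so the FP argument save area sits in the 48 bytes below __base and
   each subsequent va_arg bumps __offset by the rounded argument size.  */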
6185 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6188 tree type_size, ptr_type, addend, t, addr;
6189 gimple_seq internal_post;
6191 /* If the type could not be passed in registers, skip the block
6192 reserved for the registers. */
6193 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6195 t = build_int_cst (TREE_TYPE (offset), 6*8);
6196 gimplify_assign (offset,
6197 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6202 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6204 if (TREE_CODE (type) == COMPLEX_TYPE)
6206 tree real_part, imag_part, real_temp;
6208 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6211 /* Copy the value into a new temporary, lest the formal temporary
6212 be reused out from under us. */
6213 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6215 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6218 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6220 else if (TREE_CODE (type) == REAL_TYPE)
6222 tree fpaddend, cond, fourtyeight;
6224 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6225 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6226 addend, fourtyeight);
6227 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6228 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6232 /* Build the final address and force that value into a temporary. */
6233 addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
6234 internal_post = NULL;
6235 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6236 gimple_seq_add_seq (pre_p, internal_post);
6238 /* Update the offset field. */
6239 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6240 if (type_size == NULL || TREE_OVERFLOW (type_size))
6244 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6245 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6246 t = size_binop (MULT_EXPR, t, size_int (8));
6248 t = fold_convert (TREE_TYPE (offset), t);
6249 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6252 return build_va_arg_indirect_ref (addr);
6256 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6259 tree offset_field, base_field, offset, base, t, r;
6262 if (TARGET_ABI_OPEN_VMS)
6263 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6265 base_field = TYPE_FIELDS (va_list_type_node);
6266 offset_field = DECL_CHAIN (base_field);
6267 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6268 valist, base_field, NULL_TREE);
6269 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6270 valist, offset_field, NULL_TREE);
6272 /* Pull the fields of the structure out into temporaries. Since we never
6273 modify the base field, we can use a formal temporary. Sign-extend the
6274 offset field so that it's the proper width for pointer arithmetic. */
6275 base = get_formal_tmp_var (base_field, pre_p);
6277 t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
6278 offset = get_initialized_tmp_var (t, pre_p, NULL);
6280 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6282 type = build_pointer_type_for_mode (type, ptr_mode, true);
6284 /* Find the value. Note that this will be a stable indirection, or
6285 a composite of stable indirections in the case of complex. */
6286 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6288 /* Stuff the offset temporary back into its field. */
6289 gimplify_assign (unshare_expr (offset_field),
6290 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6293 r = build_va_arg_indirect_ref (r);
6302 ALPHA_BUILTIN_CMPBGE,
6303 ALPHA_BUILTIN_EXTBL,
6304 ALPHA_BUILTIN_EXTWL,
6305 ALPHA_BUILTIN_EXTLL,
6306 ALPHA_BUILTIN_EXTQL,
6307 ALPHA_BUILTIN_EXTWH,
6308 ALPHA_BUILTIN_EXTLH,
6309 ALPHA_BUILTIN_EXTQH,
6310 ALPHA_BUILTIN_INSBL,
6311 ALPHA_BUILTIN_INSWL,
6312 ALPHA_BUILTIN_INSLL,
6313 ALPHA_BUILTIN_INSQL,
6314 ALPHA_BUILTIN_INSWH,
6315 ALPHA_BUILTIN_INSLH,
6316 ALPHA_BUILTIN_INSQH,
6317 ALPHA_BUILTIN_MSKBL,
6318 ALPHA_BUILTIN_MSKWL,
6319 ALPHA_BUILTIN_MSKLL,
6320 ALPHA_BUILTIN_MSKQL,
6321 ALPHA_BUILTIN_MSKWH,
6322 ALPHA_BUILTIN_MSKLH,
6323 ALPHA_BUILTIN_MSKQH,
6324 ALPHA_BUILTIN_UMULH,
6326 ALPHA_BUILTIN_ZAPNOT,
6327 ALPHA_BUILTIN_AMASK,
6328 ALPHA_BUILTIN_IMPLVER,
6330 ALPHA_BUILTIN_THREAD_POINTER,
6331 ALPHA_BUILTIN_SET_THREAD_POINTER,
6332 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6333 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6336 ALPHA_BUILTIN_MINUB8,
6337 ALPHA_BUILTIN_MINSB8,
6338 ALPHA_BUILTIN_MINUW4,
6339 ALPHA_BUILTIN_MINSW4,
6340 ALPHA_BUILTIN_MAXUB8,
6341 ALPHA_BUILTIN_MAXSB8,
6342 ALPHA_BUILTIN_MAXUW4,
6343 ALPHA_BUILTIN_MAXSW4,
6347 ALPHA_BUILTIN_UNPKBL,
6348 ALPHA_BUILTIN_UNPKBW,
6353 ALPHA_BUILTIN_CTPOP,
6358 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6359 CODE_FOR_builtin_cmpbge,
6367 CODE_FOR_builtin_insbl,
6368 CODE_FOR_builtin_inswl,
6369 CODE_FOR_builtin_insll,
6381 CODE_FOR_umuldi3_highpart,
6382 CODE_FOR_builtin_zap,
6383 CODE_FOR_builtin_zapnot,
6384 CODE_FOR_builtin_amask,
6385 CODE_FOR_builtin_implver,
6386 CODE_FOR_builtin_rpcc,
6389 CODE_FOR_builtin_establish_vms_condition_handler,
6390 CODE_FOR_builtin_revert_vms_condition_handler,
6393 CODE_FOR_builtin_minub8,
6394 CODE_FOR_builtin_minsb8,
6395 CODE_FOR_builtin_minuw4,
6396 CODE_FOR_builtin_minsw4,
6397 CODE_FOR_builtin_maxub8,
6398 CODE_FOR_builtin_maxsb8,
6399 CODE_FOR_builtin_maxuw4,
6400 CODE_FOR_builtin_maxsw4,
6401 CODE_FOR_builtin_perr,
6402 CODE_FOR_builtin_pklb,
6403 CODE_FOR_builtin_pkwb,
6404 CODE_FOR_builtin_unpkbl,
6405 CODE_FOR_builtin_unpkbw,
6410 CODE_FOR_popcountdi2
6413 struct alpha_builtin_def
6416 enum alpha_builtin code;
6417 unsigned int target_mask;
6421 static struct alpha_builtin_def const zero_arg_builtins[] = {
6422 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6423 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6426 static struct alpha_builtin_def const one_arg_builtins[] = {
6427 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6428 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6429 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6430 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6431 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6432 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6433 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6434 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6437 static struct alpha_builtin_def const two_arg_builtins[] = {
6438 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6439 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6440 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6441 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6442 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6443 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6444 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6445 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6446 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6447 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6448 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6449 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6450 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6451 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6452 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6453 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6454 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6455 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6456 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6457 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6458 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6459 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6460 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6461 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6462 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6463 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6464 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6465 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6466 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6467 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6468 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6469 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6470 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6471 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6474 static GTY(()) tree alpha_dimode_u;
6475 static GTY(()) tree alpha_v8qi_u;
6476 static GTY(()) tree alpha_v8qi_s;
6477 static GTY(()) tree alpha_v4hi_u;
6478 static GTY(()) tree alpha_v4hi_s;
6480 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6482 /* Return the alpha builtin for CODE. */
6485 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6487 if (code >= ALPHA_BUILTIN_max)
6488 return error_mark_node;
6489 return alpha_builtins[code];
6492 /* Helper function of alpha_init_builtins. Add the built-in specified
6493 by NAME, TYPE, CODE, and ECF. */
6496 alpha_builtin_function (const char *name, tree ftype,
6497 enum alpha_builtin code, unsigned ecf)
6499 tree decl = add_builtin_function (name, ftype, (int) code,
6500 BUILT_IN_MD, NULL, NULL_TREE);
6502 if (ecf & ECF_CONST)
6503 TREE_READONLY (decl) = 1;
6504 if (ecf & ECF_NOTHROW)
6505 TREE_NOTHROW (decl) = 1;
6507 alpha_builtins [(int) code] = decl;
6510 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6511 functions pointed to by P, with function type FTYPE. */
6514 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6519 for (i = 0; i < count; ++i, ++p)
6520 if ((target_flags & p->target_mask) == p->target_mask)
6521 alpha_builtin_function (p->name, ftype, p->code,
6522 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6526 alpha_init_builtins (void)
6530 alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
6531 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6532 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6533 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6534 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6536 ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
6537 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
6539 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
6540 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
6542 ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
6543 alpha_dimode_u, NULL_TREE);
6544 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
6546 ftype = build_function_type_list (ptr_type_node, NULL_TREE);
6547 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6548 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6550 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6551 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6552 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6554 if (TARGET_ABI_OPEN_VMS)
6556 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6558 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6560 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6563 ftype = build_function_type_list (ptr_type_node, void_type_node,
6565 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6566 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6568 vms_patch_builtins ();
6572 /* Expand an expression EXP that calls a built-in function,
6573 with result going to TARGET if that's convenient
6574 (and in mode MODE if that's convenient).
6575 SUBTARGET may be used as the target for computing one of EXP's operands.
6576 IGNORE is nonzero if the value is to be ignored. */
6579 alpha_expand_builtin (tree exp, rtx target,
6580 rtx subtarget ATTRIBUTE_UNUSED,
6581 enum machine_mode mode ATTRIBUTE_UNUSED,
6582 int ignore ATTRIBUTE_UNUSED)
6586 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6587 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6589 call_expr_arg_iterator iter;
6590 enum insn_code icode;
6591 rtx op[MAX_ARGS], pat;
6595 if (fcode >= ALPHA_BUILTIN_max)
6596 internal_error ("bad builtin fcode");
6597 icode = code_for_builtin[fcode];
6599 internal_error ("bad builtin fcode");
6601 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6604 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6606 const struct insn_operand_data *insn_op;
6608 if (arg == error_mark_node)
6610 if (arity > MAX_ARGS)
6613 insn_op = &insn_data[icode].operand[arity + nonvoid];
6615 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6617 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6618 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6624 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6626 || GET_MODE (target) != tmode
6627 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6628 target = gen_reg_rtx (tmode);
6634 pat = GEN_FCN (icode) (target);
6638 pat = GEN_FCN (icode) (target, op[0]);
6640 pat = GEN_FCN (icode) (op[0]);
6643 pat = GEN_FCN (icode) (target, op[0], op[1]);
6659 /* Several bits below assume HWI >= 64 bits. This should be enforced by config.gcc. */
6661 #if HOST_BITS_PER_WIDE_INT < 64
6662 # error "HOST_WIDE_INT too small"
6665 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6666 with an 8-bit output vector. OPINT contains the integer operands; bit N
6667 of OP_CONST is set if OPINT[N] is valid. */
6670 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6675 for (i = 0, val = 0; i < 8; ++i)
6677 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6678 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6682 return build_int_cst (alpha_dimode_u, val);
6684 else if (op_const == 2 && opint[1] == 0)
6685 return build_int_cst (alpha_dimode_u, 0xff);
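/* Worked example: folding cmpbge with opint[0] == 0x0102030405060708
   and opint[1] == 0x0807060504030201 compares corresponding bytes as
   unsigned values; bytes 0-3 of the first operand (0x08 .. 0x05) are
   >= their counterparts while bytes 4-7 are not, giving 0x0f.  */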
6689 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6690 specialized form of an AND operation. Other byte manipulation instructions
6691 are defined in terms of this instruction, so this is also used as a
6692 subroutine for other builtins.
6694 OP contains the tree operands; OPINT contains the extracted integer values.
6695 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6696 OPINT may be considered. */
6699 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6704 unsigned HOST_WIDE_INT mask = 0;
6707 for (i = 0; i < 8; ++i)
6708 if ((opint[1] >> i) & 1)
6709 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6712 return build_int_cst (alpha_dimode_u, opint[0] & mask);
6715 return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
6716 build_int_cst (alpha_dimode_u, mask));
6718 else if ((op_const & 1) && opint[0] == 0)
6719 return build_int_cst (alpha_dimode_u, 0);
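/* Worked example: a byte selector of 0x0f expands to the mask
   0x00000000ffffffff above, so zapnot (x, 0x0f) folds to
   x & 0xffffffff, keeping only the low four bytes.  */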
6723 /* Fold the builtins for the EXT family of instructions. */
6726 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6727 long op_const, unsigned HOST_WIDE_INT bytemask,
6731 tree *zap_op = NULL;
6735 unsigned HOST_WIDE_INT loc;
6738 loc *= BITS_PER_UNIT;
6744 unsigned HOST_WIDE_INT temp = opint[0];
6757 opint[1] = bytemask;
6758 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6761 /* Fold the builtins for the INS family of instructions. */
6764 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6765 long op_const, unsigned HOST_WIDE_INT bytemask,
6768 if ((op_const & 1) && opint[0] == 0)
6769 return build_int_cst (alpha_dimode_u, 0);
6773 unsigned HOST_WIDE_INT temp, loc, byteloc;
6774 tree *zap_op = NULL;
6782 byteloc = (64 - (loc * 8)) & 0x3f;
6799 opint[1] = bytemask;
6800 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6807 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6808 long op_const, unsigned HOST_WIDE_INT bytemask,
6813 unsigned HOST_WIDE_INT loc;
6821 opint[1] = bytemask ^ 0xff;
6824 return alpha_fold_builtin_zapnot (op, opint, op_const);
6828 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6830 tree op0 = fold_convert (vtype, op[0]);
6831 tree op1 = fold_convert (vtype, op[1]);
6832 tree val = fold_build2 (code, vtype, op0, op1);
6833 return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
6837 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6839 unsigned HOST_WIDE_INT temp = 0;
6845 for (i = 0; i < 8; ++i)
6847 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6848 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6855 return build_int_cst (alpha_dimode_u, temp);
6859 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6861 unsigned HOST_WIDE_INT temp;
6866 temp = opint[0] & 0xff;
6867 temp |= (opint[0] >> 24) & 0xff00;
6869 return build_int_cst (alpha_dimode_u, temp);
6873 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6875 unsigned HOST_WIDE_INT temp;
6880 temp = opint[0] & 0xff;
6881 temp |= (opint[0] >> 8) & 0xff00;
6882 temp |= (opint[0] >> 16) & 0xff0000;
6883 temp |= (opint[0] >> 24) & 0xff000000;
6885 return build_int_cst (alpha_dimode_u, temp);
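/* Worked example: pkwb gathers the low byte of each 16-bit element
   into the low four result bytes, so opint[0] == 0x00dd00cc00bb00aa
   folds to 0x00000000ddccbbaa.  */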
6889 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6891 unsigned HOST_WIDE_INT temp;
6896 temp = opint[0] & 0xff;
6897 temp |= (opint[0] & 0xff00) << 24;
6899 return build_int_cst (alpha_dimode_u, temp);
6903 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6905 unsigned HOST_WIDE_INT temp;
6910 temp = opint[0] & 0xff;
6911 temp |= (opint[0] & 0x0000ff00) << 8;
6912 temp |= (opint[0] & 0x00ff0000) << 16;
6913 temp |= (opint[0] & 0xff000000) << 24;
6915 return build_int_cst (alpha_dimode_u, temp);
6919 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6921 unsigned HOST_WIDE_INT temp;
6929 temp = exact_log2 (opint[0] & -opint[0]);
6931 return build_int_cst (alpha_dimode_u, temp);
6935 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6937 unsigned HOST_WIDE_INT temp;
6945 temp = 64 - floor_log2 (opint[0]) - 1;
6947 return build_int_cst (alpha_dimode_u, temp);
6951 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6953 unsigned HOST_WIDE_INT temp, op;
6961 temp++, op &= op - 1;
6963 return build_int_cst (alpha_dimode_u, temp);
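/* The loop above is the classic Kernighan bit count: each op &= op - 1
   step clears the lowest set bit, so e.g. op == 0x28 (binary 101000)
   takes two iterations and the call folds to 2.  */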
6966 /* Fold one of our builtin functions. */
6969 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6970 bool ignore ATTRIBUTE_UNUSED)
6972 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6976 if (n_args > MAX_ARGS)
6979 for (i = 0; i < n_args; i++)
6982 if (arg == error_mark_node)
6986 if (TREE_CODE (arg) == INTEGER_CST)
6988 op_const |= 1L << i;
6989 opint[i] = int_cst_value (arg);
6993 switch (DECL_FUNCTION_CODE (fndecl))
6995 case ALPHA_BUILTIN_CMPBGE:
6996 return alpha_fold_builtin_cmpbge (opint, op_const);
6998 case ALPHA_BUILTIN_EXTBL:
6999 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7000 case ALPHA_BUILTIN_EXTWL:
7001 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7002 case ALPHA_BUILTIN_EXTLL:
7003 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7004 case ALPHA_BUILTIN_EXTQL:
7005 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7006 case ALPHA_BUILTIN_EXTWH:
7007 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7008 case ALPHA_BUILTIN_EXTLH:
7009 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7010 case ALPHA_BUILTIN_EXTQH:
7011 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7013 case ALPHA_BUILTIN_INSBL:
7014 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7015 case ALPHA_BUILTIN_INSWL:
7016 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7017 case ALPHA_BUILTIN_INSLL:
7018 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7019 case ALPHA_BUILTIN_INSQL:
7020 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7021 case ALPHA_BUILTIN_INSWH:
7022 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7023 case ALPHA_BUILTIN_INSLH:
7024 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7025 case ALPHA_BUILTIN_INSQH:
7026 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7028 case ALPHA_BUILTIN_MSKBL:
7029 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7030 case ALPHA_BUILTIN_MSKWL:
7031 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7032 case ALPHA_BUILTIN_MSKLL:
7033 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7034 case ALPHA_BUILTIN_MSKQL:
7035 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7036 case ALPHA_BUILTIN_MSKWH:
7037 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7038 case ALPHA_BUILTIN_MSKLH:
7039 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7040 case ALPHA_BUILTIN_MSKQH:
7041 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7043 case ALPHA_BUILTIN_UMULH:
7044 return fold_build2 (MULT_HIGHPART_EXPR, alpha_dimode_u, op[0], op[1]);
7046 case ALPHA_BUILTIN_ZAP:
7049 case ALPHA_BUILTIN_ZAPNOT:
7050 return alpha_fold_builtin_zapnot (op, opint, op_const);
7052 case ALPHA_BUILTIN_MINUB8:
7053 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7054 case ALPHA_BUILTIN_MINSB8:
7055 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7056 case ALPHA_BUILTIN_MINUW4:
7057 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7058 case ALPHA_BUILTIN_MINSW4:
7059 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7060 case ALPHA_BUILTIN_MAXUB8:
7061 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7062 case ALPHA_BUILTIN_MAXSB8:
7063 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7064 case ALPHA_BUILTIN_MAXUW4:
7065 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7066 case ALPHA_BUILTIN_MAXSW4:
7067 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7069 case ALPHA_BUILTIN_PERR:
7070 return alpha_fold_builtin_perr (opint, op_const);
7071 case ALPHA_BUILTIN_PKLB:
7072 return alpha_fold_builtin_pklb (opint, op_const);
7073 case ALPHA_BUILTIN_PKWB:
7074 return alpha_fold_builtin_pkwb (opint, op_const);
7075 case ALPHA_BUILTIN_UNPKBL:
7076 return alpha_fold_builtin_unpkbl (opint, op_const);
7077 case ALPHA_BUILTIN_UNPKBW:
7078 return alpha_fold_builtin_unpkbw (opint, op_const);
7080 case ALPHA_BUILTIN_CTTZ:
7081 return alpha_fold_builtin_cttz (opint, op_const);
7082 case ALPHA_BUILTIN_CTLZ:
7083 return alpha_fold_builtin_ctlz (opint, op_const);
7084 case ALPHA_BUILTIN_CTPOP:
7085 return alpha_fold_builtin_ctpop (opint, op_const);
7087 case ALPHA_BUILTIN_AMASK:
7088 case ALPHA_BUILTIN_IMPLVER:
7089 case ALPHA_BUILTIN_RPCC:
7090 case ALPHA_BUILTIN_THREAD_POINTER:
7091 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7092 /* None of these are foldable at compile-time. */
7098 /* This page contains routines that are used to determine what the function
7099 prologue and epilogue code will do and write them out. */
7101 /* Compute the size of the save area in the stack. */
7103 /* These variables are used for communication between the following functions.
7104 They indicate various things about the current function being compiled
7105 that are used to tell what kind of prologue, epilogue and procedure
7106 descriptor to generate. */
7108 /* The kind of procedure this function requires: null, register, or stack frame. */
7109 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7110 static enum alpha_procedure_types alpha_procedure_type;
7112 /* Register number (either FP or SP) that is used to unwind the frame. */
7113 static int vms_unwind_regno;
7115 /* Register number used to save FP. We need not have one for RA since
7116 we don't modify it for register procedures. This is only defined
7117 for register frame procedures. */
7118 static int vms_save_fp_regno;
7120 /* Register number used to reference objects off our PV. */
7121 static int vms_base_regno;
7123 /* Compute register masks for saved registers. */
7126 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7128 unsigned long imask = 0;
7129 unsigned long fmask = 0;
7132 /* When outputting a thunk, we don't have valid register life info,
7133 but assemble_start_function wants to output .frame and .mask directives. */
7142 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7143 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7145 /* One for every register we have to save. */
7146 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7147 if (! fixed_regs[i] && ! call_used_regs[i]
7148 && df_regs_ever_live_p (i) && i != REG_RA)
7151 imask |= (1UL << i);
7153 fmask |= (1UL << (i - 32));
7156 /* We need to restore these for the handler. */
7157 if (crtl->calls_eh_return)
7161 unsigned regno = EH_RETURN_DATA_REGNO (i);
7162 if (regno == INVALID_REGNUM)
7164 imask |= 1UL << regno;
7168 /* If any register spilled, then spill the return address also. */
7169 /* ??? This is required by the Digital stack unwind specification
7170 and isn't needed if we're doing Dwarf2 unwinding. */
7171 if (imask || fmask || alpha_ra_ever_killed ())
7172 imask |= (1UL << REG_RA);
7179 alpha_sa_size (void)
7181 unsigned long mask[2];
7185 alpha_sa_mask (&mask[0], &mask[1]);
7187 for (j = 0; j < 2; ++j)
7188 for (i = 0; i < 32; ++i)
7189 if ((mask[j] >> i) & 1)
7192 if (TARGET_ABI_OPEN_VMS)
7194 /* Start with a stack procedure if we make any calls (REG_RA used), or
7195 need a frame pointer, with a register procedure if we otherwise need
7196 at least a slot, and with a null procedure in other cases. */
7197 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7198 alpha_procedure_type = PT_STACK;
7199 else if (get_frame_size() != 0)
7200 alpha_procedure_type = PT_REGISTER;
7202 alpha_procedure_type = PT_NULL;
7204 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7205 made the final decision on stack procedure vs register procedure. */
7206 if (alpha_procedure_type == PT_STACK)
7209 /* Decide whether to refer to objects off our PV via FP or PV.
7210 If we need FP for something else or if we receive a nonlocal
7211 goto (which expects PV to contain the value), we must use PV.
7212 Otherwise, start by assuming we can use FP. */
7214 vms_base_regno
7215 = (frame_pointer_needed
7216 || cfun->has_nonlocal_label
7217 || alpha_procedure_type == PT_STACK
7218 || crtl->outgoing_args_size)
7219 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7221 /* If we want to copy PV into FP, we need to find some register
7222 in which to save FP. */
7224 vms_save_fp_regno = -1;
7225 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7226 for (i = 0; i < 32; i++)
7227 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7228 vms_save_fp_regno = i;
7230 /* A VMS condition handler requires a stack procedure in our
7231 implementation (not required by the calling standard). */
7232 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7233 || cfun->machine->uses_condition_handler)
7234 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7235 else if (alpha_procedure_type == PT_NULL)
7236 vms_base_regno = REG_PV;
7238 /* Stack unwinding should be done via FP unless we use it for PV. */
7239 vms_unwind_regno = (vms_base_regno == REG_PV
7240 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7242 /* If this is a stack procedure, allow space for saving FP, RA and
7243 a condition handler slot if needed. */
7244 if (alpha_procedure_type == PT_STACK)
7245 sa_size += 2 + cfun->machine->uses_condition_handler;
7249 /* Our size must be even (multiple of 16 bytes). */
7257 /* Define the offset between two registers, one to be eliminated,
7258 and the other its replacement, at the start of a routine. */
7261 alpha_initial_elimination_offset (unsigned int from,
7262 unsigned int to ATTRIBUTE_UNUSED)
7263 {
7264 HOST_WIDE_INT ret;
7266 ret = alpha_sa_size ();
7267 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7269 switch (from)
7270 {
7271 case FRAME_POINTER_REGNUM:
7272 break;
7274 case ARG_POINTER_REGNUM:
7275 ret += (ALPHA_ROUND (get_frame_size ()
7276 + crtl->args.pretend_args_size)
7277 - crtl->args.pretend_args_size);
7278 break;
7280 default:
7281 gcc_unreachable ();
7282 }
7284 return ret;
7285 }
7287 #if TARGET_ABI_OPEN_VMS
7289 /* Worker function for TARGET_CAN_ELIMINATE. */
7291 static bool
7292 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7293 {
7294 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7295 alpha_sa_size ();
7297 switch (alpha_procedure_type)
7299 case PT_NULL:
7300 /* NULL procedures have no frame of their own and we only
7301 know how to resolve from the current stack pointer. */
7302 return to == STACK_POINTER_REGNUM;
7304 case PT_REGISTER:
7305 case PT_STACK:
7306 /* We always eliminate, except that eliminating to the stack pointer
7307 is only allowed when no usable frame pointer is at hand. */
7308 return (to != STACK_POINTER_REGNUM
7309 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7315 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7316 designates the same location as FROM. */
7319 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7321 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7322 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7323 on the proper computations, and will need the register save area size. */
7326 HOST_WIDE_INT sa_size = alpha_sa_size ();
7328 /* PT_NULL procedures have no frame of their own and we only allow
7329 elimination to the stack pointer. This is the argument pointer and we
7330 resolve the soft frame pointer to that as well. */
7332 if (alpha_procedure_type == PT_NULL)
7333 return 0;
7335 /* For a PT_STACK procedure the frame layout looks as follows
7337 -----> decreasing addresses
7339 < size rounded up to 16 | likewise >
7340 --------------#------------------------------+++--------------+++-------#
7341 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7342 --------------#---------------------------------------------------------#
7344 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7347 PT_REGISTER procedures are similar in that they may have a frame of their
7348 own. They have no regs-sa/pv/outgoing-args area.
7350 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7351 to STACK_PTR if need be. */
7354 HOST_WIDE_INT offset;
7355 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7357 switch (from)
7358 {
7359 case FRAME_POINTER_REGNUM:
7360 offset = ALPHA_ROUND (sa_size + pv_save_size);
7361 break;
7362 case ARG_POINTER_REGNUM:
7363 offset = (ALPHA_ROUND (sa_size + pv_save_size
7364 + get_frame_size ()
7365 + crtl->args.pretend_args_size)
7366 - crtl->args.pretend_args_size);
7367 break;
7369 default:
7370 gcc_unreachable ();
7371 }
7372 if (to == STACK_POINTER_REGNUM)
7373 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7375 return offset;
7376 }
7379 #define COMMON_OBJECT "common_object"
7382 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7383 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7384 bool *no_add_attrs ATTRIBUTE_UNUSED)
7387 gcc_assert (DECL_P (decl));
7389 DECL_COMMON (decl) = 1;
7393 static const struct attribute_spec vms_attribute_table[] =
7395 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7396 affects_type_identity } */
7397 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7398 { NULL, 0, 0, false, false, false, NULL, false }
7402 vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
7403 unsigned HOST_WIDE_INT size,
7406 tree attr = DECL_ATTRIBUTES (decl);
7407 fprintf (file, "%s", COMMON_ASM_OP);
7408 assemble_name (file, name);
7409 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7410 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7411 fprintf (file, ",%u", align / BITS_PER_UNIT);
7414 attr = lookup_attribute (COMMON_OBJECT, attr);
7416 fprintf (file, ",%s",
7417 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7422 #undef COMMON_OBJECT
7427 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7429 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7433 alpha_find_lo_sum_using_gp (rtx insn)
7435 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7439 alpha_does_function_need_gp (void)
7443 /* The GP being variable is an OSF abi thing. */
7444 if (! TARGET_ABI_OSF)
7447 /* We need the gp to load the address of __mcount. */
7448 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7451 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7455 /* The nonlocal receiver pattern assumes that the gp is valid for
7456 the nested function. Reasonable because it's almost always set
7457 correctly already. For the cases where that's wrong, make sure
7458 the nested function loads its gp on entry. */
7459 if (crtl->has_nonlocal_goto)
7462 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7463 Even if we are a static function, we still need to do this in case
7464 our address is taken and passed to something like qsort. */
7466 push_topmost_sequence ();
7467 insn = get_insns ();
7468 pop_topmost_sequence ();
7470 for (; insn; insn = NEXT_INSN (insn))
7471 if (NONDEBUG_INSN_P (insn)
7472 && ! JUMP_TABLE_DATA_P (insn)
7473 && GET_CODE (PATTERN (insn)) != USE
7474 && GET_CODE (PATTERN (insn)) != CLOBBER
7475 && get_attr_usegp (insn))
7482 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7486 set_frame_related_p (void)
7488 rtx seq = get_insns ();
7499 while (insn != NULL_RTX)
7501 RTX_FRAME_RELATED_P (insn) = 1;
7502 insn = NEXT_INSN (insn);
7504 seq = emit_insn (seq);
7508 seq = emit_insn (seq);
7509 RTX_FRAME_RELATED_P (seq) = 1;
7514 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7516 /* Generates a store with the proper unwind info attached. VALUE is
7517 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7518 contains SP+FRAME_BIAS, and that is the unwind info that should be
7519 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7520 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7523 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7524 HOST_WIDE_INT base_ofs, rtx frame_reg)
7526 rtx addr, mem, insn;
7528 addr = plus_constant (Pmode, base_reg, base_ofs);
7529 mem = gen_frame_mem (DImode, addr);
7531 insn = emit_move_insn (mem, value);
7532 RTX_FRAME_RELATED_P (insn) = 1;
7534 if (frame_bias || value != frame_reg)
7538 addr = plus_constant (Pmode, stack_pointer_rtx,
7539 frame_bias + base_ofs);
7540 mem = gen_rtx_MEM (DImode, addr);
7543 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7544 gen_rtx_SET (VOIDmode, mem, frame_reg));
7549 emit_frame_store (unsigned int regno, rtx base_reg,
7550 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7552 rtx reg = gen_rtx_REG (DImode, regno);
7553 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7556 /* Compute the frame size. SIZE is the size of the "naked" frame
7557 and SA_SIZE is the size of the register save area. */
7559 static HOST_WIDE_INT
7560 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7562 if (TARGET_ABI_OPEN_VMS)
7563 return ALPHA_ROUND (sa_size
7564 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7566 + crtl->args.pretend_args_size);
7568 return ALPHA_ROUND (crtl->outgoing_args_size)
7571 + crtl->args.pretend_args_size);
7574 /* Write function prologue. */
7576 /* On VMS we have two kinds of functions:
7578 - stack frame (PROC_STACK)
7579 these are 'normal' functions with local variables that
7580 call other functions
7581 - register frame (PROC_REGISTER)
7582 keeps all data in registers, needs no stack
7584 We must pass this to the assembler so it can generate the
7585 proper pdsc (procedure descriptor).
7586 This is done with the '.pdesc' directive.
7588 On non-VMS targets, we don't really differentiate between the two, as
7589 we can simply allocate stack without saving registers. */
7592 alpha_expand_prologue (void)
7594 /* Registers to save. */
7595 unsigned long imask = 0;
7596 unsigned long fmask = 0;
7597 /* Stack space needed for pushing registers clobbered by us. */
7598 HOST_WIDE_INT sa_size, sa_bias;
7599 /* Complete stack size needed. */
7600 HOST_WIDE_INT frame_size;
7601 /* Probed stack size; it additionally includes the size of
7602 the "reserve region" if any. */
7603 HOST_WIDE_INT probed_size;
7604 /* Offset from base reg to register save area. */
7605 HOST_WIDE_INT reg_offset;
7609 sa_size = alpha_sa_size ();
7610 frame_size = compute_frame_size (get_frame_size (), sa_size);
7612 if (flag_stack_usage_info)
7613 current_function_static_stack_size = frame_size;
7615 if (TARGET_ABI_OPEN_VMS)
7616 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7618 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7620 alpha_sa_mask (&imask, &fmask);
7622 /* Emit an insn to reload GP, if needed. */
7625 alpha_function_needs_gp = alpha_does_function_need_gp ();
7626 if (alpha_function_needs_gp)
7627 emit_insn (gen_prologue_ldgp ());
7630 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7631 the call to mcount ourselves, rather than having the linker do it
7632 magically in response to -pg. Since _mcount has special linkage,
7633 don't represent the call as a call. */
7634 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7635 emit_insn (gen_prologue_mcount ());
7637 /* Adjust the stack by the frame size. If the frame size is > 4096
7638 bytes, we need to be sure we probe somewhere in the first and last
7639 4096 bytes (we can probably get away without the latter test) and
7640 every 8192 bytes in between. If the frame size is > 32768, we
7641 do this in a loop. Otherwise, we generate the explicit probe instructions.
7644 Note that we are only allowed to adjust sp once in the prologue. */
7646 probed_size = frame_size;
7647 if (flag_stack_check)
7648 probed_size += STACK_CHECK_PROTECT;
7650 if (probed_size <= 32768)
7652 if (probed_size > 4096)
7656 for (probed = 4096; probed < probed_size; probed += 8192)
7657 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7659 /* We only have to do this probe if we aren't saving registers or
7660 if we are probing beyond the frame because of -fstack-check. */
7661 if ((sa_size == 0 && probed_size > probed - 4096)
7662 || flag_stack_check)
7663 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7666 if (frame_size != 0)
7667 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7668 GEN_INT (-frame_size))));
7672 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7673 number of 8192 byte blocks to probe. We then probe each block
7674 in the loop and then set SP to the proper location. If the
7675 amount remaining is > 4096, we have to do one more probe if we
7676 are not saving any registers or if we are probing beyond the
7677 frame because of -fstack-check. */
7679 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7680 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7681 rtx ptr = gen_rtx_REG (DImode, 22);
7682 rtx count = gen_rtx_REG (DImode, 23);
7685 emit_move_insn (count, GEN_INT (blocks));
7686 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7688 /* Because of the difficulty in emitting a new basic block this
7689 late in the compilation, generate the loop as a single insn. */
7690 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7692 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7694 rtx last = gen_rtx_MEM (DImode,
7695 plus_constant (Pmode, ptr, -leftover));
7696 MEM_VOLATILE_P (last) = 1;
7697 emit_move_insn (last, const0_rtx);
7700 if (flag_stack_check)
7702 /* If -fstack-check is specified we have to load the entire
7703 constant into a register and subtract from the sp in one go,
7704 because the probed stack size is not equal to the frame size. */
7705 HOST_WIDE_INT lo, hi;
7706 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7707 hi = frame_size - lo;
7709 emit_move_insn (ptr, GEN_INT (hi));
7710 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7711 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7716 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7717 GEN_INT (-leftover)));
7720 /* This alternative is special, because the DWARF code cannot
7721 possibly intuit through the loop above. So we invent this
7722 note for it to look at instead. */
7723 RTX_FRAME_RELATED_P (seq) = 1;
7724 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7725 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7726 plus_constant (Pmode, stack_pointer_rtx,
7730 /* Cope with very large offsets to the register save area. */
7732 sa_reg = stack_pointer_rtx;
7733 if (reg_offset + sa_size > 0x8000)
7735 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7738 if (low + sa_size <= 0x8000)
7739 sa_bias = reg_offset - low, reg_offset = low;
7741 sa_bias = reg_offset, reg_offset = 0;
7743 sa_reg = gen_rtx_REG (DImode, 24);
7744 sa_bias_rtx = GEN_INT (sa_bias);
7746 if (add_operand (sa_bias_rtx, DImode))
7747 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7750 emit_move_insn (sa_reg, sa_bias_rtx);
7751 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7755 /* Save regs in stack order, beginning with the VMS PV. */
7756 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7757 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7759 /* Save register RA next. */
7760 if (imask & (1UL << REG_RA))
7762 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7763 imask &= ~(1UL << REG_RA);
7767 /* Now save any other registers required to be saved. */
7768 for (i = 0; i < 31; i++)
7769 if (imask & (1UL << i))
7771 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7775 for (i = 0; i < 31; i++)
7776 if (fmask & (1UL << i))
7778 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7782 if (TARGET_ABI_OPEN_VMS)
7784 /* Register frame procedures save the fp. */
7785 if (alpha_procedure_type == PT_REGISTER)
7787 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7788 hard_frame_pointer_rtx);
7789 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7790 RTX_FRAME_RELATED_P (insn) = 1;
7793 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7794 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7795 gen_rtx_REG (DImode, REG_PV)));
7797 if (alpha_procedure_type != PT_NULL
7798 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7799 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7801 /* If we have to allocate space for outgoing args, do it now. */
7802 if (crtl->outgoing_args_size != 0)
7805 = emit_move_insn (stack_pointer_rtx,
7807 (Pmode, hard_frame_pointer_rtx,
7809 (crtl->outgoing_args_size))));
7811 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7812 if ! frame_pointer_needed. Setting the bit will change the CFA
7813 computation rule to use sp again, which would be wrong if we had
7814 frame_pointer_needed, as this means sp might move unpredictably
7818 frame_pointer_needed
7819 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7821 crtl->outgoing_args_size != 0
7822 => alpha_procedure_type != PT_NULL,
7824 so when we are not setting the bit here, we are guaranteed to
7825 have emitted an FRP frame pointer update just before. */
7826 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7831 /* If we need a frame pointer, set it from the stack pointer. */
7832 if (frame_pointer_needed)
7834 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7835 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7837 /* This must always be the last instruction in the
7838 prologue, thus we emit a special move + clobber. */
7839 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7840 stack_pointer_rtx, sa_reg)));
7844 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7845 the prologue, for exception handling reasons, we cannot do this for
7846 any insn that might fault. We could prevent this for mems with a
7847 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7848 have to prevent all such scheduling with a blockage.
7850 Linux, on the other hand, never bothered to implement OSF/1's
7851 exception handling, and so doesn't care about such things. Anyone
7852 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7854 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7855 emit_insn (gen_blockage ());
7858 /* Count the number of .file directives, so that .loc is up to date. */
7859 int num_source_filenames = 0;
7861 /* Output the textual info surrounding the prologue. */
7864 alpha_start_function (FILE *file, const char *fnname,
7865 tree decl ATTRIBUTE_UNUSED)
7867 unsigned long imask = 0;
7868 unsigned long fmask = 0;
7869 /* Stack space needed for pushing registers clobbered by us. */
7870 HOST_WIDE_INT sa_size;
7871 /* Complete stack size needed. */
7872 unsigned HOST_WIDE_INT frame_size;
7873 /* The maximum debuggable frame size. */
7874 unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
7875 /* Offset from base reg to register save area. */
7876 HOST_WIDE_INT reg_offset;
7877 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7878 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7881 #if TARGET_ABI_OPEN_VMS
7882 vms_start_function (fnname);
7885 alpha_fnname = fnname;
7886 sa_size = alpha_sa_size ();
7887 frame_size = compute_frame_size (get_frame_size (), sa_size);
7889 if (TARGET_ABI_OPEN_VMS)
7890 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7892 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7894 alpha_sa_mask (&imask, &fmask);
7896 /* Issue function start and label. */
7897 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
7899 fputs ("\t.ent ", file);
7900 assemble_name (file, fnname);
7903 /* If the function needs GP, we'll write the "..ng" label there.
7904 Otherwise, do it here. */
7906 && ! alpha_function_needs_gp
7907 && ! cfun->is_thunk)
7910 assemble_name (file, fnname);
7911 fputs ("..ng:\n", file);
7914 /* Nested functions on VMS that are potentially called via trampoline
7915 get a special transfer entry point that loads the called function's
7916 procedure descriptor and static chain. */
7917 if (TARGET_ABI_OPEN_VMS
7918 && !TREE_PUBLIC (decl)
7919 && DECL_CONTEXT (decl)
7920 && !TYPE_P (DECL_CONTEXT (decl))
7921 && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
7923 strcpy (tramp_label, fnname);
7924 strcat (tramp_label, "..tr");
7925 ASM_OUTPUT_LABEL (file, tramp_label);
7926 fprintf (file, "\tldq $1,24($27)\n");
7927 fprintf (file, "\tldq $27,16($27)\n");
7930 strcpy (entry_label, fnname);
7931 if (TARGET_ABI_OPEN_VMS)
7932 strcat (entry_label, "..en");
7934 ASM_OUTPUT_LABEL (file, entry_label);
7935 inside_function = TRUE;
7937 if (TARGET_ABI_OPEN_VMS)
7938 fprintf (file, "\t.base $%d\n", vms_base_regno);
7941 && TARGET_IEEE_CONFORMANT
7942 && !flag_inhibit_size_directive)
7944 /* Set flags in procedure descriptor to request IEEE-conformant
7945 math-library routines. The value we set it to is PDSC_EXC_IEEE
7946 (/usr/include/pdsc.h). */
7947 fputs ("\t.eflag 48\n", file);
7950 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7951 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7952 alpha_arg_offset = -frame_size + 48;
7954 /* Describe our frame. If the frame size is larger than an integer,
7955 print it as zero to avoid an assembler error. We won't be
7956 properly describing such a frame, but that's the best we can do. */
7957 if (TARGET_ABI_OPEN_VMS)
7958 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7959 HOST_WIDE_INT_PRINT_DEC "\n",
7961 frame_size >= (1UL << 31) ? 0 : frame_size,
7963 else if (!flag_inhibit_size_directive)
7964 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7965 (frame_pointer_needed
7966 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7967 frame_size >= max_frame_size ? 0 : frame_size,
7968 crtl->args.pretend_args_size);
7970 /* Describe which registers were spilled. */
7971 if (TARGET_ABI_OPEN_VMS)
7974 /* ??? Does VMS care if mask contains ra? The old code didn't
7975 set it, so I don't here. */
7976 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7978 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7979 if (alpha_procedure_type == PT_REGISTER)
7980 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7982 else if (!flag_inhibit_size_directive)
7986 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7987 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7989 for (i = 0; i < 32; ++i)
7990 if (imask & (1UL << i))
7995 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7996 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7999 #if TARGET_ABI_OPEN_VMS
8000 /* If a user condition handler has been installed at some point, emit
8001 the procedure descriptor bits to point the Condition Handling Facility
8002 at the indirection wrapper, and state the fp offset at which the user
8003 handler may be found. */
8004 if (cfun->machine->uses_condition_handler)
8006 fprintf (file, "\t.handler __gcc_shell_handler\n");
8007 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8010 #ifdef TARGET_VMS_CRASH_DEBUG
8011 /* Support of minimal traceback info. */
8012 switch_to_section (readonly_data_section);
8013 fprintf (file, "\t.align 3\n");
8014 assemble_name (file, fnname); fputs ("..na:\n", file);
8015 fputs ("\t.ascii \"", file);
8016 assemble_name (file, fnname);
8017 fputs ("\\0\"\n", file);
8018 switch_to_section (text_section);
8020 #endif /* TARGET_ABI_OPEN_VMS */
8023 /* Emit the .prologue note at the scheduled end of the prologue. */
8026 alpha_output_function_end_prologue (FILE *file)
8028 if (TARGET_ABI_OPEN_VMS)
8029 fputs ("\t.prologue\n", file);
8030 else if (!flag_inhibit_size_directive)
8031 fprintf (file, "\t.prologue %d\n",
8032 alpha_function_needs_gp || cfun->is_thunk);
8035 /* Write function epilogue. */
8038 alpha_expand_epilogue (void)
8040 /* Registers to save. */
8041 unsigned long imask = 0;
8042 unsigned long fmask = 0;
8043 /* Stack space needed for pushing registers clobbered by us. */
8044 HOST_WIDE_INT sa_size;
8045 /* Complete stack size needed. */
8046 HOST_WIDE_INT frame_size;
8047 /* Offset from base reg to register save area. */
8048 HOST_WIDE_INT reg_offset;
8049 int fp_is_frame_pointer, fp_offset;
8050 rtx sa_reg, sa_reg_exp = NULL;
8051 rtx sp_adj1, sp_adj2, mem, reg, insn;
8053 rtx cfa_restores = NULL_RTX;
8056 sa_size = alpha_sa_size ();
8057 frame_size = compute_frame_size (get_frame_size (), sa_size);
8059 if (TARGET_ABI_OPEN_VMS)
8061 if (alpha_procedure_type == PT_STACK)
8062 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8067 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8069 alpha_sa_mask (&imask, &fmask);
8071 fp_is_frame_pointer
8072 = (TARGET_ABI_OPEN_VMS
8073 ? alpha_procedure_type == PT_STACK
8074 : frame_pointer_needed);
8076 sa_reg = stack_pointer_rtx;
8078 if (crtl->calls_eh_return)
8079 eh_ofs = EH_RETURN_STACKADJ_RTX;
8085 /* If we have a frame pointer, restore SP from it. */
8086 if (TARGET_ABI_OPEN_VMS
8087 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8088 : frame_pointer_needed)
8089 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8091 /* Cope with very large offsets to the register save area. */
8092 if (reg_offset + sa_size > 0x8000)
8094 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8097 if (low + sa_size <= 0x8000)
8098 bias = reg_offset - low, reg_offset = low;
8100 bias = reg_offset, reg_offset = 0;
8102 sa_reg = gen_rtx_REG (DImode, 22);
8103 sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
8105 emit_move_insn (sa_reg, sa_reg_exp);
8108 /* Restore registers in order, excepting a true frame pointer. */
8110 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
8111 reg = gen_rtx_REG (DImode, REG_RA);
8112 emit_move_insn (reg, mem);
8113 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8116 imask &= ~(1UL << REG_RA);
8118 for (i = 0; i < 31; ++i)
8119 if (imask & (1UL << i))
8121 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8122 fp_offset = reg_offset;
8125 mem = gen_frame_mem (DImode,
8126 plus_constant (Pmode, sa_reg,
8128 reg = gen_rtx_REG (DImode, i);
8129 emit_move_insn (reg, mem);
8130 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8136 for (i = 0; i < 31; ++i)
8137 if (fmask & (1UL << i))
8139 mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
8141 reg = gen_rtx_REG (DFmode, i+32);
8142 emit_move_insn (reg, mem);
8143 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8148 if (frame_size || eh_ofs)
8150 sp_adj1 = stack_pointer_rtx;
8154 sp_adj1 = gen_rtx_REG (DImode, 23);
8155 emit_move_insn (sp_adj1,
8156 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8159 /* If the stack size is large, begin computation into a temporary
8160 register so as not to interfere with a potential fp restore,
8161 which must be consecutive with an SP restore. */
8162 if (frame_size < 32768 && !cfun->calls_alloca)
8163 sp_adj2 = GEN_INT (frame_size);
8164 else if (frame_size < 0x40007fffL)
8166 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8168 sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
8169 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8173 sp_adj1 = gen_rtx_REG (DImode, 23);
8174 emit_move_insn (sp_adj1, sp_adj2);
8176 sp_adj2 = GEN_INT (low);
8180 rtx tmp = gen_rtx_REG (DImode, 23);
8181 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8184 /* We can't drop new things to memory this late, afaik,
8185 so build it up by pieces. */
8186 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8188 gcc_assert (sp_adj2);
8192 /* From now on, things must be in order. So emit blockages. */
8194 /* Restore the frame pointer. */
8195 if (fp_is_frame_pointer)
8197 emit_insn (gen_blockage ());
8198 mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
8200 emit_move_insn (hard_frame_pointer_rtx, mem);
8201 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8202 hard_frame_pointer_rtx, cfa_restores);
8204 else if (TARGET_ABI_OPEN_VMS)
8206 emit_insn (gen_blockage ());
8207 emit_move_insn (hard_frame_pointer_rtx,
8208 gen_rtx_REG (DImode, vms_save_fp_regno));
8209 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8210 hard_frame_pointer_rtx, cfa_restores);
8213 /* Restore the stack pointer. */
8214 emit_insn (gen_blockage ());
8215 if (sp_adj2 == const0_rtx)
8216 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8218 insn = emit_move_insn (stack_pointer_rtx,
8219 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8220 REG_NOTES (insn) = cfa_restores;
8221 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8222 RTX_FRAME_RELATED_P (insn) = 1;
8226 gcc_assert (cfa_restores == NULL);
8228 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8230 emit_insn (gen_blockage ());
8231 insn = emit_move_insn (hard_frame_pointer_rtx,
8232 gen_rtx_REG (DImode, vms_save_fp_regno));
8233 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8234 RTX_FRAME_RELATED_P (insn) = 1;
8239 /* Output the rest of the textual info surrounding the epilogue. */
8242 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8246 /* We output a nop after noreturn calls at the very end of the function to
8247 ensure that the return address always remains in the caller's code range,
8248 as not doing so might confuse unwinding engines. */
8249 insn = get_last_insn ();
8250 if (!CALL_P (insn))
8251 insn = prev_active_insn (insn);
8252 if (insn && CALL_P (insn))
8253 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8255 #if TARGET_ABI_OPEN_VMS
8256 /* Write the linkage entries. */
8257 alpha_write_linkage (file, fnname);
8260 /* End the function. */
8261 if (TARGET_ABI_OPEN_VMS
8262 || !flag_inhibit_size_directive)
8264 fputs ("\t.end ", file);
8265 assemble_name (file, fnname);
8268 inside_function = FALSE;
8272 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8274 In order to avoid the hordes of differences between generated code
8275 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8276 lots of code loading up large constants, generate rtl and emit it
8277 instead of going straight to text.
8279 Not sure why this idea hasn't been explored before... */
8282 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8283 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8286 HOST_WIDE_INT hi, lo;
8287 rtx this_rtx, insn, funexp;
8289 /* We always require a valid GP. */
8290 emit_insn (gen_prologue_ldgp ());
8291 emit_note (NOTE_INSN_PROLOGUE_END);
8293 /* Find the "this" pointer. If the function returns a structure,
8294 the structure return pointer is in $16. */
8295 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8296 this_rtx = gen_rtx_REG (Pmode, 17);
8298 this_rtx = gen_rtx_REG (Pmode, 16);
8300 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8301 entire constant for the add. */
8302 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8303 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8304 if (hi + lo == delta)
8306 if (hi)
8307 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8308 if (lo)
8309 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8313 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8314 delta, -(delta < 0));
8315 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8318 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8323 tmp = gen_rtx_REG (Pmode, 0);
8324 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8326 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8327 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8328 if (hi + lo == vcall_offset)
8331 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8335 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8336 vcall_offset, -(vcall_offset < 0));
8337 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8341 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8344 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8346 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8349 /* Generate a tail call to the target function. */
8350 if (! TREE_USED (function))
8352 assemble_external (function);
8353 TREE_USED (function) = 1;
8355 funexp = XEXP (DECL_RTL (function), 0);
8356 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8357 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8358 SIBLING_CALL_P (insn) = 1;
8360 /* Run just enough of rest_of_compilation to get the insns emitted.
8361 There's not really enough bulk here to make other passes such as
8362 instruction scheduling worthwhile. Note that use_thunk calls
8363 assemble_start_function and assemble_end_function. */
8364 insn = get_insns ();
8365 insn_locators_alloc ();
8366 shorten_branches (insn);
8367 final_start_function (insn, file, 1);
8368 final (insn, file, 1);
8369 final_end_function ();
8371 #endif /* TARGET_ABI_OSF */
8373 /* Debugging support. */
8377 /* Name of the file containing the current function. */
8379 static const char *current_function_file = "";
8381 /* Offsets to alpha virtual arg/local debugging pointers. */
8383 long alpha_arg_offset;
8384 long alpha_auto_offset;
8386 /* Emit a new filename to a stream. */
8389 alpha_output_filename (FILE *stream, const char *name)
8391 static int first_time = TRUE;
8396 ++num_source_filenames;
8397 current_function_file = name;
8398 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8399 output_quoted_string (stream, name);
8400 fprintf (stream, "\n");
8403 else if (name != current_function_file
8404 && strcmp (name, current_function_file) != 0)
8406 ++num_source_filenames;
8407 current_function_file = name;
8408 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8410 output_quoted_string (stream, name);
8411 fprintf (stream, "\n");
8415 /* Structure to show the current status of registers and memory. */
8417 struct shadow_summary
8418 {
8419 struct {
8420 unsigned int i : 31; /* Mask of int regs */
8421 unsigned int fp : 31; /* Mask of fp regs */
8422 unsigned int mem : 1; /* mem == imem | fpmem */
8423 } used, defd;
8424 };
8426 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8427 to the summary structure. SET is nonzero if the insn is setting the
8428 object, otherwise zero. */
8431 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8433 const char *format_ptr;
8439 switch (GET_CODE (x))
8441 /* ??? Note that this case would be incorrect if the Alpha had a
8442 ZERO_EXTRACT in SET_DEST. */
8444 summarize_insn (SET_SRC (x), sum, 0);
8445 summarize_insn (SET_DEST (x), sum, 1);
8449 summarize_insn (XEXP (x, 0), sum, 1);
8453 summarize_insn (XEXP (x, 0), sum, 0);
8457 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8458 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8462 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8463 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8467 summarize_insn (SUBREG_REG (x), sum, 0);
8472 int regno = REGNO (x);
8473 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8475 if (regno == 31 || regno == 63)
8481 sum->defd.i |= mask;
8483 sum->defd.fp |= mask;
8488 sum->used.i |= mask;
8490 sum->used.fp |= mask;
8501 /* Find the regs used in memory address computation: */
8502 summarize_insn (XEXP (x, 0), sum, 0);
8505 case CONST_INT: case CONST_DOUBLE:
8506 case SYMBOL_REF: case LABEL_REF: case CONST:
8507 case SCRATCH: case ASM_INPUT:
8510 /* Handle common unary and binary ops for efficiency. */
8511 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8512 case MOD: case UDIV: case UMOD: case AND: case IOR:
8513 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8514 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8515 case NE: case EQ: case GE: case GT: case LE:
8516 case LT: case GEU: case GTU: case LEU: case LTU:
8517 summarize_insn (XEXP (x, 0), sum, 0);
8518 summarize_insn (XEXP (x, 1), sum, 0);
8521 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8522 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8523 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8524 case SQRT: case FFS:
8525 summarize_insn (XEXP (x, 0), sum, 0);
8529 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8530 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8531 switch (format_ptr[i])
8534 summarize_insn (XEXP (x, i), sum, 0);
8538 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8539 summarize_insn (XVECEXP (x, i, j), sum, 0);
8551 /* Ensure a sufficient number of `trapb' insns are in the code when
8552 the user requests code with a trap precision of functions or
8555 In naive mode, when the user requests a trap-precision of
8556 "instruction", a trapb is needed after every instruction that may
8557 generate a trap. This ensures that the code is resumption safe but
8560 When optimizations are turned on, we delay issuing a trapb as long
8561 as possible. In this context, a trap shadow is the sequence of
8562 instructions that starts with a (potentially) trap generating
8563 instruction and extends to the next trapb or call_pal instruction
8564 (but GCC never generates call_pal by itself). We can delay (and
8565 therefore sometimes omit) a trapb subject to the following
8568 (a) On entry to the trap shadow, if any Alpha register or memory
8569 location contains a value that is used as an operand value by some
8570 instruction in the trap shadow (live on entry), then no instruction
8571 in the trap shadow may modify the register or memory location.
8573 (b) Within the trap shadow, the computation of the base register
8574 for a memory load or store instruction may not involve using the
8575 result of an instruction that might generate an UNPREDICTABLE
8578 (c) Within the trap shadow, no register may be used more than once
8579 as a destination register. (This is to make life easier for the
8582 (d) The trap shadow may not include any branch instructions. */
8585 alpha_handle_trap_shadows (void)
8587 struct shadow_summary shadow;
8588 int trap_pending, exception_nesting;
8592 exception_nesting = 0;
8595 shadow.used.mem = 0;
8596 shadow.defd = shadow.used;
8598 for (i = get_insns (); i ; i = NEXT_INSN (i))
8602 switch (NOTE_KIND (i))
8604 case NOTE_INSN_EH_REGION_BEG:
8605 exception_nesting++;
8610 case NOTE_INSN_EH_REGION_END:
8611 exception_nesting--;
8616 case NOTE_INSN_EPILOGUE_BEG:
8617 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8622 else if (trap_pending)
8624 if (alpha_tp == ALPHA_TP_FUNC)
8627 && GET_CODE (PATTERN (i)) == RETURN)
8630 else if (alpha_tp == ALPHA_TP_INSN)
8634 struct shadow_summary sum;
8639 sum.defd = sum.used;
8641 switch (GET_CODE (i))
8644 /* Annoyingly, get_attr_trap will die on these. */
8645 if (GET_CODE (PATTERN (i)) == USE
8646 || GET_CODE (PATTERN (i)) == CLOBBER)
8649 summarize_insn (PATTERN (i), &sum, 0);
8651 if ((sum.defd.i & shadow.defd.i)
8652 || (sum.defd.fp & shadow.defd.fp))
8654 /* (c) would be violated */
8658 /* Combine shadow with summary of current insn: */
8659 shadow.used.i |= sum.used.i;
8660 shadow.used.fp |= sum.used.fp;
8661 shadow.used.mem |= sum.used.mem;
8662 shadow.defd.i |= sum.defd.i;
8663 shadow.defd.fp |= sum.defd.fp;
8664 shadow.defd.mem |= sum.defd.mem;
8666 if ((sum.defd.i & shadow.used.i)
8667 || (sum.defd.fp & shadow.used.fp)
8668 || (sum.defd.mem & shadow.used.mem))
8670 /* (a) would be violated (also takes care of (b)) */
8671 gcc_assert (get_attr_trap (i) != TRAP_YES
8672 || (!(sum.defd.i & sum.used.i)
8673 && !(sum.defd.fp & sum.used.fp)));
8691 n = emit_insn_before (gen_trapb (), i);
8692 PUT_MODE (n, TImode);
8693 PUT_MODE (i, TImode);
8697 shadow.used.mem = 0;
8698 shadow.defd = shadow.used;
8703 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8704 && NONJUMP_INSN_P (i)
8705 && GET_CODE (PATTERN (i)) != USE
8706 && GET_CODE (PATTERN (i)) != CLOBBER
8707 && get_attr_trap (i) == TRAP_YES)
8709 if (optimize && !trap_pending)
8710 summarize_insn (PATTERN (i), &shadow, 0);
8716 /* Alpha can only issue instruction groups simultaneously if they are
8717 suitably aligned. This is very processor-specific. */
8718 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8719 that are marked "fake". These instructions do not exist on that target,
8720 but it is possible to see these insns with deranged combinations of
8721 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8722 choose a result at random. */
8724 enum alphaev4_pipe {
8731 enum alphaev5_pipe {
8742 static enum alphaev4_pipe
8743 alphaev4_insn_pipe (rtx insn)
8745 if (recog_memoized (insn) < 0)
8747 if (get_attr_length (insn) != 4)
8750 switch (get_attr_type (insn))
8766 case TYPE_MVI: /* fake */
8781 case TYPE_FSQRT: /* fake */
8782 case TYPE_FTOI: /* fake */
8783 case TYPE_ITOF: /* fake */
8791 static enum alphaev5_pipe
8792 alphaev5_insn_pipe (rtx insn)
8794 if (recog_memoized (insn) < 0)
8796 if (get_attr_length (insn) != 4)
8799 switch (get_attr_type (insn))
8819 case TYPE_FTOI: /* fake */
8820 case TYPE_ITOF: /* fake */
8835 case TYPE_FSQRT: /* fake */
8846 /* IN_USE is a mask of the slots currently filled within the insn group.
8847 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8848 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8850 LEN is, of course, the length of the group in bytes. */
8853 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8860 || GET_CODE (PATTERN (insn)) == CLOBBER
8861 || GET_CODE (PATTERN (insn)) == USE)
8866 enum alphaev4_pipe pipe;
8868 pipe = alphaev4_insn_pipe (insn);
8872 /* Force complex instructions to start new groups. */
8876 /* If this is a completely unrecognized insn, it's an asm.
8877 We don't know how long it is, so record length as -1 to
8878 signal a needed realignment. */
8879 if (recog_memoized (insn) < 0)
8882 len = get_attr_length (insn);
8886 if (in_use & EV4_IB0)
8888 if (in_use & EV4_IB1)
8893 in_use |= EV4_IB0 | EV4_IBX;
8897 if (in_use & EV4_IB0)
8899 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8907 if (in_use & EV4_IB1)
8917 /* Haifa doesn't do well scheduling branches. */
8922 insn = next_nonnote_insn (insn);
8924 if (!insn || ! INSN_P (insn))
8927 /* Let Haifa tell us where it thinks insn group boundaries are. */
8928 if (GET_MODE (insn) == TImode)
8931 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8936 insn = next_nonnote_insn (insn);
8944 /* IN_USE is a mask of the slots currently filled within the insn group.
8945 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8946 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8948 LEN is, of course, the length of the group in bytes. */
8951 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8958 || GET_CODE (PATTERN (insn)) == CLOBBER
8959 || GET_CODE (PATTERN (insn)) == USE)
8964 enum alphaev5_pipe pipe;
8966 pipe = alphaev5_insn_pipe (insn);
8970 /* Force complex instructions to start new groups. */
8974 /* If this is a completely unrecognized insn, it's an asm.
8975 We don't know how long it is, so record length as -1 to
8976 signal a needed realignment. */
8977 if (recog_memoized (insn) < 0)
8980 len = get_attr_length (insn);
8983 /* ??? Most of the places below, we would like to assert never
8984 happen, as it would indicate an error either in Haifa, or
8985 in the scheduling description. Unfortunately, Haifa never
8986 schedules the last instruction of the BB, so we don't have
8987 an accurate TI bit to go off. */
8989 if (in_use & EV5_E0)
8991 if (in_use & EV5_E1)
8996 in_use |= EV5_E0 | EV5_E01;
9000 if (in_use & EV5_E0)
9002 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9010 if (in_use & EV5_E1)
9016 if (in_use & EV5_FA)
9018 if (in_use & EV5_FM)
9023 in_use |= EV5_FA | EV5_FAM;
9027 if (in_use & EV5_FA)
9033 if (in_use & EV5_FM)
9046 /* Haifa doesn't do well scheduling branches. */
9047 /* ??? If this is predicted not-taken, slotting continues, except
9048 that no more IBR, FBR, or JSR insns may be slotted. */
9053 insn = next_nonnote_insn (insn);
9055 if (!insn || ! INSN_P (insn))
9058 /* Let Haifa tell us where it thinks insn group boundaries are. */
9059 if (GET_MODE (insn) == TImode)
9062 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9067 insn = next_nonnote_insn (insn);
9076 alphaev4_next_nop (int *pin_use)
9078 int in_use = *pin_use;
9081 if (!(in_use & EV4_IB0))
9086 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9091 else if (TARGET_FP && !(in_use & EV4_IB1))
9104 alphaev5_next_nop (int *pin_use)
9106 int in_use = *pin_use;
9109 if (!(in_use & EV5_E1))
9114 else if (TARGET_FP && !(in_use & EV5_FA))
9119 else if (TARGET_FP && !(in_use & EV5_FM))
9131 /* The instruction group alignment main loop. */
9134 alpha_align_insns (unsigned int max_align,
9135 rtx (*next_group) (rtx, int *, int *),
9136 rtx (*next_nop) (int *))
9138 /* ALIGN is the known alignment for the insn group. */
9140 /* OFS is the offset of the current insn in the insn group. */
9142 int prev_in_use, in_use, len, ldgp;
9145 /* Let shorten_branches take care of assigning alignments to code labels. */
9146 shorten_branches (get_insns ());
9148 if (align_functions < 4)
9150 else if ((unsigned int) align_functions < max_align)
9151 align = align_functions;
9155 ofs = prev_in_use = 0;
9158 i = next_nonnote_insn (i);
9160 ldgp = alpha_function_needs_gp ? 8 : 0;
9164 next = (*next_group) (i, &in_use, &len);
9166 /* When we see a label, resync alignment etc. */
9169 unsigned int new_align = 1 << label_to_alignment (i);
9171 if (new_align >= align)
9173 align = new_align < max_align ? new_align : max_align;
9177 else if (ofs & (new_align-1))
9178 ofs = (ofs | (new_align-1)) + 1;
9182 /* Handle complex instructions specially. */
9183 else if (in_use == 0)
9185 /* Asms will have length < 0. This is a signal that we have
9186 lost alignment knowledge. Assume, however, that the asm
9187 will not mis-align instructions. */
9196 /* If the known alignment is smaller than the recognized insn group,
9197 realign the output. */
9198 else if ((int) align < len)
9200 unsigned int new_log_align = len > 8 ? 4 : 3;
9203 where = prev = prev_nonnote_insn (i);
9204 if (!where || !LABEL_P (where))
9207 /* Can't realign between a call and its gp reload. */
9208 if (! (TARGET_EXPLICIT_RELOCS
9209 && prev && CALL_P (prev)))
9211 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9212 align = 1 << new_log_align;
9217 /* We may not insert padding inside the initial ldgp sequence. */
9221 /* If the group won't fit in the same INT16 as the previous,
9222 we need to add padding to keep the group together. Rather
9223 than simply leaving the insn filling to the assembler, we
9224 can make use of the knowledge of what sorts of instructions
9225 were issued in the previous group to make sure that all of
9226 the added nops are really free. */
9227 else if (ofs + len > (int) align)
9229 int nop_count = (align - ofs) / 4;
9232 /* Insert nops before labels, branches, and calls to truly merge
9233 the execution of the nops with the previous instruction group. */
9234 where = prev_nonnote_insn (i);
9237 if (LABEL_P (where))
9239 rtx where2 = prev_nonnote_insn (where);
9240 if (where2 && JUMP_P (where2))
9243 else if (NONJUMP_INSN_P (where))
9250 emit_insn_before ((*next_nop)(&prev_in_use), where);
9251 while (--nop_count);
9255 ofs = (ofs + len) & (align - 1);
9256 prev_in_use = in_use;
9261 /* Insert an unop between sibcall or noreturn function call and GP load. */
9264 alpha_pad_function_end (void)
9268 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9270 if (! (CALL_P (insn)
9271 && (SIBLING_CALL_P (insn)
9272 || find_reg_note (insn, REG_NORETURN, NULL_RTX))))
9275 /* Make sure we do not split a call and its corresponding
9276 CALL_ARG_LOCATION note. */
9279 next = NEXT_INSN (insn);
9280 if (next && NOTE_P (next)
9281 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9285 next = next_active_insn (insn);
9289 rtx pat = PATTERN (next);
9291 if (GET_CODE (pat) == SET
9292 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9293 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9294 emit_insn_after (gen_unop (), insn);
9299 /* Machine dependent reorg pass. */
9304 /* Workaround for a linker error that triggers when an exception
9305 handler immediately follows a sibcall or a noreturn function.
9307 In the sibcall case:
9309 The instruction stream from an object file:
9311 1d8: 00 00 fb 6b jmp (t12)
9312 1dc: 00 00 ba 27 ldah gp,0(ra)
9313 1e0: 00 00 bd 23 lda gp,0(gp)
9314 1e4: 00 00 7d a7 ldq t12,0(gp)
9315 1e8: 00 40 5b 6b jsr ra,(t12),1ec <__funcZ+0x1ec>
9317 was converted in the final link pass to:
9319 12003aa88: 67 fa ff c3 br 120039428 <...>
9320 12003aa8c: 00 00 fe 2f unop
9321 12003aa90: 00 00 fe 2f unop
9322 12003aa94: 48 83 7d a7 ldq t12,-31928(gp)
9323 12003aa98: 00 40 5b 6b jsr ra,(t12),12003aa9c <__func+0x1ec>
9325 And in the noreturn case:
9327 The instruction stream from an object file:
9329 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9330 58: 00 00 ba 27 ldah gp,0(ra)
9331 5c: 00 00 bd 23 lda gp,0(gp)
9332 60: 00 00 7d a7 ldq t12,0(gp)
9333 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9335 was converted in the final link pass to:
9337 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9338 fdb28: 00 00 fe 2f unop
9339 fdb2c: 00 00 fe 2f unop
9340 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9341 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9343 GP load instructions were wrongly cleared by the linker relaxation
9344 pass. This workaround prevents removal of GP loads by inserting
9345 an unop instruction between a sibcall or noreturn function call and
9346 exception handler prologue. */
9348 if (current_function_has_exception_handlers ())
9349 alpha_pad_function_end ();
9351 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9352 alpha_handle_trap_shadows ();
9354 /* Due to the number of extra trapb insns, don't bother fixing up
9355 alignment when trap precision is instruction. Moreover, we can
9356 only do our job when sched2 is run. */
9357 if (optimize && !optimize_size
9358 && alpha_tp != ALPHA_TP_INSN
9359 && flag_schedule_insns_after_reload)
9361 if (alpha_tune == PROCESSOR_EV4)
9362 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9363 else if (alpha_tune == PROCESSOR_EV5)
9364 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9369 alpha_file_start (void)
9371 default_file_start ();
9373 fputs ("\t.set noreorder\n", asm_out_file);
9374 fputs ("\t.set volatile\n", asm_out_file);
9376 fputs ("\t.set noat\n", asm_out_file);
9377 if (TARGET_EXPLICIT_RELOCS)
9378 fputs ("\t.set nomacro\n", asm_out_file);
9379 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9383 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9385 else if (TARGET_MAX)
9387 else if (TARGET_BWX)
9389 else if (alpha_cpu == PROCESSOR_EV5)
9394 fprintf (asm_out_file, "\t.arch %s\n", arch);
9398 /* Since we don't have a .dynbss section, we should not allow global
9399 relocations in the .rodata section. */
9402 alpha_elf_reloc_rw_mask (void)
9404 return flag_pic ? 3 : 2;
9407 /* Return a section for X. The only special thing we do here is to
9408 honor small data. */
9411 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9412 unsigned HOST_WIDE_INT align)
9414 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9415 /* ??? Consider using mergeable sdata sections. */
9416 return sdata_section;
9418 return default_elf_select_rtx_section (mode, x, align);
9422 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9424 unsigned int flags = 0;
9426 if (strcmp (name, ".sdata") == 0
9427 || strncmp (name, ".sdata.", 7) == 0
9428 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9429 || strcmp (name, ".sbss") == 0
9430 || strncmp (name, ".sbss.", 6) == 0
9431 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9432 flags = SECTION_SMALL;
9434 flags |= default_section_type_flags (decl, name, reloc);
9438 /* Structure to collect function names for final output in link section. */
9439 /* Note that items marked with GTY can't be ifdef'ed out. */
9447 struct GTY(()) alpha_links
9451 enum reloc_kind rkind;
9454 #if TARGET_ABI_OPEN_VMS
9456 /* Return the VMS argument type corresponding to MODE. */
9459 alpha_arg_type (enum machine_mode mode)
9463 case SFmode:
9464 return TARGET_FLOAT_VAX ? FF : FS;
9465 case DFmode:
9466 return TARGET_FLOAT_VAX ? FD : FT;
9472 /* Return an rtx for an integer representing the VMS Argument Information register. */
9476 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9478 unsigned HOST_WIDE_INT regval = cum.num_args;
9481 for (i = 0; i < 6; i++)
9482 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9484 return GEN_INT (regval);
9488 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9489 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9490 this is the reference to the linkage pointer value, 0 if this is the
9491 reference to the function entry value. RFLAG is 1 if this a reduced
9492 reference (code address only), 0 if this is a full reference. */
9495 alpha_use_linkage (rtx func, bool lflag, bool rflag)
9497 struct alpha_links *al = NULL;
9498 const char *name = XSTR (func, 0);
9500 if (cfun->machine->links)
9502 splay_tree_node lnode;
9504 /* Is this name already defined? */
9505 lnode = splay_tree_lookup (cfun->machine->links, (splay_tree_key) name);
9507 al = (struct alpha_links *) lnode->value;
9510 cfun->machine->links = splay_tree_new_ggc
9511 ((splay_tree_compare_fn) strcmp,
9512 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9513 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9524 /* Follow transparent alias, as this is used for CRTL translations. */
9525 id = maybe_get_identifier (name);
9528 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9529 id = TREE_CHAIN (id);
9530 name = IDENTIFIER_POINTER (id);
9533 buf_len = strlen (name) + 8 + 9;
9534 linksym = (char *) alloca (buf_len);
9535 snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
9537 al = ggc_alloc_alpha_links ();
9539 al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
9541 splay_tree_insert (cfun->machine->links,
9542 (splay_tree_key) ggc_strdup (name),
9543 (splay_tree_value) al);
9546 al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
9549 return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
9555 alpha_write_one_linkage (splay_tree_node node, void *data)
9557 const char *const name = (const char *) node->key;
9558 struct alpha_links *link = (struct alpha_links *) node->value;
9559 FILE *stream = (FILE *) data;
9561 ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
9562 if (link->rkind == KIND_CODEADDR)
9564 /* External and used, request code address. */
9565 fprintf (stream, "\t.code_address ");
9569 if (!SYMBOL_REF_EXTERNAL_P (link->func)
9570 && SYMBOL_REF_LOCAL_P (link->func))
9572 /* Locally defined, build linkage pair. */
9573 fprintf (stream, "\t.quad %s..en\n", name);
9574 fprintf (stream, "\t.quad ");
9578 /* External, request linkage pair. */
9579 fprintf (stream, "\t.linkage ");
9582 assemble_name (stream, name);
9583 fputs ("\n", stream);
9589 alpha_write_linkage (FILE *stream, const char *funname)
9591 fprintf (stream, "\t.link\n");
9592 fprintf (stream, "\t.align 3\n");
9595 #ifdef TARGET_VMS_CRASH_DEBUG
9596 fputs ("\t.name ", stream);
9597 assemble_name (stream, funname);
9598 fputs ("..na\n", stream);
9601 ASM_OUTPUT_LABEL (stream, funname);
9602 fprintf (stream, "\t.pdesc ");
9603 assemble_name (stream, funname);
9604 fprintf (stream, "..en,%s\n",
9605 alpha_procedure_type == PT_STACK ? "stack"
9606 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9608 if (cfun->machine->links)
9610 splay_tree_foreach (cfun->machine->links, alpha_write_one_linkage, stream);
9611 /* splay_tree_delete (func->links); */
9615 /* Switch to an arbitrary section NAME with attributes as specified
9616 by FLAGS. ALIGN specifies any known alignment requirements for
9617 the section; 0 if the default should be used. */
9620 vms_asm_named_section (const char *name, unsigned int flags,
9621 tree decl ATTRIBUTE_UNUSED)
9623 fputc ('\n', asm_out_file);
9624 fprintf (asm_out_file, ".section\t%s", name);
9626 if (flags & SECTION_DEBUG)
9627 fprintf (asm_out_file, ",NOWRT");
9629 fputc ('\n', asm_out_file);
9632 /* Record an element in the table of global constructors. SYMBOL is
9633 a SYMBOL_REF of the function to be called; PRIORITY is a number
9634 between 0 and MAX_INIT_PRIORITY.
9636 Differs from default_ctors_section_asm_out_constructor in that the
9637 width of the .ctors entry is always 64 bits, rather than the 32 bits
9638 used by a normal pointer. */
9641 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9643 switch_to_section (ctors_section);
9644 assemble_align (BITS_PER_WORD);
9645 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9649 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9651 switch_to_section (dtors_section);
9652 assemble_align (BITS_PER_WORD);
9653 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9657 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9658 bool lflag ATTRIBUTE_UNUSED,
9659 bool rflag ATTRIBUTE_UNUSED)
9664 #endif /* TARGET_ABI_OPEN_VMS */

static void
alpha_init_libfuncs (void)
{
  if (TARGET_ABI_OPEN_VMS)
    {
      /* Use the VMS runtime library functions for division and
	 remainder.  */
      set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
      set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
      set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
      set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
      set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
      set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
      set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
      set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
      abort_libfunc = init_one_libfunc ("decc$abort");
      memcmp_libfunc = init_one_libfunc ("decc$memcmp");
#ifdef MEM_LIBFUNCS_INIT
      MEM_LIBFUNCS_INIT;
#endif
    }
}
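
/* Under this mapping a 32-bit signed division, e.g. (function name
   hypothetical)

       int quot (int a, int b) { return a / b; }

   compiles on VMS into a call to OTS$DIV_I instead of open-coded
   division.  */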

/* On the Alpha, we use this to disable the floating-point registers
   when they don't exist.  */

static void
alpha_conditional_register_usage (void)
{
  int i;
  if (! TARGET_FPREGS)
    for (i = 32; i < 63; i++)
      fixed_regs[i] = call_used_regs[i] = 1;
}
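
/* Note that the loop stops at 62: hard registers 32..62 are $f0..$f30,
   while register 63 ($f31) always reads as zero and is fixed already.  */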

/* Initialize the GCC target structure.  */
#if TARGET_ABI_OPEN_VMS
# undef TARGET_ATTRIBUTE_TABLE
# define TARGET_ATTRIBUTE_TABLE vms_attribute_table
# undef TARGET_CAN_ELIMINATE
# define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
#endif

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

/* Default unaligned ops are provided for ELF systems.  To get unaligned
   data for non-ELF systems, we have to turn off auto alignment.  */
#if TARGET_ABI_OPEN_VMS
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
#endif

#undef TARGET_ASM_RELOC_RW_MASK
#define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
#undef TARGET_SECTION_TYPE_FLAGS
#define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags

#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS alpha_init_libfuncs

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P alpha_mode_dependent_address_p

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START alpha_file_start

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  alpha_multipass_dfa_lookahead

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL alpha_builtin_decl
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS alpha_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN alpha_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN alpha_fold_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem

#if TARGET_ABI_OSF
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
#undef TARGET_STDARG_OPTIMIZE_HOOK
#define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
#endif

/* Use 16-bit anchors.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS alpha_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG alpha_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT alpha_trampoline_init

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD alpha_secondary_reload

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start

/* The Alpha architecture does not require sequential consistency.  See
   http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
   for an example of how it can be violated in practice.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
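
/* The classic illustration from the reference above (sketch, variable
   names hypothetical): with one CPU executing

       data = 1;  wmb ();  p = &data;

   and another executing

       t = p;  v = *t;

   Alpha permits the reader to observe the new P yet a stale *T unless
   the reader issues its own memory barrier, since even a dependent
   load may be satisfied out of order.  */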

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE alpha_option_override

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE alpha_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-alpha.h"