/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "cfglayout.h"
#include "langhooks.h"
#include "dwarf2out.h"
/* Processor costs.  */
struct processor_costs {
  const int int_load;		/* Integer load */
  const int int_sload;		/* Integer signed load */
  const int int_zload;		/* Integer zeroed load */
  const int float_load;		/* Float load */
  const int float_move;		/* fmov, fneg, fabs */
  const int float_plusminus;	/* fadd, fsub */
  const int float_cmp;		/* fcmp */
  const int float_cmove;	/* fmov, fmovr */
  const int float_mul;		/* fmul */
  const int float_div_sf;	/* fdivs */
  const int float_div_df;	/* fdivd */
  const int float_sqrt_sf;	/* fsqrts */
  const int float_sqrt_df;	/* fsqrtd */
  const int int_mul;		/* umul/smul */
  const int int_mulX;		/* mulX */

  /* integer multiply cost for each bit set past the most
     significant 3, so the formula for multiply cost becomes:

	if (rs1 < 0)
	  highest_bit = highest_clear_bit(rs1);
	else
	  highest_bit = highest_set_bit(rs1);
	if (highest_bit < 3)
	  highest_bit = 3;
	cost = int_mul{,X} + ((highest_bit - 3) / int_mul_bit_factor);

     A value of zero indicates that the multiply cost is fixed,
     and not variable.  */
  const int int_mul_bit_factor;

  const int int_div;		/* udiv/sdiv */
  const int int_divX;		/* divX */
  const int int_cmove;		/* movcc, movr */
  /* penalty for shifts, due to scheduling rules etc. */
  const int shift_penalty;
};
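/* For illustration only (the worked numbers here are ours, not from the
   original sources): with the ultrasparc values int_mul = COSTS_N_INSNS (4)
   and int_mul_bit_factor = 2, a multiply by a constant whose highest set
   bit is bit 11 is costed as COSTS_N_INSNS (4) + (11 - 3) / 2, i.e. the
   base multiply cost plus four extra units; an int_mul_bit_factor of zero
   (e.g. cypress below) keeps the multiply cost fixed regardless of the
   operand.  */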
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (26), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (26), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara3_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (9), /* fmov, fneg, fabs */
  COSTS_N_INSNS (9), /* fadd, fsub */
  COSTS_N_INSNS (9), /* fcmp */
  COSTS_N_INSNS (9), /* fmov, fmovr */
  COSTS_N_INSNS (9), /* fmul */
  COSTS_N_INSNS (23), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (37), /* fsqrtd */
  COSTS_N_INSNS (9), /* imul */
  COSTS_N_INSNS (9), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 17 - 45 cycle range */
  COSTS_N_INSNS (30), /* idivX, average of 16 - 44 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};
static const struct processor_costs *sparc_costs = &cypress_costs;
#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100, 101, 102};
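/* For example, leaf_reg_remap[24] == 8: in a leaf function the incoming
   register %i0 (hard reg 24) is rewritten to %o0 (hard reg 8), since no
   register window is allocated.  Entries of -1, such as the one for %fp
   (hard reg 30), mark registers that cannot be remapped.  */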
/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1};
struct GTY(()) machine_function
{
  /* Size of the frame of the function.  */
  HOST_WIDE_INT frame_size;

  /* Size of the frame of the function minus the register window save area
     and the outgoing argument area.  */
  HOST_WIDE_INT apparent_frame_size;

  /* Register we pretend the frame pointer is allocated to.  Normally, this
     is %fp, but if we are in a leaf procedure, this is (%sp + offset).  We
     record "offset" separately as it may be too big for (reg + disp).  */
  rtx frame_base_reg;
  HOST_WIDE_INT frame_base_offset;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* Number of global or FP registers to be saved (as 4-byte quantities).  */
  int n_global_fp_regs;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  bool leaf_function_p;

  /* True if the prologue saves local or in registers.  */
  bool save_local_in_regs_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};
#define sparc_frame_size cfun->machine->frame_size
#define sparc_apparent_frame_size cfun->machine->apparent_frame_size
#define sparc_frame_base_reg cfun->machine->frame_base_reg
#define sparc_frame_base_offset cfun->machine->frame_base_offset
#define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
#define sparc_leaf_function_p cfun->machine->leaf_function_p
#define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
#define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;
static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
						 tree) ATTRIBUTE_UNUSED;
#endif
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, const_tree);
static void sparc_reorg (void);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
				     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
						      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (cumulative_args_t);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (cumulative_args_t,
				     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (cumulative_args_t,
				 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (cumulative_args_t,
			       enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (cumulative_args_t,
					enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
						 const_tree);
static int sparc_arg_partial_bytes (cumulative_args_t,
				    enum machine_mode, tree, bool);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static rtx sparc_builtin_setjmp_frame_value (void);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
static bool sparc_print_operand_punct_valid_p (unsigned char);
static void sparc_print_operand (FILE *, rtx, int);
static void sparc_print_operand_address (FILE *, rtx);
static reg_class_t sparc_secondary_reload (bool, rtx, reg_class_t,
					   enum machine_mode,
					   secondary_reload_info *);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
/* Option handling.  */

enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];
/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sparc_reorg

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
#define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sparc_secondary_reload

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sparc_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
struct gcc_target targetm = TARGET_INITIALIZER;

static void
dump_target_flag_bits (const int flags)
{
  if (flags & MASK_64BIT)
    fprintf (stderr, "64BIT ");
  if (flags & MASK_APP_REGS)
    fprintf (stderr, "APP_REGS ");
  if (flags & MASK_FASTER_STRUCTS)
    fprintf (stderr, "FASTER_STRUCTS ");
  if (flags & MASK_FLAT)
    fprintf (stderr, "FLAT ");
  if (flags & MASK_FMAF)
    fprintf (stderr, "FMAF ");
  if (flags & MASK_FPU)
    fprintf (stderr, "FPU ");
  if (flags & MASK_HARD_QUAD)
    fprintf (stderr, "HARD_QUAD ");
  if (flags & MASK_POPC)
    fprintf (stderr, "POPC ");
  if (flags & MASK_PTR64)
    fprintf (stderr, "PTR64 ");
  if (flags & MASK_STACK_BIAS)
    fprintf (stderr, "STACK_BIAS ");
  if (flags & MASK_UNALIGNED_DOUBLES)
    fprintf (stderr, "UNALIGNED_DOUBLES ");
  if (flags & MASK_V8PLUS)
    fprintf (stderr, "V8PLUS ");
  if (flags & MASK_VIS)
    fprintf (stderr, "VIS ");
  if (flags & MASK_VIS2)
    fprintf (stderr, "VIS2 ");
  if (flags & MASK_VIS3)
    fprintf (stderr, "VIS3 ");
  if (flags & MASK_DEPRECATED_V8_INSNS)
    fprintf (stderr, "DEPRECATED_V8_INSNS ");
  if (flags & MASK_SPARCLET)
    fprintf (stderr, "SPARCLET ");
  if (flags & MASK_SPARCLITE)
    fprintf (stderr, "SPARCLITE ");
  if (flags & MASK_V8)
    fprintf (stderr, "V8 ");
  if (flags & MASK_V9)
    fprintf (stderr, "V9 ");
}

static void
dump_target_flags (const char *prefix, const int flags)
{
  fprintf (stderr, "%s: (%08x) [ ", prefix, flags);
  dump_target_flag_bits (flags);
  fprintf (stderr, "]\n");
}
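/* For illustration (hypothetical output): with -m64 -mvis the dump would
   print something like
     Final target_flags: (xxxxxxxx) [ 64BIT FPU PTR64 STACK_BIAS VIS V9 ]
   where the hex word is the raw flag mask whose bit layout comes from
   sparc.opt, so the exact value varies by configuration.  */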
/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { TARGET_CPU_niagara3, PROCESSOR_NIAGARA3 },
    { TARGET_CPU_niagara4, PROCESSOR_NIAGARA4 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const char *const name;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", MASK_ISA, 0 },
    { "cypress", MASK_ISA, 0 },
    { "v8", MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", MASK_ISA, MASK_V8 },
    { "hypersparc", MASK_ISA, MASK_V8|MASK_FPU },
    { "leon", MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite", MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { "f930", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f934", MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "sparclite86x", MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "sparclet", MASK_ISA, MASK_SPARCLET },
    { "tsc701", MASK_ISA, MASK_SPARCLET },
    { "v9", MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { "ultrasparc", MASK_ISA,
    /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { "ultrasparc3", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS|MASK_VIS2 },
    /* UltraSPARC T1 */
    { "niagara", MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T2 */
    { "niagara2", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2 },
    /* UltraSPARC T3 */
    { "niagara3", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
    /* UltraSPARC T4 */
    { "niagara4", MASK_ISA,
      MASK_V9|MASK_POPC|MASK_VIS2|MASK_VIS3|MASK_FMAF },
  };
  const struct cpu_table *cpu;
  unsigned int i;
  int fpu;

  if (sparc_debug_string != NULL)
    {
      const char *q;
      char *p;

      p = ASTRDUP (sparc_debug_string);
      while ((q = strtok (p, ",")) != NULL)
	{
	  bool invert = false;
	  int mask = 0;

	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  if (! strcmp (q, "all"))
	    mask = MASK_DEBUG_ALL;
	  else if (! strcmp (q, "options"))
	    mask = MASK_DEBUG_OPTIONS;
	  else
	    error ("unknown -mdebug-%s switch", q);

	  if (invert)
	    sparc_debug &= ~mask;
	  else
	    sparc_debug |= mask;
	}
    }

  if (TARGET_DEBUG_OPTIONS)
    {
      dump_target_flags ("Initial target_flags", target_flags);
      dump_target_flags ("target_flags_explicit", target_flags_explicit);
    }
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }
  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }
  /* Check that -fcall-saved-REG wasn't specified for out registers.  */
  for (i = 8; i < 16; i++)
    if (!call_used_regs [i])
      {
	error ("-fcall-saved-REG is not supported for out registers");
	call_used_regs [i] = 1;
      }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */
  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
	if (def->cpu == TARGET_CPU_DEFAULT)
	  break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }

  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];

  if (TARGET_DEBUG_OPTIONS)
    {
      fprintf (stderr, "sparc_cpu_and_features: %s\n", cpu->name);
      fprintf (stderr, "sparc_cpu: %s\n",
	       cpu_table[(int) sparc_cpu].name);
      dump_target_flags ("cpu->disable", cpu->disable);
      dump_target_flags ("cpu->enable", cpu->enable);
    }
  target_flags &= ~cpu->disable;
  target_flags |= (cpu->enable
#ifndef HAVE_AS_FMAF_HPC_VIS3
		   & ~(MASK_FMAF | MASK_VIS3)
#endif
		   );

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;
  /* -mvis2 implies -mvis.  */
  if (TARGET_VIS2)
    target_flags |= MASK_VIS;

  /* -mvis3 implies -mvis2 and -mvis.  */
  if (TARGET_VIS3)
    target_flags |= MASK_VIS2 | MASK_VIS;

  /* Don't allow -mvis, -mvis2, -mvis3, or -mfmaf if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~(MASK_VIS | MASK_VIS2 | MASK_VIS3 | MASK_FMAF);

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available; -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* -mvis also implies -mv8plus on 32-bit.  */
  if (TARGET_VIS && ! TARGET_ARCH64)
    target_flags |= MASK_V8PLUS;

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;
  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA
	  || sparc_cpu == PROCESSOR_NIAGARA2
	  || sparc_cpu == PROCESSOR_NIAGARA3
	  || sparc_cpu == PROCESSOR_NIAGARA4))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;
  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    case PROCESSOR_NIAGARA3:
    case PROCESSOR_NIAGARA4:
      sparc_costs = &niagara3_costs;
      break;
    case PROCESSOR_NATIVE:
      gcc_unreachable ();
    };
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (TARGET_DEBUG_OPTIONS)
    dump_target_flags ("Final target_flags", target_flags);
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 2
			  : (sparc_cpu == PROCESSOR_ULTRASPARC3
			     ? 8 : 3)),
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 ((sparc_cpu == PROCESSOR_ULTRASPARC
			   || sparc_cpu == PROCESSOR_ULTRASPARC3
			   || sparc_cpu == PROCESSOR_NIAGARA
			   || sparc_cpu == PROCESSOR_NIAGARA2
			   || sparc_cpu == PROCESSOR_NIAGARA3
			   || sparc_cpu == PROCESSOR_NIAGARA4)
			  ? 64 : 32),
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Disable save slot sharing for call-clobbered registers by default.
     The IRA sharing algorithm works on single registers only and this
     pessimizes for double floating-point registers.  */
  if (!global_options_set.x_flag_ira_share_save_slots)
    flag_ira_share_save_slots = 0;
}
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
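/* For illustration: the SFmode constant 2.0f has bit pattern 0x40000000;
   that value is outside the signed 13-bit immediate range but has its low
   10 bits clear, so a single sethi can materialize it and this predicate
   returns nonzero for it.  */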
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
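/* Taken together, the three predicates above partition the SFmode bit
   patterns: values in the signed 13-bit range take a single mov
   (fp_mov_p), values with the low 10 bits clear take a single sethi
   (fp_sethi_p), and everything else needs the two-insn sethi + or
   sequence (fp_high_losum_p).  */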
/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partitioning into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}
/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
	  && can_use_mov_pic_label_ref (operands[1]))
	{
	  if (mode == SImode)
	    {
	      emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	  if (mode == DImode)
	    {
	      gcc_assert (TARGET_ARCH64);
	      emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	      return true;
	    }
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1]
	    = sparc_legitimize_pic_address (operands[1],
					    reload_in_progress
					    ? operands[0] : NULL_RTX);
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear or set to all-ones FP registers if TARGET_VIS, and
	 always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && (const_zero_operand (operands[1], mode)
	      || const_all_ones_operand (operands[1], mode)))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && ! can_create_pseudo_p ())))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp = op0;

  if (can_create_pseudo_p ())
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
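/* For illustration: loading 0x12345678 this way emits the equivalent of
     sethi %hi(0x12345678), %temp	! %temp = 0x12345400
     or    %temp, 0x278, %reg		! %reg  = 0x12345678
   where 0x278 is the low 10 bits of the constant and the sethi supplies
   the upper 22 bits.  */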
/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

static void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}
      else
	{
	  /* Text segment:	sethi	%uhi(symbol), %temp1
				sethi	%hi(symbol), %temp2
				or	%temp1, %ulo(symbol), %temp3
				sllx	%temp3, 32, %temp4
				or	%temp4, %temp2, %temp5
				or	%temp5, %lo(symbol), %reg  */
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi  (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}
#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits so that the
   result matches a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}
/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}
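/* For illustration (a worked example of ours, not from the original
   sources): for the sign-extended constant 0xffffffffedcc0000 (is_neg,
   low_bits == 0xedcc0000), high_bits is ~low_bits & 0xffffffff ==
   0x1233ffff, so the emitted sequence is roughly
     sethi %hi(0x1233ffff), %temp	! %temp = 0x1233fc00
     xor   %temp, -0x400, %reg		! %reg  = 0xffffffffedcc0000
   the XOR with a negative simm13 restores both the low bits and the
   sign-extended high half in a single insn.  */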
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp = op0;

  if (can_create_pseudo_p ())
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (can_create_pseudo_p ())
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
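/* For illustration: for the constant 0x00000000ffff0000 (high_bits == 0,
   low_bits == 0xffff0000) this computes lowest_bit_set == 16,
   highest_bit_set == 31 and all_bits_between_are_set == 1, which the
   callers below recognize as a shiftable 2-insn pattern.  */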
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
2001 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2002 unsigned HOST_WIDE_INT,
2005 static unsigned HOST_WIDE_INT
2006 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2007 unsigned HOST_WIDE_INT low_bits,
2008 int lowest_bit_set, int shift)
2010 HOST_WIDE_INT hi, lo;
2012 if (lowest_bit_set < 32)
2014 lo = (low_bits >> lowest_bit_set) << shift;
2015 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2020 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2022 gcc_assert (! (hi & lo));
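/* E.g., with lowest_bit_set == 40 and shift == 10, bit 40 of the
   original constant lands at bit 10 of the result.  */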
2026 /* Here we are sure to be arch64 and this is an integer constant
2027 being loaded into a register. Emit the most efficient
2028 insn sequence possible. Detection of all the 1-insn cases
2029 has been done already. */
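/* A worked example (illustrative): 0xffff0000 has its 16 set bits in a
   22-bit window, so it can be emitted as

	sethi	%hi(0x3fffc00), %reg
	sllx	%reg, 6, %reg

   since 0x3fffc00 << 6 == 0xffff0000.  */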
2031 sparc_emit_set_const64 (rtx op0, rtx op1)
2033 unsigned HOST_WIDE_INT high_bits, low_bits;
2034 int lowest_bit_set, highest_bit_set;
2035 int all_bits_between_are_set;
2038 /* Sanity check that we know what we are working with. */
2039 gcc_assert (TARGET_ARCH64
2040 && (GET_CODE (op0) == SUBREG
2041 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
2043 if (! can_create_pseudo_p ())
2046 if (GET_CODE (op1) != CONST_INT)
2048 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2053 temp = gen_reg_rtx (DImode);
2055 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2056 low_bits = (INTVAL (op1) & 0xffffffff);
2058 /* low_bits bits 0 --> 31
2059 high_bits bits 32 --> 63 */
2061 analyze_64bit_constant (high_bits, low_bits,
2062 &highest_bit_set, &lowest_bit_set,
2063 &all_bits_between_are_set);
2065 /* First try for a 2-insn sequence. */
/* These situations are preferred because the optimizer can
 * do more things with them:
 * 1) mov	-1, %reg
 *    sllx	%reg, shift, %reg
 * 2) mov	-1, %reg
 *    srlx	%reg, shift, %reg
 * 3) mov	some_small_const, %reg
 *    sllx	%reg, shift, %reg
 */
2076 if (((highest_bit_set == 63
2077 || lowest_bit_set == 0)
2078 && all_bits_between_are_set != 0)
2079 || ((highest_bit_set - lowest_bit_set) < 12))
2081 HOST_WIDE_INT the_const = -1;
2082 int shift = lowest_bit_set;
2084 if ((highest_bit_set != 63
2085 && lowest_bit_set != 0)
2086 || all_bits_between_are_set == 0)
2089 create_simple_focus_bits (high_bits, low_bits,
2092 else if (lowest_bit_set == 0)
2093 shift = -(63 - highest_bit_set);
2095 gcc_assert (SPARC_SIMM13_P (the_const));
2096 gcc_assert (shift != 0);
2098 emit_insn (gen_safe_SET64 (temp, the_const));
2100 emit_insn (gen_rtx_SET (VOIDmode,
2102 gen_rtx_ASHIFT (DImode,
2106 emit_insn (gen_rtx_SET (VOIDmode,
2108 gen_rtx_LSHIFTRT (DImode,
2110 GEN_INT (-shift))));
/* Now a range of 22 or fewer bits set somewhere.
 * 1) sethi	%hi(focus_bits), %reg
 *    sllx	%reg, shift, %reg
 * 2) sethi	%hi(focus_bits), %reg
 *    srlx	%reg, shift, %reg
 */
2120 if ((highest_bit_set - lowest_bit_set) < 21)
2122 unsigned HOST_WIDE_INT focus_bits =
2123 create_simple_focus_bits (high_bits, low_bits,
2124 lowest_bit_set, 10);
2126 gcc_assert (SPARC_SETHI_P (focus_bits));
2127 gcc_assert (lowest_bit_set != 10);
2129 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
2131 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2132 if (lowest_bit_set < 10)
2133 emit_insn (gen_rtx_SET (VOIDmode,
2135 gen_rtx_LSHIFTRT (DImode, temp,
2136 GEN_INT (10 - lowest_bit_set))));
2137 else if (lowest_bit_set > 10)
2138 emit_insn (gen_rtx_SET (VOIDmode,
2140 gen_rtx_ASHIFT (DImode, temp,
2141 GEN_INT (lowest_bit_set - 10))));
2145 /* 1) sethi %hi(low_bits), %reg
2146 * or %reg, %lo(low_bits), %reg
2147 * 2) sethi %hi(~low_bits), %reg
 *    xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
 */
  if (high_bits == 0
      || high_bits == 0xffffffff)
2153 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2154 (high_bits == 0xffffffff));
2158 /* Now, try 3-insn sequences. */
2160 /* 1) sethi %hi(high_bits), %reg
2161 * or %reg, %lo(high_bits), %reg
2162 * sllx %reg, 32, %reg
2166 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2170 /* We may be able to do something quick
2171 when the constant is negated, so try that. */
2172 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2173 (~low_bits) & 0xfffffc00))
2175 /* NOTE: The trailing bits get XOR'd so we need the
2176 non-negated bits, not the negated ones. */
2177 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2179 if ((((~high_bits) & 0xffffffff) == 0
2180 && ((~low_bits) & 0x80000000) == 0)
2181 || (((~high_bits) & 0xffffffff) == 0xffffffff
2182 && ((~low_bits) & 0x80000000) != 0))
2184 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
2186 if ((SPARC_SETHI_P (fast_int)
2187 && (~high_bits & 0xffffffff) == 0)
2188 || SPARC_SIMM13_P (fast_int))
2189 emit_insn (gen_safe_SET64 (temp, fast_int));
2191 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
2196 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2197 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2198 sparc_emit_set_const64 (temp, negated_const);
2201 /* If we are XOR'ing with -1, then we should emit a one's complement
2202 instead. This way the combiner will notice logical operations
2203 such as ANDN later on and substitute. */
2204 if (trailing_bits == 0x3ff)
2206 emit_insn (gen_rtx_SET (VOIDmode, op0,
2207 gen_rtx_NOT (DImode, temp)));
2211 emit_insn (gen_rtx_SET (VOIDmode,
2213 gen_safe_XOR64 (temp,
2214 (-0x400 | trailing_bits))));
2219 /* 1) sethi %hi(xxx), %reg
2220 * or %reg, %lo(xxx), %reg
2221 * sllx %reg, yyy, %reg
2223 * ??? This is just a generalized version of the low_bits==0
2224 * thing above, FIXME...
2226 if ((highest_bit_set - lowest_bit_set) < 32)
2228 unsigned HOST_WIDE_INT focus_bits =
2229 create_simple_focus_bits (high_bits, low_bits,
2232 /* We can't get here in this state. */
2233 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
2235 /* So what we know is that the set bits straddle the
2236 middle of the 64-bit word. */
2237 sparc_emit_set_const64_quick2 (op0, temp,
2243 /* 1) sethi %hi(high_bits), %reg
2244 * or %reg, %lo(high_bits), %reg
2245 * sllx %reg, 32, %reg
2246 * or %reg, low_bits, %reg
2248 if (SPARC_SIMM13_P(low_bits)
2249 && ((int)low_bits > 0))
2251 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
/* The easiest way, when all else fails, is full decomposition.  */
2256 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2258 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2260 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2261 return the mode to be used for the comparison. For floating-point,
2262 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2263 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2264 processing is needed. */
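/* E.g., a compare of (plus:SI a b) against zero yields CC_NOOVmode,
   signalling that conditions which rely on the overflow bit must not
   be tested on the result.  */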
2267 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2269 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2295 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2296 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2298 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2299 return CCX_NOOVmode;
2305 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2312 /* Emit the compare insn and return the CC reg for a CODE comparison
2313 with operands X and Y. */
2316 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2318 enum machine_mode mode;
2321 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2324 mode = SELECT_CC_MODE (code, x, y);
2326 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2327 fcc regs (cse can't tell they're really call clobbered regs and will
2328 remove a duplicate comparison even if there is an intervening function
2329 call - it will then try to reload the cc reg via an int reg which is why
2330 we need the movcc patterns). It is possible to provide the movcc
2331 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2332 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
to tell cse that CCFPE mode registers (even pseudos) are call
clobbered.  */
2336 /* ??? This is an experiment. Rather than making changes to cse which may
2337 or may not be easy/clean, we do our own cse. This is possible because
2338 we will generate hard registers. Cse knows they're call clobbered (it
2339 doesn't know the same thing about pseudos). If we guess wrong, no big
2340 deal, but if we win, great! */
2342 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2343 #if 1 /* experiment */
2346 /* We cycle through the registers to ensure they're all exercised. */
2347 static int next_fcc_reg = 0;
2348 /* Previous x,y for each fcc reg. */
2349 static rtx prev_args[4][2];
2351 /* Scan prev_args for x,y. */
2352 for (reg = 0; reg < 4; reg++)
2353 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2358 prev_args[reg][0] = x;
2359 prev_args[reg][1] = y;
2360 next_fcc_reg = (next_fcc_reg + 1) & 3;
2362 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2365 cc_reg = gen_reg_rtx (mode);
2366 #endif /* ! experiment */
2367 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2368 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2370 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
/* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
   will only result in an unrecognizable insn so no point in asserting.  */
2374 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2380 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2383 gen_compare_reg (rtx cmp)
2385 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2388 /* This function is used for v9 only.
2389 DEST is the target of the Scc insn.
2390 CODE is the code for an Scc's comparison.
2391 X and Y are the values we compare.
This function is needed to turn

	(set (reg:SI 110) (gt (reg:CCX 100 %icc) (const_int 0)))
   into
	(set (reg:SI 110) (gt:DI (reg:CCX 100 %icc) (const_int 0)))

   I.e., the instruction recognizer needs to see the mode of the comparison to
   find the right instruction.  We could use "gt:DI" right in the
   define_expand, but leaving it out allows us to handle DI, SI, etc.  */
2408 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2411 && (GET_MODE (x) == DImode
2412 || GET_MODE (dest) == DImode))
2415 /* Try to use the movrCC insns. */
2417 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2419 && v9_regcmp_p (compare_code))
/* Special case for op0 != 0.  This can be done with one instruction if
   dest == op0.  */
2427 if (compare_code == NE
2428 && GET_MODE (dest) == DImode
2429 && rtx_equal_p (op0, dest))
2431 emit_insn (gen_rtx_SET (VOIDmode, dest,
2432 gen_rtx_IF_THEN_ELSE (DImode,
2433 gen_rtx_fmt_ee (compare_code, DImode,
2440 if (reg_overlap_mentioned_p (dest, op0))
2442 /* Handle the case where dest == x.
2443 We "early clobber" the result. */
2444 op0 = gen_reg_rtx (GET_MODE (x));
2445 emit_move_insn (op0, x);
2448 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2449 if (GET_MODE (op0) != DImode)
2451 temp = gen_reg_rtx (DImode);
2452 convert_move (temp, op0, 0);
2456 emit_insn (gen_rtx_SET (VOIDmode, dest,
2457 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2458 gen_rtx_fmt_ee (compare_code, DImode,
2466 x = gen_compare_reg_1 (compare_code, x, y);
2469 gcc_assert (GET_MODE (x) != CC_NOOVmode
2470 && GET_MODE (x) != CCX_NOOVmode);
2472 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2473 emit_insn (gen_rtx_SET (VOIDmode, dest,
2474 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2475 gen_rtx_fmt_ee (compare_code,
2476 GET_MODE (x), x, y),
2477 const1_rtx, dest)));
2483 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2484 without jumps using the addx/subx instructions. */
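/* E.g., "sltu" can be computed without a branch as, roughly:

	subcc	%o1, %o2, %g0	! carry is set on unsigned borrow
	addx	%g0, 0, %o0	! o0 = carry

   (illustrative operand choice).  */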
2487 emit_scc_insn (rtx operands[])
2494 /* The quad-word fp compare library routines all return nonzero to indicate
2495 true, which is different from the equivalent libgcc routines, so we must
2496 handle them specially here. */
2497 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2499 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2500 GET_CODE (operands[1]));
2501 operands[2] = XEXP (operands[1], 0);
2502 operands[3] = XEXP (operands[1], 1);
2505 code = GET_CODE (operands[1]);
2509 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2510 more applications). The exception to this is "reg != 0" which can
2511 be done in one instruction on v9 (so we do it). */
2514 if (GET_MODE (x) == SImode)
2516 rtx pat = gen_seqsi_special (operands[0], x, y);
2520 else if (GET_MODE (x) == DImode)
2522 rtx pat = gen_seqdi_special (operands[0], x, y);
2530 if (GET_MODE (x) == SImode)
2532 rtx pat = gen_snesi_special (operands[0], x, y);
2536 else if (GET_MODE (x) == DImode)
2538 rtx pat = gen_snedi_special (operands[0], x, y);
2545 && GET_MODE (x) == DImode
2546 && gen_v9_scc (operands[0], code, x, y))
/* We can do LTU and GEU using the addx/subx instructions too.  And
   for GTU/LEU, if both operands are registers, swap them and fall
   back to the easy case.  */
2552 if (code == GTU || code == LEU)
2554 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2555 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2560 code = swap_condition (code);
2564 if (code == LTU || code == GEU)
2566 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2567 gen_rtx_fmt_ee (code, SImode,
2568 gen_compare_reg_1 (code, x, y),
/* All the possibilities to use addx/subx based sequences have been
   exhausted, try for a 3 instruction sequence using v9 conditional
   moves.  */
2576 if (TARGET_V9 && gen_v9_scc (operands[0], code, x, y))
2579 /* Nope, do branches. */
2583 /* Emit a conditional jump insn for the v9 architecture using comparison code
2584 CODE and jump target LABEL.
2585 This function exists to take advantage of the v9 brxx insns. */
2588 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2590 emit_jump_insn (gen_rtx_SET (VOIDmode,
2592 gen_rtx_IF_THEN_ELSE (VOIDmode,
2593 gen_rtx_fmt_ee (code, GET_MODE (op0),
2595 gen_rtx_LABEL_REF (VOIDmode, label),
2600 emit_conditional_branch_insn (rtx operands[])
2602 /* The quad-word fp compare library routines all return nonzero to indicate
2603 true, which is different from the equivalent libgcc routines, so we must
2604 handle them specially here. */
2605 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2607 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2608 GET_CODE (operands[0]));
2609 operands[1] = XEXP (operands[0], 0);
2610 operands[2] = XEXP (operands[0], 1);
2613 if (TARGET_ARCH64 && operands[2] == const0_rtx
2614 && GET_CODE (operands[1]) == REG
2615 && GET_MODE (operands[1]) == DImode)
2617 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2621 operands[1] = gen_compare_reg (operands[0]);
2622 operands[2] = const0_rtx;
2623 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2624 operands[1], operands[2]);
2625 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
/* Generate a DFmode part of a hard TFmode register.
   REG is the TFmode hard register, LOW is 1 for the
   low 64 bits of the register and 0 otherwise.  */
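/* E.g., for the TFmode register %f0 (spanning %f0-%f3), the high
   DFmode half is %f0 and the low half is %f2 on big-endian SPARC.  */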
2635 gen_df_reg (rtx reg, int low)
2637 int regno = REGNO (reg);
2639 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2640 regno += (TARGET_ARCH64 && SPARC_INT_REG_P (regno)) ? 1 : 2;
2641 return gen_rtx_REG (DFmode, regno);
2644 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2645 Unlike normal calls, TFmode operands are passed by reference. It is
2646 assumed that no more than 3 operands are required. */
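/* E.g., under the 64-bit ABI a TFmode addition becomes a call such as
   _Qp_add (&result, &a, &b), with all three TFmode values passed by
   reference (illustrative).  */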
2649 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2651 rtx ret_slot = NULL, arg[3], func_sym;
2654 /* We only expect to be called for conversions, unary, and binary ops. */
2655 gcc_assert (nargs == 2 || nargs == 3);
2657 for (i = 0; i < nargs; ++i)
2659 rtx this_arg = operands[i];
2662 /* TFmode arguments and return values are passed by reference. */
2663 if (GET_MODE (this_arg) == TFmode)
2665 int force_stack_temp;
2667 force_stack_temp = 0;
2668 if (TARGET_BUGGY_QP_LIB && i == 0)
2669 force_stack_temp = 1;
2671 if (GET_CODE (this_arg) == MEM
2672 && ! force_stack_temp)
2673 this_arg = XEXP (this_arg, 0);
2674 else if (CONSTANT_P (this_arg)
2675 && ! force_stack_temp)
2677 this_slot = force_const_mem (TFmode, this_arg);
2678 this_arg = XEXP (this_slot, 0);
2682 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2684 /* Operand 0 is the return value. We'll copy it out later. */
2686 emit_move_insn (this_slot, this_arg);
2688 ret_slot = this_slot;
2690 this_arg = XEXP (this_slot, 0);
2697 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2699 if (GET_MODE (operands[0]) == TFmode)
2702 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2703 arg[0], GET_MODE (arg[0]),
2704 arg[1], GET_MODE (arg[1]));
2706 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2707 arg[0], GET_MODE (arg[0]),
2708 arg[1], GET_MODE (arg[1]),
2709 arg[2], GET_MODE (arg[2]));
2712 emit_move_insn (operands[0], ret_slot);
2718 gcc_assert (nargs == 2);
2720 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2721 GET_MODE (operands[0]), 1,
2722 arg[1], GET_MODE (arg[1]));
2724 if (ret != operands[0])
2725 emit_move_insn (operands[0], ret);
/* Expand soft-float TFmode calls to SPARC ABI routines.  */
2732 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2754 emit_soft_tfmode_libcall (func, 3, operands);
2758 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2762 gcc_assert (code == SQRT);
2765 emit_soft_tfmode_libcall (func, 2, operands);
2769 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2776 switch (GET_MODE (operands[1]))
2789 case FLOAT_TRUNCATE:
2790 switch (GET_MODE (operands[0]))
2804 switch (GET_MODE (operands[1]))
2809 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2819 case UNSIGNED_FLOAT:
2820 switch (GET_MODE (operands[1]))
2825 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2836 switch (GET_MODE (operands[0]))
2850 switch (GET_MODE (operands[0]))
2867 emit_soft_tfmode_libcall (func, 2, operands);
/* Expand a hard-float TFmode operation.  All arguments must be in
   registers.  */
2874 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2878 if (GET_RTX_CLASS (code) == RTX_UNARY)
2880 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2881 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2885 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2886 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2887 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2888 operands[1], operands[2]);
2891 if (register_operand (operands[0], VOIDmode))
2894 dest = gen_reg_rtx (GET_MODE (operands[0]));
2896 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2898 if (dest != operands[0])
2899 emit_move_insn (operands[0], dest);
2903 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2905 if (TARGET_HARD_QUAD)
2906 emit_hard_tfmode_operation (code, operands);
2908 emit_soft_tfmode_binop (code, operands);
2912 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2914 if (TARGET_HARD_QUAD)
2915 emit_hard_tfmode_operation (code, operands);
2917 emit_soft_tfmode_unop (code, operands);
2921 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2923 if (TARGET_HARD_QUAD)
2924 emit_hard_tfmode_operation (code, operands);
2926 emit_soft_tfmode_cvt (code, operands);
/* Return nonzero if a branch/jump/call instruction will be emitting
   a nop into its delay slot.  */
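/* A filled delay slot is represented as a SEQUENCE wrapping the branch
   and its delay insn, which is what the test below looks for.  */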
2933 empty_delay_slot (rtx insn)
2937 /* If no previous instruction (should not happen), return true. */
2938 if (PREV_INSN (insn) == NULL)
2941 seq = NEXT_INSN (PREV_INSN (insn));
2942 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2948 /* Return nonzero if TRIAL can go into the call delay slot. */
2951 tls_call_delay (rtx trial)
/* Binutils allows
     call __tls_get_addr, %tgd_call (foo)
      add %l7, %o0, %o0, %tgd_add (foo)
   while Sun as/ld does not.  */
2959 if (TARGET_GNU_TLS || !TARGET_TLS)
2962 pat = PATTERN (trial);
2964 /* We must reject tgd_add{32|64}, i.e.
2965 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2966 and tldm_add{32|64}, i.e.
2967 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2969 if (GET_CODE (pat) == SET
2970 && GET_CODE (SET_SRC (pat)) == PLUS)
2972 rtx unspec = XEXP (SET_SRC (pat), 1);
2974 if (GET_CODE (unspec) == UNSPEC
2975 && (XINT (unspec, 1) == UNSPEC_TLSGD
2976 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2983 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2984 instruction. RETURN_P is true if the v9 variant 'return' is to be
2985 considered in the test too.
2987 TRIAL must be a SET whose destination is a REG appropriate for the
'restore' instruction or, if RETURN_P is true, for the 'return'
instruction.  */
2992 eligible_for_restore_insn (rtx trial, bool return_p)
2994 rtx pat = PATTERN (trial);
2995 rtx src = SET_SRC (pat);
2996 bool src_is_freg = false;
2999 /* Since we now can do moves between float and integer registers when
3000 VIS3 is enabled, we have to catch this case. We can allow such
moves when doing a 'return', however.  */
3003 if (GET_CODE (src_reg) == SUBREG)
3004 src_reg = SUBREG_REG (src_reg);
3005 if (GET_CODE (src_reg) == REG
3006 && SPARC_FP_REG_P (REGNO (src_reg)))
3009 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3010 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3011 && arith_operand (src, GET_MODE (src))
3015 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3017 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3020 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3021 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3022 && arith_double_operand (src, GET_MODE (src))
3024 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3026 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3027 else if (! TARGET_FPU && register_operand (src, SFmode))
3030 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3031 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3034 /* If we have the 'return' instruction, anything that does not use
3035 local or output registers and can go into a delay slot wins. */
3038 && !epilogue_renumber (&pat, 1)
3039 && get_attr_in_uncond_branch_delay (trial)
3040 == IN_UNCOND_BRANCH_DELAY_TRUE)
3043 /* The 'restore src1,src2,dest' pattern for SImode. */
3044 else if (GET_CODE (src) == PLUS
3045 && register_operand (XEXP (src, 0), SImode)
3046 && arith_operand (XEXP (src, 1), SImode))
3049 /* The 'restore src1,src2,dest' pattern for DImode. */
3050 else if (GET_CODE (src) == PLUS
3051 && register_operand (XEXP (src, 0), DImode)
3052 && arith_double_operand (XEXP (src, 1), DImode))
3055 /* The 'restore src1,%lo(src2),dest' pattern. */
3056 else if (GET_CODE (src) == LO_SUM
3057 && ! TARGET_CM_MEDMID
3058 && ((register_operand (XEXP (src, 0), SImode)
3059 && immediate_operand (XEXP (src, 1), SImode))
3061 && register_operand (XEXP (src, 0), DImode)
3062 && immediate_operand (XEXP (src, 1), DImode))))
3065 /* The 'restore src,src,dest' pattern. */
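/* A left shift by 1 computes src + src, which is exactly what
   "restore src, src, dest" performs; hence the ASHIFT-by-1 test
   below.  */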
3066 else if (GET_CODE (src) == ASHIFT
3067 && (register_operand (XEXP (src, 0), SImode)
3068 || register_operand (XEXP (src, 0), DImode))
3069 && XEXP (src, 1) == const1_rtx)
3075 /* Return nonzero if TRIAL can go into the function return's delay slot. */
3078 eligible_for_return_delay (rtx trial)
3083 if (GET_CODE (trial) != INSN)
3086 if (get_attr_length (trial) != 1)
3089 /* If the function uses __builtin_eh_return, the eh_return machinery
3090 occupies the delay slot. */
3091 if (crtl->calls_eh_return)
3094 /* In the case of a leaf or flat function, anything can go into the slot. */
3095 if (sparc_leaf_function_p || TARGET_FLAT)
3097 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
3099 pat = PATTERN (trial);
3100 if (GET_CODE (pat) == PARALLEL)
3106 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
3108 rtx expr = XVECEXP (pat, 0, i);
3109 if (GET_CODE (expr) != SET)
3111 if (GET_CODE (SET_DEST (expr)) != REG)
3113 regno = REGNO (SET_DEST (expr));
3114 if (regno >= 8 && regno < 24)
3117 return !epilogue_renumber (&pat, 1)
3118 && (get_attr_in_uncond_branch_delay (trial)
3119 == IN_UNCOND_BRANCH_DELAY_TRUE);
3122 if (GET_CODE (pat) != SET)
3125 if (GET_CODE (SET_DEST (pat)) != REG)
3128 regno = REGNO (SET_DEST (pat));
3130 /* Otherwise, only operations which can be done in tandem with
3131 a `restore' or `return' insn can go into the delay slot. */
3132 if (regno >= 8 && regno < 24)
/* If this instruction sets up a floating point register and we have a
   return instruction, it can probably go in.  But restore will not work
   with FP_REGS.  */
3138 if (! SPARC_INT_REG_P (regno))
3140 && !epilogue_renumber (&pat, 1)
3141 && get_attr_in_uncond_branch_delay (trial)
3142 == IN_UNCOND_BRANCH_DELAY_TRUE);
3144 return eligible_for_restore_insn (trial, true);
3147 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
3150 eligible_for_sibcall_delay (rtx trial)
3154 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3157 if (get_attr_length (trial) != 1)
3160 pat = PATTERN (trial);
3162 if (sparc_leaf_function_p || TARGET_FLAT)
3164 /* If the tail call is done using the call instruction,
3165 we have to restore %o7 in the delay slot. */
3166 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3169 /* %g1 is used to build the function address */
3170 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3176 /* Otherwise, only operations which can be done in tandem with
3177 a `restore' insn can go into the delay slot. */
3178 if (GET_CODE (SET_DEST (pat)) != REG
3179 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3180 || ! SPARC_INT_REG_P (REGNO (SET_DEST (pat))))
/* If it mentions %o7, it can't go in, because sibcall will clobber it
   before it is restored.  */
3185 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3188 return eligible_for_restore_insn (trial, false);
3191 /* Determine if it's legal to put X into the constant pool. This
3192 is not possible if X contains the address of a symbol that is
3193 not constant (TLS) or not known at final link time (PIC). */
3196 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
3198 switch (GET_CODE (x))
3203 /* Accept all non-symbolic constants. */
3207 /* Labels are OK iff we are non-PIC. */
3208 return flag_pic != 0;
3211 /* 'Naked' TLS symbol references are never OK,
3212 non-TLS symbols are OK iff we are non-PIC. */
3213 if (SYMBOL_REF_TLS_MODEL (x))
3216 return flag_pic != 0;
3219 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
3222 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
3223 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
3231 /* Global Offset Table support. */
3232 static GTY(()) rtx got_helper_rtx = NULL_RTX;
3233 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
3235 /* Return the SYMBOL_REF for the Global Offset Table. */
3237 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
3242 if (!sparc_got_symbol)
3243 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3245 return sparc_got_symbol;
3248 /* Ensure that we are not using patterns that are not OK with PIC. */
3258 op = recog_data.operand[i];
3259 gcc_assert (GET_CODE (op) != SYMBOL_REF
3260 && (GET_CODE (op) != CONST
3261 || (GET_CODE (XEXP (op, 0)) == MINUS
3262 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3263 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
3270 /* Return true if X is an address which needs a temporary register when
3271 reloaded while generating PIC code. */
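/* E.g., (const (plus (symbol_ref "foo") (const_int 0x2000))) needs a
   scratch register, since 0x2000 does not fit in a signed 13-bit
   immediate.  */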
3274 pic_address_needs_scratch (rtx x)
3276 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
3277 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3278 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3279 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3280 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3286 /* Determine if a given RTX is a valid constant. We already know this
3287 satisfies CONSTANT_P. */
3290 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3292 switch (GET_CODE (x))
3296 if (sparc_tls_referenced_p (x))
3301 if (GET_MODE (x) == VOIDmode)
3304 /* Floating point constants are generally not ok.
3305 The only exception is 0.0 and all-ones in VIS. */
3307 && SCALAR_FLOAT_MODE_P (mode)
3308 && (const_zero_operand (x, mode)
3309 || const_all_ones_operand (x, mode)))
3315 /* Vector constants are generally not ok.
3316 The only exception is 0 or -1 in VIS. */
3318 && (const_zero_operand (x, mode)
3319 || const_all_ones_operand (x, mode)))
3331 /* Determine if a given RTX is a valid constant address. */
3334 constant_address_p (rtx x)
3336 switch (GET_CODE (x))
3344 if (flag_pic && pic_address_needs_scratch (x))
3346 return sparc_legitimate_constant_p (Pmode, x);
3349 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3356 /* Nonzero if the constant value X is a legitimate general operand
3357 when generating PIC code. It is given that flag_pic is on and
3358 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3361 legitimate_pic_operand_p (rtx x)
3363 if (pic_address_needs_scratch (x))
3365 if (sparc_tls_referenced_p (x))
3370 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3372 && INTVAL (X) >= -0x1000 \
3373 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3375 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3377 && INTVAL (X) >= -0x1000 \
3378 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
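/* Both are the signed 13-bit immediate range [-0x1000, 0xfff] shrunk
   so that the last byte accessed still lies within range; the OLO10
   variant additionally keeps 0x3ff in reserve, since the offset is
   combined with an up-to-0x3ff %lo() addend.  */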
3380 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3382 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3383 ordinarily. This changes a bit when generating PIC. */
3386 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3388 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3390 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3392 else if (GET_CODE (addr) == PLUS)
3394 rs1 = XEXP (addr, 0);
3395 rs2 = XEXP (addr, 1);
/* Canonicalize.  REG comes first; if there are no regs,
   LO_SUM comes first.  */
3400 && GET_CODE (rs1) != SUBREG
3402 || GET_CODE (rs2) == SUBREG
3403 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3405 rs1 = XEXP (addr, 1);
3406 rs2 = XEXP (addr, 0);
3410 && rs1 == pic_offset_table_rtx
3412 && GET_CODE (rs2) != SUBREG
3413 && GET_CODE (rs2) != LO_SUM
3414 && GET_CODE (rs2) != MEM
3415 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3416 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3417 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3419 || GET_CODE (rs1) == SUBREG)
3420 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3425 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3426 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3428 /* We prohibit REG + REG for TFmode when there are no quad move insns
3429 and we consequently need to split. We do this because REG+REG
3430 is not an offsettable address. If we get the situation in reload
3431 where source and destination of a movtf pattern are both MEMs with
3432 REG+REG address, then only one of them gets converted to an
3433 offsettable address. */
3435 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3438 /* We prohibit REG + REG on ARCH32 if not optimizing for
3439 DFmode/DImode because then mem_min_alignment is likely to be zero
after reload and the forced split would lack a matching splitter
pattern.  */
3442 if (TARGET_ARCH32 && !optimize
3443 && (mode == DFmode || mode == DImode))
3446 else if (USE_AS_OFFSETABLE_LO10
3447 && GET_CODE (rs1) == LO_SUM
3449 && ! TARGET_CM_MEDMID
3450 && RTX_OK_FOR_OLO10_P (rs2, mode))
3453 imm1 = XEXP (rs1, 1);
3454 rs1 = XEXP (rs1, 0);
3455 if (!CONSTANT_P (imm1)
3456 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3460 else if (GET_CODE (addr) == LO_SUM)
3462 rs1 = XEXP (addr, 0);
3463 imm1 = XEXP (addr, 1);
3465 if (!CONSTANT_P (imm1)
3466 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3469 /* We can't allow TFmode in 32-bit mode, because an offset greater
3470 than the alignment (8) may cause the LO_SUM to overflow. */
3471 if (mode == TFmode && TARGET_ARCH32)
3474 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3479 if (GET_CODE (rs1) == SUBREG)
3480 rs1 = SUBREG_REG (rs1);
3486 if (GET_CODE (rs2) == SUBREG)
3487 rs2 = SUBREG_REG (rs2);
3494 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3495 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3500 if ((! SPARC_INT_REG_P (REGNO (rs1))
3501 && REGNO (rs1) != FRAME_POINTER_REGNUM
3502 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3504 && (! SPARC_INT_REG_P (REGNO (rs2))
3505 && REGNO (rs2) != FRAME_POINTER_REGNUM
3506 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3512 /* Return the SYMBOL_REF for the tls_get_addr function. */
3514 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3517 sparc_tls_get_addr (void)
3519 if (!sparc_tls_symbol)
3520 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3522 return sparc_tls_symbol;
3525 /* Return the Global Offset Table to be used in TLS mode. */
3528 sparc_tls_got (void)
3530 /* In PIC mode, this is just the PIC offset table. */
3533 crtl->uses_pic_offset_table = 1;
3534 return pic_offset_table_rtx;
3537 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3538 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3539 if (TARGET_SUN_TLS && TARGET_ARCH32)
3541 load_got_register ();
3542 return global_offset_table_rtx;
3545 /* In all other cases, we load a new pseudo with the GOT symbol. */
3546 return copy_to_reg (sparc_got ());
3549 /* Return true if X contains a thread-local symbol. */
3552 sparc_tls_referenced_p (rtx x)
3554 if (!TARGET_HAVE_TLS)
3557 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3558 x = XEXP (XEXP (x, 0), 0);
3560 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3563 /* That's all we handle in sparc_legitimize_tls_address for now. */
3567 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3568 this (thread-local) address. */
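/* For the global-dynamic model below, the emitted sequence is roughly
   (illustrative):

	sethi	%tgd_hi22(sym), %reg1
	add	%reg1, %tgd_lo10(sym), %reg2
	add	%l7, %reg2, %o0, %tgd_add(sym)
	call	__tls_get_addr, %tgd_call(sym)  */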
3571 sparc_legitimize_tls_address (rtx addr)
3573 rtx temp1, temp2, temp3, ret, o0, got, insn;
3575 gcc_assert (can_create_pseudo_p ());
3577 if (GET_CODE (addr) == SYMBOL_REF)
3578 switch (SYMBOL_REF_TLS_MODEL (addr))
3580 case TLS_MODEL_GLOBAL_DYNAMIC:
3582 temp1 = gen_reg_rtx (SImode);
3583 temp2 = gen_reg_rtx (SImode);
3584 ret = gen_reg_rtx (Pmode);
3585 o0 = gen_rtx_REG (Pmode, 8);
3586 got = sparc_tls_got ();
3587 emit_insn (gen_tgd_hi22 (temp1, addr));
3588 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3591 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3592 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3597 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3598 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3601 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3602 insn = get_insns ();
3604 emit_libcall_block (insn, ret, o0, addr);
3607 case TLS_MODEL_LOCAL_DYNAMIC:
3609 temp1 = gen_reg_rtx (SImode);
3610 temp2 = gen_reg_rtx (SImode);
3611 temp3 = gen_reg_rtx (Pmode);
3612 ret = gen_reg_rtx (Pmode);
3613 o0 = gen_rtx_REG (Pmode, 8);
3614 got = sparc_tls_got ();
3615 emit_insn (gen_tldm_hi22 (temp1));
3616 emit_insn (gen_tldm_lo10 (temp2, temp1));
3619 emit_insn (gen_tldm_add32 (o0, got, temp2));
3620 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3625 emit_insn (gen_tldm_add64 (o0, got, temp2));
3626 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3629 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3630 insn = get_insns ();
3632 emit_libcall_block (insn, temp3, o0,
3633 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3634 UNSPEC_TLSLD_BASE));
3635 temp1 = gen_reg_rtx (SImode);
3636 temp2 = gen_reg_rtx (SImode);
3637 emit_insn (gen_tldo_hix22 (temp1, addr));
3638 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3640 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3642 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3645 case TLS_MODEL_INITIAL_EXEC:
3646 temp1 = gen_reg_rtx (SImode);
3647 temp2 = gen_reg_rtx (SImode);
3648 temp3 = gen_reg_rtx (Pmode);
3649 got = sparc_tls_got ();
3650 emit_insn (gen_tie_hi22 (temp1, addr));
3651 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3653 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3655 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3658 ret = gen_reg_rtx (Pmode);
3660 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3663 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3667 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3670 case TLS_MODEL_LOCAL_EXEC:
3671 temp1 = gen_reg_rtx (Pmode);
3672 temp2 = gen_reg_rtx (Pmode);
3675 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3676 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3680 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3681 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3683 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3690 else if (GET_CODE (addr) == CONST)
3694 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3696 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3697 offset = XEXP (XEXP (addr, 0), 1);
3699 base = force_operand (base, NULL_RTX);
3700 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3701 offset = force_reg (Pmode, offset);
3702 ret = gen_rtx_PLUS (Pmode, base, offset);
3706 gcc_unreachable (); /* for now ... */
3711 /* Legitimize PIC addresses. If the address is already position-independent,
3712 we return ORIG. Newly generated position-independent addresses go into a
reg.  This is REG if nonzero, otherwise we allocate register(s) as
necessary.  */
3717 sparc_legitimize_pic_address (rtx orig, rtx reg)
3719 bool gotdata_op = false;
3721 if (GET_CODE (orig) == SYMBOL_REF
3722 /* See the comment in sparc_expand_move. */
3723 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3725 rtx pic_ref, address;
3730 gcc_assert (can_create_pseudo_p ());
3731 reg = gen_reg_rtx (Pmode);
3736 /* If not during reload, allocate another temp reg here for loading
in the address, so that these instructions can be optimized
properly.  */
3739 rtx temp_reg = (! can_create_pseudo_p ()
3740 ? reg : gen_reg_rtx (Pmode));
3742 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3743 won't get confused into thinking that these two instructions
3744 are loading in the true address of the symbol. If in the
3745 future a PIC rtx exists, that should be used instead. */
3748 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3749 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3753 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3754 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3762 crtl->uses_pic_offset_table = 1;
3766 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3767 pic_offset_table_rtx,
3770 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3771 pic_offset_table_rtx,
3777 = gen_const_mem (Pmode,
3778 gen_rtx_PLUS (Pmode,
3779 pic_offset_table_rtx, address));
3780 insn = emit_move_insn (reg, pic_ref);
/* Put a REG_EQUAL note on this insn, so that it can be optimized
   by loop.  */
3785 set_unique_reg_note (insn, REG_EQUAL, orig);
3788 else if (GET_CODE (orig) == CONST)
3792 if (GET_CODE (XEXP (orig, 0)) == PLUS
3793 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3798 gcc_assert (can_create_pseudo_p ());
3799 reg = gen_reg_rtx (Pmode);
3802 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3803 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3804 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3805 base == reg ? NULL_RTX : reg);
3807 if (GET_CODE (offset) == CONST_INT)
3809 if (SMALL_INT (offset))
3810 return plus_constant (base, INTVAL (offset));
3811 else if (can_create_pseudo_p ())
3812 offset = force_reg (Pmode, offset);
3814 /* If we reach here, then something is seriously wrong. */
3817 return gen_rtx_PLUS (Pmode, base, offset);
3819 else if (GET_CODE (orig) == LABEL_REF)
3820 /* ??? We ought to be checking that the register is live instead, in case
3821 it is eliminated. */
3822 crtl->uses_pic_offset_table = 1;
3827 /* Try machine-dependent ways of modifying an illegitimate address X
3828 to be legitimate. If we find one, return the new, valid address.
3830 OLDX is the address as it was before break_out_memory_refs was called.
3831 In some cases it is useful to look at this to decide what needs to be done.
3833 MODE is the mode of the operand pointed to by X.
3835 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
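/* E.g., (plus (reg) (const_int 0x2000)) is rewritten as REG+REG here,
   with the out-of-range constant first loaded into a register.  */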
3838 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3839 enum machine_mode mode)
3843 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3844 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3845 force_operand (XEXP (x, 0), NULL_RTX));
3846 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3847 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3848 force_operand (XEXP (x, 1), NULL_RTX));
3849 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3850 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3852 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3853 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3854 force_operand (XEXP (x, 1), NULL_RTX));
3856 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3859 if (sparc_tls_referenced_p (x))
3860 x = sparc_legitimize_tls_address (x);
3862 x = sparc_legitimize_pic_address (x, NULL_RTX);
3863 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3864 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3865 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3866 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3867 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3868 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3869 else if (GET_CODE (x) == SYMBOL_REF
3870 || GET_CODE (x) == CONST
3871 || GET_CODE (x) == LABEL_REF)
3872 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3877 /* Delegitimize an address that was legitimized by the above function. */
3880 sparc_delegitimize_address (rtx x)
3882 x = delegitimize_mem_from_attrs (x);
3884 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
3885 switch (XINT (XEXP (x, 1), 1))
3887 case UNSPEC_MOVE_PIC:
3889 x = XVECEXP (XEXP (x, 1), 0, 0);
3890 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3896 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3897 if (GET_CODE (x) == MINUS
3898 && REG_P (XEXP (x, 0))
3899 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3900 && GET_CODE (XEXP (x, 1)) == LO_SUM
3901 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3902 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3904 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3905 gcc_assert (GET_CODE (x) == LABEL_REF);
3911 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3912 replace the input X, or the original X if no replacement is called for.
The output parameter *WIN is 1 if the calling macro should goto WIN,
0 if it should not.
3916 For SPARC, we wish to handle addresses by splitting them into
3917 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3918 This cuts the number of extra insns by one.
3920 Do nothing when generating PIC code and the address is a symbolic
3921 operand or requires a scratch register. */
3924 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3925 int opnum, int type,
3926 int ind_levels ATTRIBUTE_UNUSED, int *win)
3928 /* Decompose SImode constants into HIGH+LO_SUM. */
3930 && (mode != TFmode || TARGET_ARCH64)
3931 && GET_MODE (x) == SImode
3932 && GET_CODE (x) != LO_SUM
3933 && GET_CODE (x) != HIGH
3934 && sparc_cmodel <= CM_MEDLOW
3936 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3938 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3939 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3940 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3941 opnum, (enum reload_type)type);
3946 /* We have to recognize what we have already generated above. */
3947 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3949 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3950 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3951 opnum, (enum reload_type)type);
3960 /* Return true if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for.

   In PIC mode,

	(mem:HI [%l7+a])

   is not equivalent to
3969 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3971 because [%l7+a+1] is interpreted as the address of (a+1). */
3975 sparc_mode_dependent_address_p (const_rtx addr)
3977 if (flag_pic && GET_CODE (addr) == PLUS)
3979 rtx op0 = XEXP (addr, 0);
3980 rtx op1 = XEXP (addr, 1);
3981 if (op0 == pic_offset_table_rtx
3982 && symbolic_operand (op1, VOIDmode))
3989 #ifdef HAVE_GAS_HIDDEN
3990 # define USE_HIDDEN_LINKONCE 1
3992 # define USE_HIDDEN_LINKONCE 0
3996 get_pc_thunk_name (char name[32], unsigned int regno)
3998 const char *reg_name = reg_names[regno];
/* Skip the leading '%' as that cannot be used in a
   symbol name.  */
reg_name += 1;
4004 if (USE_HIDDEN_LINKONCE)
4005 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
4007 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
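/* E.g., for %l7 this produces the hidden linkonce symbol
   "__sparc_get_pc_thunk.l7", or a local LADDPC label when such
   sections are not available.  */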
4010 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
4013 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
4015 int orig_flag_pic = flag_pic;
4018 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
4021 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
4023 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
4024 flag_pic = orig_flag_pic;
4029 /* Emit code to load the GOT register. */
4032 load_got_register (void)
4034 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
4035 if (!global_offset_table_rtx)
4036 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
4038 if (TARGET_VXWORKS_RTP)
4039 emit_insn (gen_vxworks_load_got ());
4042 /* The GOT symbol is subject to a PC-relative relocation so we need a
4043 helper function to add the PC value and thus get the final value. */
4044 if (!got_helper_rtx)
4047 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
4048 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4051 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
4053 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
4056 /* Need to emit this whether or not we obey regdecls,
4057 since setjmp/longjmp can cause life info to screw up.
4058 ??? In the case where we don't obey regdecls, this is not sufficient
4059 since we may not fall out the bottom. */
4060 emit_use (global_offset_table_rtx);
4063 /* Emit a call instruction with the pattern given by PAT. ADDR is the
4064 address of the call target. */
4067 sparc_emit_call_insn (rtx pat, rtx addr)
4071 insn = emit_call_insn (pat);
4073 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
4074 if (TARGET_VXWORKS_RTP
4076 && GET_CODE (addr) == SYMBOL_REF
4077 && (SYMBOL_REF_DECL (addr)
4078 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
4079 : !SYMBOL_REF_LOCAL_P (addr)))
4081 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
4082 crtl->uses_pic_offset_table = 1;
4086 /* Return 1 if RTX is a MEM which is known to be aligned to at
4087 least a DESIRED byte boundary. */
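/* E.g., (mem:DF (plus (reg %i0) (const_int 8))) is considered 8-byte
   aligned here when the recorded pointer alignment of %i0 is at least
   8 bytes.  */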
4090 mem_min_alignment (rtx mem, int desired)
4092 rtx addr, base, offset;
4094 /* If it's not a MEM we can't accept it. */
4095 if (GET_CODE (mem) != MEM)
4099 if (!TARGET_UNALIGNED_DOUBLES
4100 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
4103 /* ??? The rest of the function predates MEM_ALIGN so
4104 there is probably a bit of redundancy. */
4105 addr = XEXP (mem, 0);
4106 base = offset = NULL_RTX;
4107 if (GET_CODE (addr) == PLUS)
4109 if (GET_CODE (XEXP (addr, 0)) == REG)
4111 base = XEXP (addr, 0);
4113 /* What we are saying here is that if the base
4114 REG is aligned properly, the compiler will make
sure any REG based index upon it will be so
as well.  */
4117 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
4118 offset = XEXP (addr, 1);
4120 offset = const0_rtx;
4123 else if (GET_CODE (addr) == REG)
4126 offset = const0_rtx;
4129 if (base != NULL_RTX)
4131 int regno = REGNO (base);
4133 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4135 /* Check if the compiler has recorded some information
4136 about the alignment of the base REG. If reload has
4137 completed, we already matched with proper alignments.
4138 If not running global_alloc, reload might give us
an unaligned pointer to the local stack, though.  */
4141 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4142 || (optimize && reload_completed))
4143 && (INTVAL (offset) & (desired - 1)) == 0)
4148 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4152 else if (! TARGET_UNALIGNED_DOUBLES
4153 || CONSTANT_P (addr)
4154 || GET_CODE (addr) == LO_SUM)
4156 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4157 is true, in which case we can only assume that an access is aligned if
4158 it is to a constant address, or the address involves a LO_SUM. */
4162 /* An obviously unaligned address. */
4167 /* Vectors to keep interesting information about registers where it can easily
4168 be got. We used to use the actual mode value as the bit number, but there
4169 are more than 32 modes now. Instead we use two tables: one indexed by
4170 hard register number, and one indexed by mode. */
4172 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4173 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
4174 mapped into one sparc_mode_class mode. */
4176 enum sparc_mode_class {
4177 S_MODE, D_MODE, T_MODE, O_MODE,
4178 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4182 /* Modes for single-word and smaller quantities. */
4183 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4185 /* Modes for double-word and smaller quantities. */
4186 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4188 /* Modes for quad-word and smaller quantities. */
4189 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4191 /* Modes for 8-word and smaller quantities. */
4192 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4194 /* Modes for single-float quantities. We must allow any single word or
4195 smaller quantity. This is because the fix/float conversion instructions
4196 take integer inputs/outputs from the float registers. */
4197 #define SF_MODES (S_MODES)
4199 /* Modes for double-float and smaller quantities. */
4200 #define DF_MODES (D_MODES)
4202 /* Modes for quad-float and smaller quantities. */
4203 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
4205 /* Modes for quad-float pairs and smaller quantities. */
4206 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
4208 /* Modes for double-float only quantities. */
4209 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4211 /* Modes for quad-float and double-float only quantities. */
4212 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
4214 /* Modes for quad-float pairs and double-float only quantities. */
4215 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
4217 /* Modes for condition codes. */
4218 #define CC_MODES (1 << (int) CC_MODE)
4219 #define CCFP_MODES (1 << (int) CCFP_MODE)
4221 /* Value is 1 if register/mode pair is acceptable on sparc.
4222 The funny mixture of D and T modes is because integer operations
4223 do not specially operate on tetra quantities, so non-quad-aligned
4224 registers can hold quadword quantities (except %o4 and %i4 because
4225 they cross fixed registers). */
4227 /* This points to either the 32 bit or the 64 bit version. */
4228 const int *hard_regno_mode_classes;
4230 static const int hard_32bit_mode_classes[] = {
4231 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4232 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4233 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4234 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4236 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4237 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4238 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4239 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4241 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4242 and none can hold SFmode/SImode values. */
4243 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4244 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4245 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4246 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4249 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4251 /* %icc, %sfp, %gsr */
4252 CC_MODES, 0, D_MODES
4255 static const int hard_64bit_mode_classes[] = {
4256 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4257 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4258 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4259 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4261 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4262 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4263 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4264 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4266 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4267 and none can hold SFmode/SImode values. */
4268 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4269 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4270 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4271 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4274 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4276 /* %icc, %sfp, %gsr */
4277 CC_MODES, 0, D_MODES
4280 int sparc_mode_class [NUM_MACHINE_MODES];
4282 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4285 sparc_init_modes (void)
4289 for (i = 0; i < NUM_MACHINE_MODES; i++)
4291 switch (GET_MODE_CLASS (i))
4294 case MODE_PARTIAL_INT:
4295 case MODE_COMPLEX_INT:
4296 if (GET_MODE_SIZE (i) <= 4)
4297 sparc_mode_class[i] = 1 << (int) S_MODE;
4298 else if (GET_MODE_SIZE (i) == 8)
4299 sparc_mode_class[i] = 1 << (int) D_MODE;
4300 else if (GET_MODE_SIZE (i) == 16)
4301 sparc_mode_class[i] = 1 << (int) T_MODE;
4302 else if (GET_MODE_SIZE (i) == 32)
4303 sparc_mode_class[i] = 1 << (int) O_MODE;
4305 sparc_mode_class[i] = 0;
4307 case MODE_VECTOR_INT:
4308 if (GET_MODE_SIZE (i) <= 4)
4309 sparc_mode_class[i] = 1 << (int)SF_MODE;
4310 else if (GET_MODE_SIZE (i) == 8)
4311 sparc_mode_class[i] = 1 << (int)DF_MODE;
4314 case MODE_COMPLEX_FLOAT:
4315 if (GET_MODE_SIZE (i) <= 4)
4316 sparc_mode_class[i] = 1 << (int) SF_MODE;
4317 else if (GET_MODE_SIZE (i) == 8)
4318 sparc_mode_class[i] = 1 << (int) DF_MODE;
4319 else if (GET_MODE_SIZE (i) == 16)
4320 sparc_mode_class[i] = 1 << (int) TF_MODE;
4321 else if (GET_MODE_SIZE (i) == 32)
4322 sparc_mode_class[i] = 1 << (int) OF_MODE;
4324 sparc_mode_class[i] = 0;
4327 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4328 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4330 sparc_mode_class[i] = 1 << (int) CC_MODE;
4333 sparc_mode_class[i] = 0;
4339 hard_regno_mode_classes = hard_64bit_mode_classes;
4341 hard_regno_mode_classes = hard_32bit_mode_classes;
4343 /* Initialize the array used by REGNO_REG_CLASS. */
4344 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4346 if (i < 16 && TARGET_V8PLUS)
4347 sparc_regno_reg_class[i] = I64_REGS;
4348 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4349 sparc_regno_reg_class[i] = GENERAL_REGS;
4351 sparc_regno_reg_class[i] = FP_REGS;
4353 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4355 sparc_regno_reg_class[i] = FPCC_REGS;
4357 sparc_regno_reg_class[i] = NO_REGS;
4361 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4364 save_global_or_fp_reg_p (unsigned int regno,
4365 int leaf_function ATTRIBUTE_UNUSED)
4367 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4370 /* Return whether the return address register (%i7) is needed. */
4373 return_addr_reg_needed_p (int leaf_function)
4375 /* If it is live, for example because of __builtin_return_address (0). */
4376 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
/* Otherwise, it is needed as a save register if %o7 is clobbered.  */
4381 /* Loading the GOT register clobbers %o7. */
4382 || crtl->uses_pic_offset_table
4383 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
4389 /* Return whether REGNO, a local or in register, must be saved/restored. */
4392 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4394 /* General case: call-saved registers live at some point. */
4395 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4398 /* Frame pointer register (%fp) if needed. */
4399 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4402 /* Return address register (%i7) if needed. */
4403 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4406 /* GOT register (%l7) if needed. */
4407 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4410 /* If the function accesses prior frames, the frame pointer and the return
4411 address of the previous frame must be saved on the stack. */
4412 if (crtl->accesses_prior_frames
4413 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4419 /* Compute the frame size required by the function. This function is called
4420 during the reload pass and also by sparc_expand_prologue. */
4423 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4425 HOST_WIDE_INT frame_size, apparent_frame_size;
4426 int args_size, n_global_fp_regs = 0;
4427 bool save_local_in_regs_p = false;
4430 /* If the function allocates dynamic stack space, the dynamic offset is
4431 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4432 if (leaf_function && !cfun->calls_alloca)
4435 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4437 /* Calculate space needed for global registers. */
4439 for (i = 0; i < 8; i++)
4440 if (save_global_or_fp_reg_p (i, 0))
4441 n_global_fp_regs += 2;
4443 for (i = 0; i < 8; i += 2)
4444 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4445 n_global_fp_regs += 2;
4447 /* In the flat window model, find out which local and in registers need to
4448 be saved. We don't reserve space in the current frame for them as they
4449 will be spilled into the register window save area of the caller's frame.
4450 However, as soon as we use this register window save area, we must create
4451 that of the current frame to make it the live one. */
4453 for (i = 16; i < 32; i++)
4454 if (save_local_or_in_reg_p (i, leaf_function))
4456 save_local_in_regs_p = true;
4460 /* Calculate space needed for FP registers. */
4461 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4462 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4463 n_global_fp_regs += 2;
4466 && n_global_fp_regs == 0
4468 && !save_local_in_regs_p)
4469 frame_size = apparent_frame_size = 0;
4472 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4473 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4474 apparent_frame_size += n_global_fp_regs * 4;
4476 /* We need to add the size of the outgoing argument area. */
4477 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4479 /* And that of the register window save area. */
4480 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4482 /* Finally, bump to the appropriate alignment. */
4483 frame_size = SPARC_STACK_ALIGN (frame_size);
4486 /* Set up values for use in prologue and epilogue. */
4487 sparc_frame_size = frame_size;
4488 sparc_apparent_frame_size = apparent_frame_size;
4489 sparc_n_global_fp_regs = n_global_fp_regs;
4490 sparc_save_local_in_regs_p = save_local_in_regs_p;
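/* Purely illustrative walk-through of the computation above, using assumed
   values for the target macros (not taken from this file): for a 32-bit,
   non-leaf function with 20 bytes of locals, an empty outgoing argument
   area and nothing to save, and assuming STARTING_FRAME_OFFSET == -8,
   FIRST_PARM_OFFSET == 68 (16-word register window save area plus the
   aggregate-return slot) and 8-byte stack alignment, we would get
   apparent_frame_size = (20 + 8 + 7) & -8 = 32 and
   frame_size = SPARC_STACK_ALIGN (32 + 0 + 68) = 104.  */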
4495 /* Output any necessary .register pseudo-ops. */
4498 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4500 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4506 /* Check if %g[2367] were used without
4507 .register being printed for them already. */
4508 for (i = 2; i < 8; i++)
4510 if (df_regs_ever_live_p (i)
4511 && ! sparc_hard_reg_printed [i])
4513 sparc_hard_reg_printed [i] = 1;
4514 /* %g7 is used as TLS base register, use #ignore
4515 for it instead of #scratch. */
4516 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4517 i == 7 ? "ignore" : "scratch");
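/* For example, if %g2 and %g3 turn out to be live in the function and have
   not been declared yet, the loop above prints exactly:
	.register	%g2, #scratch
	.register	%g3, #scratch
   whereas a live %g7 would be declared with #ignore, since it is the TLS
   base register.  */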
4524 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4526 #if PROBE_INTERVAL > 4096
4527 #error Cannot use indexed addressing mode for stack probing
4530 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4531 inclusive. These are offsets from the current stack pointer.
4533 Note that we don't use the REG+REG addressing mode for the probes because
4534 of the stack bias in 64-bit mode. And it doesn't really buy us anything
4535 so the advantages of having a single code path win here. */
4538 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4540 rtx g1 = gen_rtx_REG (Pmode, 1);
4542 /* See if we have a constant small number of probes to generate. If so,
4543 that's the easy case. */
4544 if (size <= PROBE_INTERVAL)
4546 emit_move_insn (g1, GEN_INT (first));
4547 emit_insn (gen_rtx_SET (VOIDmode, g1,
4548 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4549 emit_stack_probe (plus_constant (g1, -size));
4552 /* The run-time loop is made up of 10 insns in the generic case while the
4553 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4554 else if (size <= 5 * PROBE_INTERVAL)
4558 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4559 emit_insn (gen_rtx_SET (VOIDmode, g1,
4560 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4561 emit_stack_probe (g1);
4563 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4564 it exceeds SIZE. If only two probes are needed, this will not
4565 generate any code. Then probe at FIRST + SIZE. */
4566 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4568 emit_insn (gen_rtx_SET (VOIDmode, g1,
4569 plus_constant (g1, -PROBE_INTERVAL)));
4570 emit_stack_probe (g1);
4573 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4576 /* Otherwise, do the same as above, but in a loop. Note that we must be
4577 extra careful with variables wrapping around because we might be at
4578 the very top (or the very bottom) of the address space and we have
4579 to be able to handle this case properly; in particular, we use an
4580 equality test for the loop condition. */
4583 HOST_WIDE_INT rounded_size;
4584 rtx g4 = gen_rtx_REG (Pmode, 4);
4586 emit_move_insn (g1, GEN_INT (first));
4589 /* Step 1: round SIZE to the previous multiple of the interval. */
4591 rounded_size = size & -PROBE_INTERVAL;
4592 emit_move_insn (g4, GEN_INT (rounded_size));
4595 /* Step 2: compute initial and final value of the loop counter. */
4597 /* TEST_ADDR = SP + FIRST. */
4598 emit_insn (gen_rtx_SET (VOIDmode, g1,
4599 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4601 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4602 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4607 while (TEST_ADDR != LAST_ADDR)
4609 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4613 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4614 until it is equal to ROUNDED_SIZE. */
4617 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4619 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4622 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4623 that SIZE is equal to ROUNDED_SIZE. */
4625 if (size != rounded_size)
4626 emit_stack_probe (plus_constant (g4, rounded_size - size));
4629 /* Make sure nothing is scheduled before we are done. */
4630 emit_insn (gen_blockage ());
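/* Worked example (illustrative, ignoring the stack bias): with
   PROBE_INTERVAL == 4096, FIRST == 0 and SIZE == 10000 we take the second
   branch above and emit probes at %sp-4096, %sp-8192 and finally %sp-10000;
   the loop-based third branch is only used once SIZE exceeds 5 probe
   intervals.  */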
4633 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4634 absolute addresses. */
4637 output_probe_stack_range (rtx reg1, rtx reg2)
4639 static int labelno = 0;
4640 char loop_lab[32], end_lab[32];
4643 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4644 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4646 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4648 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4651 output_asm_insn ("cmp\t%0, %1", xops);
4653 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4655 fputs ("\tbe\t", asm_out_file);
4656 assemble_name_raw (asm_out_file, end_lab);
4657 fputc ('\n', asm_out_file);
4659 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4660 xops[1] = GEN_INT (-PROBE_INTERVAL);
4661 output_asm_insn (" add\t%0, %1, %0", xops);
4663 /* Probe at TEST_ADDR and branch. */
4665 fputs ("\tba,pt\t%xcc,", asm_out_file);
4667 fputs ("\tba\t", asm_out_file);
4668 assemble_name_raw (asm_out_file, loop_lab);
4669 fputc ('\n', asm_out_file);
4670 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4671 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4673 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
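/* For reference, a sketch of the loop emitted above in 32-bit mode, assuming
   PROBE_INTERVAL == 4096, a zero stack bias and %g1/%g4 as the test and
   limit registers (the ones passed in by sparc_emit_probe_stack_range):

	.LPSRL0:
		cmp	%g1, %g4
		be	.LPSRE0
		 add	%g1, -4096, %g1
		ba	.LPSRL0
		 st	%g0, [%g1+0]
	.LPSRE0:

   The add and the store sit in the delay slots, hence the leading space in
   the corresponding templates above, which is how this file marks
   delay-slot instructions.  */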
4678 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4679 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4680 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4681 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4682 the action to be performed if it returns false. Return the new offset. */
4684 typedef bool (*sorr_pred_t) (unsigned int, int);
4685 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
4688 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4689 int offset, int leaf_function, sorr_pred_t save_p,
4690 sorr_act_t action_true, sorr_act_t action_false)
4695 if (TARGET_ARCH64 && high <= 32)
4699 for (i = low; i < high; i++)
4701 if (save_p (i, leaf_function))
4703 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4704 if (action_true == SORR_SAVE)
4706 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4707 RTX_FRAME_RELATED_P (insn) = 1;
4709 else /* action_true == SORR_RESTORE */
4711 /* The frame pointer must be restored last since its old
4712 value may be used as base address for the frame. This
4713 is problematic in 64-bit mode only because of the lack
4714 of double-word load instruction. */
4715 if (i == HARD_FRAME_POINTER_REGNUM)
4718 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4722 else if (action_false == SORR_ADVANCE)
4728 mem = gen_frame_mem (DImode, plus_constant (base, fp_offset));
4729 emit_move_insn (hard_frame_pointer_rtx, mem);
4734 for (i = low; i < high; i += 2)
4736 bool reg0 = save_p (i, leaf_function);
4737 bool reg1 = save_p (i + 1, leaf_function);
4738 enum machine_mode mode;
4743 mode = SPARC_INT_REG_P (i) ? DImode : DFmode;
4748 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4753 mode = SPARC_INT_REG_P (i) ? SImode : SFmode;
4759 if (action_false == SORR_ADVANCE)
4764 mem = gen_frame_mem (mode, plus_constant (base, offset));
4765 if (action_true == SORR_SAVE)
4767 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4768 RTX_FRAME_RELATED_P (insn) = 1;
4772 mem = gen_frame_mem (SImode, plus_constant (base, offset));
4773 set1 = gen_rtx_SET (VOIDmode, mem,
4774 gen_rtx_REG (SImode, regno));
4775 RTX_FRAME_RELATED_P (set1) = 1;
4777 = gen_frame_mem (SImode, plus_constant (base, offset + 4));
4778 set2 = gen_rtx_SET (VOIDmode, mem,
4779 gen_rtx_REG (SImode, regno + 1));
4780 RTX_FRAME_RELATED_P (set2) = 1;
4781 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4782 gen_rtx_PARALLEL (VOIDmode,
4783 gen_rtvec (2, set1, set2)));
4786 else /* action_true == SORR_RESTORE */
4787 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4789 /* Always preserve double-word alignment. */
4790 offset = (offset + 8) & -8;
4797 /* Emit code to adjust BASE to OFFSET. Return the new base. */
4800 emit_adjust_base_to_offset (rtx base, int offset)
4802 /* ??? This might be optimized a little as %g1 might already have a
4803 value close enough that a single add insn will do. */
4804 /* ??? Although, all of this is probably only a temporary fix because
4805 if %g1 can hold a function result, then sparc_expand_epilogue will
4806 lose (the result will be clobbered). */
4807 rtx new_base = gen_rtx_REG (Pmode, 1);
4808 emit_move_insn (new_base, GEN_INT (offset));
4809 emit_insn (gen_rtx_SET (VOIDmode,
4810 new_base, gen_rtx_PLUS (Pmode, base, new_base)));
4814 /* Emit code to save/restore call-saved global and FP registers. */
4817 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
4819 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
4821 base = emit_adjust_base_to_offset (base, offset);
4826 = emit_save_or_restore_regs (0, 8, base, offset, 0,
4827 save_global_or_fp_reg_p, action, SORR_NONE);
4828 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
4829 save_global_or_fp_reg_p, action, SORR_NONE);
4832 /* Emit code to save/restore call-saved local and in registers. */
4835 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
4837 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
4839 base = emit_adjust_base_to_offset (base, offset);
4843 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
4844 save_local_or_in_reg_p, action, SORR_ADVANCE);
4847 /* Emit a window_save insn. */
4850 emit_window_save (rtx increment)
4852 rtx insn = emit_insn (gen_window_save (increment));
4853 RTX_FRAME_RELATED_P (insn) = 1;
4855 /* The incoming return address (%o7) is saved in %i7. */
4856 add_reg_note (insn, REG_CFA_REGISTER,
4857 gen_rtx_SET (VOIDmode,
4858 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
4860 INCOMING_RETURN_ADDR_REGNUM)));
4862 /* The window save event. */
4863 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4865 /* The CFA is %fp, the hard frame pointer. */
4866 add_reg_note (insn, REG_CFA_DEF_CFA,
4867 plus_constant (hard_frame_pointer_rtx,
4868 INCOMING_FRAME_SP_OFFSET));
4873 /* Generate an increment for the stack pointer. */
4876 gen_stack_pointer_inc (rtx increment)
4878 return gen_rtx_SET (VOIDmode,
4880 gen_rtx_PLUS (Pmode,
4885 /* Generate a decrement for the stack pointer. */
4888 gen_stack_pointer_dec (rtx decrement)
4890 return gen_rtx_SET (VOIDmode,
4892 gen_rtx_MINUS (Pmode,
4897 /* Expand the function prologue. The prologue is responsible for reserving
4898 storage for the frame, saving the call-saved registers and loading the
4899 GOT register if needed. */
4902 sparc_expand_prologue (void)
4907 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4908 on the final value of the flag means deferring the prologue/epilogue
4909 expansion until just before the second scheduling pass, which is too
4910 late to emit multiple epilogues or return insns.
4912 Of course we are making the assumption that the value of the flag
4913 will not change between now and its final value. Of the three parts
4914 of the formula, only the last one can reasonably vary. Let's take a
4915 closer look, after assuming that the first two are set to true
4916 (otherwise the last value is effectively silenced).
4918 If only_leaf_regs_used returns false, the global predicate will also
4919 be false so the actual frame size calculated below will be positive.
4920 As a consequence, the save_register_window insn will be emitted in
4921 the instruction stream; now this insn explicitly references %fp
4922 which is not a leaf register so only_leaf_regs_used will always
4923 return false subsequently.
4925 If only_leaf_regs_used returns true, we hope that the subsequent
4926 optimization passes won't cause non-leaf registers to pop up. For
4927 example, the regrename pass has special provisions to not rename to
4928 non-leaf registers in a leaf function. */
4929 sparc_leaf_function_p
4930 = optimize > 0 && current_function_is_leaf && only_leaf_regs_used ();
4932 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4934 if (flag_stack_usage_info)
4935 current_function_static_stack_size = size;
4937 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4938 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4942 else if (sparc_leaf_function_p)
4944 rtx size_int_rtx = GEN_INT (-size);
4947 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4948 else if (size <= 8192)
4950 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4951 /* %sp is still the CFA register. */
4952 RTX_FRAME_RELATED_P (insn) = 1;
4953 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4957 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4958 emit_move_insn (size_rtx, size_int_rtx);
4959 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4960 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4961 gen_stack_pointer_inc (size_int_rtx));
4964 RTX_FRAME_RELATED_P (insn) = 1;
4968 rtx size_int_rtx = GEN_INT (-size);
4971 emit_window_save (size_int_rtx);
4972 else if (size <= 8192)
4974 emit_window_save (GEN_INT (-4096));
4975 /* %sp is not the CFA register anymore. */
4976 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4980 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4981 emit_move_insn (size_rtx, size_int_rtx);
4982 emit_window_save (size_rtx);
4986 if (sparc_leaf_function_p)
4988 sparc_frame_base_reg = stack_pointer_rtx;
4989 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
4993 sparc_frame_base_reg = hard_frame_pointer_rtx;
4994 sparc_frame_base_offset = SPARC_STACK_BIAS;
4997 if (sparc_n_global_fp_regs > 0)
4998 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4999 sparc_frame_base_offset
5000 - sparc_apparent_frame_size,
5003 /* Load the GOT register if needed. */
5004 if (crtl->uses_pic_offset_table)
5005 load_got_register ();
5007 /* Advertise that the data calculated just above are now valid. */
5008 sparc_prologue_data_valid_p = true;
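/* Illustrative only: for a frame of, say, 112 bytes that fits in a 13-bit
   immediate, the code above reduces to a single
	save	%sp, -112, %sp
   for a regular function, while a leaf function keeps %sp as the frame base
   and merely emits
	add	%sp, -112, %sp  */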
5011 /* Expand the function prologue. The prologue is responsible for reserving
5012 storage for the frame, saving the call-saved registers and loading the
5013 GOT register if needed. */
5016 sparc_flat_expand_prologue (void)
5021 sparc_leaf_function_p = optimize > 0 && current_function_is_leaf;
5023 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
5025 if (flag_stack_usage_info)
5026 current_function_static_stack_size = size;
5028 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
5029 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
5031 if (sparc_save_local_in_regs_p)
5032 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
5039 rtx size_int_rtx, size_rtx;
5041 size_rtx = size_int_rtx = GEN_INT (-size);
5043 /* We establish the frame (i.e. decrement the stack pointer) first, even
5044 if we use a frame pointer, because we cannot clobber any call-saved
5045 registers, including the frame pointer, if we haven't created a new
5046 register save area, for the sake of compatibility with the ABI. */
5048 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
5049 else if (size <= 8192 && !frame_pointer_needed)
5051 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
5052 RTX_FRAME_RELATED_P (insn) = 1;
5053 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
5057 size_rtx = gen_rtx_REG (Pmode, 1);
5058 emit_move_insn (size_rtx, size_int_rtx);
5059 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
5060 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5061 gen_stack_pointer_inc (size_int_rtx));
5063 RTX_FRAME_RELATED_P (insn) = 1;
5065 /* Ensure nothing is scheduled until after the frame is established. */
5066 emit_insn (gen_blockage ());
5068 if (frame_pointer_needed)
5070 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5071 gen_rtx_MINUS (Pmode,
5074 RTX_FRAME_RELATED_P (insn) = 1;
5076 add_reg_note (insn, REG_CFA_ADJUST_CFA,
5077 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
5078 plus_constant (stack_pointer_rtx,
5082 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5084 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
5085 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
5087 insn = emit_move_insn (i7, o7);
5088 RTX_FRAME_RELATED_P (insn) = 1;
5090 add_reg_note (insn, REG_CFA_REGISTER,
5091 gen_rtx_SET (VOIDmode, i7, o7));
5093 /* Prevent this instruction from ever being considered dead,
5094 even if this function has no epilogue. */
5095 emit_insn (gen_rtx_USE (VOIDmode, i7));
5099 if (frame_pointer_needed)
5101 sparc_frame_base_reg = hard_frame_pointer_rtx;
5102 sparc_frame_base_offset = SPARC_STACK_BIAS;
5106 sparc_frame_base_reg = stack_pointer_rtx;
5107 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
5110 if (sparc_n_global_fp_regs > 0)
5111 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5112 sparc_frame_base_offset
5113 - sparc_apparent_frame_size,
5116 /* Load the GOT register if needed. */
5117 if (crtl->uses_pic_offset_table)
5118 load_got_register ();
5120 /* Advertise that the data calculated just above are now valid. */
5121 sparc_prologue_data_valid_p = true;
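/* Illustrative only: in the flat model the same 112-byte frame would be
   established with
	add	%sp, -112, %sp
   followed, when the return address register is live, by
	mov	%o7, %i7
   since there is no register window to do the saving for us.  */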
5124 /* This function generates the assembly code for function entry, which boils
5125 down to emitting the necessary .register directives. */
5128 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5130 /* Check that the assumption we made in sparc_expand_prologue is valid. */
5132 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
5134 sparc_output_scratch_registers (file);
5137 /* Expand the function epilogue, either normal or part of a sibcall.
5138 We emit all the instructions except the return or the call. */
5141 sparc_expand_epilogue (bool for_eh)
5143 HOST_WIDE_INT size = sparc_frame_size;
5145 if (sparc_n_global_fp_regs > 0)
5146 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5147 sparc_frame_base_offset
5148 - sparc_apparent_frame_size,
5151 if (size == 0 || for_eh)
5153 else if (sparc_leaf_function_p)
5156 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5157 else if (size <= 8192)
5159 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5160 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5164 rtx reg = gen_rtx_REG (Pmode, 1);
5165 emit_move_insn (reg, GEN_INT (-size));
5166 emit_insn (gen_stack_pointer_dec (reg));
5171 /* Expand the function epilogue, either normal or part of a sibcall.
5172 We emit all the instructions except the return or the call. */
5175 sparc_flat_expand_epilogue (bool for_eh)
5177 HOST_WIDE_INT size = sparc_frame_size;
5179 if (sparc_n_global_fp_regs > 0)
5180 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
5181 sparc_frame_base_offset
5182 - sparc_apparent_frame_size,
5185 /* If we have a frame pointer, we'll need both to restore it before the
5186 frame is destroyed and use its current value in destroying the frame.
5187 Since we don't have an atomic way to do that in the flat window model,
5188 we save the current value into a temporary register (%g1). */
5189 if (frame_pointer_needed && !for_eh)
5190 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
5192 if (return_addr_reg_needed_p (sparc_leaf_function_p))
5193 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
5194 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
5196 if (sparc_save_local_in_regs_p)
5197 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
5198 sparc_frame_base_offset,
5201 if (size == 0 || for_eh)
5203 else if (frame_pointer_needed)
5205 /* Make sure the frame is destroyed after everything else is done. */
5206 emit_insn (gen_blockage ());
5208 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
5213 emit_insn (gen_blockage ());
5216 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
5217 else if (size <= 8192)
5219 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
5220 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
5224 rtx reg = gen_rtx_REG (Pmode, 1);
5225 emit_move_insn (reg, GEN_INT (-size));
5226 emit_insn (gen_stack_pointer_dec (reg));
5231 /* Return true if it is appropriate to emit `return' instructions in the
5232 body of a function. */
5235 sparc_can_use_return_insn_p (void)
5237 return sparc_prologue_data_valid_p
5238 && sparc_n_global_fp_regs == 0
5240 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
5241 : (sparc_frame_size == 0 || !sparc_leaf_function_p);
5244 /* This function generates the assembly code for function exit. */
5247 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5249 /* If the last two instructions of a function are "call foo; dslot;"
5250 the return address might point to the first instruction in the next
5251 function and we have to output a dummy nop for the sake of sane
5252 backtraces in such cases. This is pointless for sibling calls since
5253 the return address is explicitly adjusted. */
5255 rtx insn, last_real_insn;
5257 insn = get_last_insn ();
5259 last_real_insn = prev_real_insn (insn);
5261 && GET_CODE (last_real_insn) == INSN
5262 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5263 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5266 && CALL_P (last_real_insn)
5267 && !SIBLING_CALL_P (last_real_insn))
5268 fputs("\tnop\n", file);
5270 sparc_output_deferred_case_vectors ();
5273 /* Output a 'restore' instruction. */
5276 output_restore (rtx pat)
5282 fputs ("\t restore\n", asm_out_file);
5286 gcc_assert (GET_CODE (pat) == SET);
5288 operands[0] = SET_DEST (pat);
5289 pat = SET_SRC (pat);
5291 switch (GET_CODE (pat))
5294 operands[1] = XEXP (pat, 0);
5295 operands[2] = XEXP (pat, 1);
5296 output_asm_insn (" restore %r1, %2, %Y0", operands);
5299 operands[1] = XEXP (pat, 0);
5300 operands[2] = XEXP (pat, 1);
5301 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
5304 operands[1] = XEXP (pat, 0);
5305 gcc_assert (XEXP (pat, 1) == const1_rtx);
5306 output_asm_insn (" restore %r1, %r1, %Y0", operands);
5310 output_asm_insn (" restore %%g0, %1, %Y0", operands);
5315 /* Output a return. */
5318 output_return (rtx insn)
5320 if (crtl->calls_eh_return)
5322 /* If the function uses __builtin_eh_return, the eh_return
5323 machinery occupies the delay slot. */
5324 gcc_assert (!final_sequence);
5326 if (flag_delayed_branch)
5328 if (!TARGET_FLAT && TARGET_V9)
5329 fputs ("\treturn\t%i7+8\n", asm_out_file);
5333 fputs ("\trestore\n", asm_out_file);
5335 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5338 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5343 fputs ("\trestore\n", asm_out_file);
5345 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5346 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5349 else if (sparc_leaf_function_p || TARGET_FLAT)
5351 /* This is a leaf or flat function so we don't have to bother restoring
5352 the register window, which frees us from dealing with the convoluted
5353 semantics of restore/return. We simply output the jump to the
5354 return address and the insn in the delay slot (if any). */
5356 return "jmp\t%%o7+%)%#";
5360 /* This is a regular function so we have to restore the register window.
5361 We may have a pending insn for the delay slot, which will be either
5362 combined with the 'restore' instruction or put in the delay slot of
5363 the 'return' instruction. */
5369 delay = NEXT_INSN (insn);
5372 pat = PATTERN (delay);
5374 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5376 epilogue_renumber (&pat, 0);
5377 return "return\t%%i7+%)%#";
5381 output_asm_insn ("jmp\t%%i7+%)", NULL);
5382 output_restore (pat);
5383 PATTERN (delay) = gen_blockage ();
5384 INSN_CODE (delay) = -1;
5389 /* The delay slot is empty. */
5391 return "return\t%%i7+%)\n\t nop";
5392 else if (flag_delayed_branch)
5393 return "jmp\t%%i7+%)\n\t restore";
5395 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5402 /* Output a sibling call. */
5405 output_sibcall (rtx insn, rtx call_operand)
5409 gcc_assert (flag_delayed_branch);
5411 operands[0] = call_operand;
5413 if (sparc_leaf_function_p || TARGET_FLAT)
5415 /* This is a leaf or flat function so we don't have to bother restoring
5416 the register window. We simply output the jump to the function and
5417 the insn in the delay slot (if any). */
5419 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5422 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5425 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5426 it into a branch if possible. */
5427 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5432 /* This is a regular function so we have to restore the register window.
5433 We may have a pending insn for the delay slot, which will be combined
5434 with the 'restore' instruction. */
5436 output_asm_insn ("call\t%a0, 0", operands);
5440 rtx delay = NEXT_INSN (insn);
5443 output_restore (PATTERN (delay));
5445 PATTERN (delay) = gen_blockage ();
5446 INSN_CODE (delay) = -1;
5449 output_restore (NULL_RTX);
5455 /* Functions for handling argument passing.
5457 For 32-bit, the first 6 args are normally in registers and the rest are
5458 pushed. Any arg that starts within the first 6 words is at least
5459 partially passed in a register unless its data type forbids it.
5461 For 64-bit, the argument registers are laid out as an array of 16 elements
5462 and arguments are added sequentially. The first 6 int args and up to the
5463 first 16 fp args (depending on size) are passed in regs.
5465 Slot Stack Integral Float Float in structure Double Long Double
5466 ---- ----- -------- ----- ------------------ ------ -----------
5467 15 [SP+248] %f31 %f30,%f31 %d30
5468 14 [SP+240] %f29 %f28,%f29 %d28 %q28
5469 13 [SP+232] %f27 %f26,%f27 %d26
5470 12 [SP+224] %f25 %f24,%f25 %d24 %q24
5471 11 [SP+216] %f23 %f22,%f23 %d22
5472 10 [SP+208] %f21 %f20,%f21 %d20 %q20
5473 9 [SP+200] %f19 %f18,%f19 %d18
5474 8 [SP+192] %f17 %f16,%f17 %d16 %q16
5475 7 [SP+184] %f15 %f14,%f15 %d14
5476 6 [SP+176] %f13 %f12,%f13 %d12 %q12
5477 5 [SP+168] %o5 %f11 %f10,%f11 %d10
5478 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
5479 3 [SP+152] %o3 %f7 %f6,%f7 %d6
5480 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
5481 1 [SP+136] %o1 %f3 %f2,%f3 %d2
5482 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
5484 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
5486 Integral arguments are always passed as 64-bit quantities appropriately
5489 Passing of floating point values is handled as follows.
5490 If a prototype is in scope:
5491 If the value is in a named argument (i.e. not a stdarg function or a
5492 value not part of the `...') then the value is passed in the appropriate
5494 If the value is part of the `...' and is passed in one of the first 6
5495 slots then the value is passed in the appropriate int reg.
5496 If the value is part of the `...' and is not passed in one of the first 6
5497 slots then the value is passed in memory.
5498 If a prototype is not in scope:
5499 If the value is one of the first 6 arguments the value is passed in the
5500 appropriate integer reg and the appropriate fp reg.
5501 If the value is not one of the first 6 arguments the value is passed in
5502 the appropriate fp reg and in memory.
5505 Summary of the calling conventions implemented by GCC on the SPARC:
5508 size argument return value
5510 small integer <4 int. reg. int. reg.
5511 word 4 int. reg. int. reg.
5512 double word 8 int. reg. int. reg.
5514 _Complex small integer <8 int. reg. int. reg.
5515 _Complex word 8 int. reg. int. reg.
5516 _Complex double word 16 memory int. reg.
5518 vector integer <=8 int. reg. FP reg.
5519 vector integer >8 memory memory
5521 float 4 int. reg. FP reg.
5522 double 8 int. reg. FP reg.
5523 long double 16 memory memory
5525 _Complex float 8 memory FP reg.
5526 _Complex double 16 memory FP reg.
5527 _Complex long double 32 memory FP reg.
5529 vector float any memory memory
5531 aggregate any memory memory
5536 size argument return value
5538 small integer <8 int. reg. int. reg.
5539 word 8 int. reg. int. reg.
5540 double word 16 int. reg. int. reg.
5542 _Complex small integer <16 int. reg. int. reg.
5543 _Complex word 16 int. reg. int. reg.
5544 _Complex double word 32 memory int. reg.
5546 vector integer <=16 FP reg. FP reg.
5547 vector integer 16<s<=32 memory FP reg.
5548 vector integer >32 memory memory
5550 float 4 FP reg. FP reg.
5551 double 8 FP reg. FP reg.
5552 long double 16 FP reg. FP reg.
5554 _Complex float 8 FP reg. FP reg.
5555 _Complex double 16 FP reg. FP reg.
5556 _Complex long double 32 memory FP reg.
5558 vector float <=16 FP reg. FP reg.
5559 vector float 16<s<=32 memory FP reg.
5560 vector float >32 memory memory
5562 aggregate <=16 reg. reg.
5563 aggregate 16<s<=32 memory reg.
5564 aggregate >32 memory memory
5568 Note #1: complex floating-point types follow the extended SPARC ABIs as
5569 implemented by the Sun compiler.
5571 Note #2: integral vector types follow the scalar floating-point types
5572 conventions to match what is implemented by the Sun VIS SDK.
5574 Note #3: floating-point vector types follow the aggregate types
5578 /* Maximum number of int regs for args. */
5579 #define SPARC_INT_ARG_MAX 6
5580 /* Maximum number of fp regs for args. */
5581 #define SPARC_FP_ARG_MAX 16
5583 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
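/* For instance, with UNITS_PER_WORD == 8 (64-bit), ROUND_ADVANCE (12) == 2:
   a 12-byte argument occupies two slots.  */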
5585 /* Handle the INIT_CUMULATIVE_ARGS macro.
5586 Initialize a variable CUM of type CUMULATIVE_ARGS
5587 for a call to a function whose data type is FNTYPE.
5588 For a library call, FNTYPE is 0. */
5591 init_cumulative_args (struct sparc_args *cum, tree fntype,
5592 rtx libname ATTRIBUTE_UNUSED,
5593 tree fndecl ATTRIBUTE_UNUSED)
5596 cum->prototype_p = fntype && prototype_p (fntype);
5597 cum->libcall_p = fntype == 0;
5600 /* Handle promotion of pointer and integer arguments. */
5602 static enum machine_mode
5603 sparc_promote_function_mode (const_tree type,
5604 enum machine_mode mode,
5606 const_tree fntype ATTRIBUTE_UNUSED,
5607 int for_return ATTRIBUTE_UNUSED)
5609 if (type != NULL_TREE && POINTER_TYPE_P (type))
5611 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5615 /* Integral arguments are passed as full words, as per the ABI. */
5616 if (GET_MODE_CLASS (mode) == MODE_INT
5617 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5623 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5626 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5628 return TARGET_ARCH64 ? true : false;
5631 /* Scan the record type TYPE and return the following predicates:
5632 - INTREGS_P: the record contains at least one field or sub-field
5633 that is eligible for promotion in integer registers.
5634 - FP_REGS_P: the record contains at least one field or sub-field
5635 that is eligible for promotion in floating-point registers.
5636 - PACKED_P: the record contains at least one field that is packed.
5638 Sub-fields are not taken into account for the PACKED_P predicate. */
5641 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5646 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5648 if (TREE_CODE (field) == FIELD_DECL)
5650 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5651 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5652 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5653 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5659 if (packed_p && DECL_PACKED (field))
5665 /* Compute the slot number to pass an argument in.
5666 Return the slot number or -1 if passing on the stack.
5668 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5669 the preceding args and about the function being called.
5670 MODE is the argument's machine mode.
5671 TYPE is the data type of the argument (as a tree).
5672 This is null for libcalls where that information may
5674 NAMED is nonzero if this argument is a named parameter
5675 (otherwise it is an extra parameter matching an ellipsis).
5676 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5677 *PREGNO records the register number to use if scalar type.
5678 *PPADDING records the amount of padding needed in words. */
5681 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5682 const_tree type, bool named, bool incoming_p,
5683 int *pregno, int *ppadding)
5685 int regbase = (incoming_p
5686 ? SPARC_INCOMING_INT_ARG_FIRST
5687 : SPARC_OUTGOING_INT_ARG_FIRST);
5688 int slotno = cum->words;
5689 enum mode_class mclass;
5694 if (type && TREE_ADDRESSABLE (type))
5700 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5703 /* For SPARC64, objects requiring 16-byte alignment get it. */
5705 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5706 && (slotno & 1) != 0)
5707 slotno++, *ppadding = 1;
5709 mclass = GET_MODE_CLASS (mode);
5710 if (type && TREE_CODE (type) == VECTOR_TYPE)
5712 /* Vector types deserve special treatment because they are
5713 polymorphic wrt their mode, depending upon whether VIS
5714 instructions are enabled. */
5715 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5717 /* The SPARC port defines no floating-point vector modes. */
5718 gcc_assert (mode == BLKmode);
5722 /* Integral vector types should either have a vector
5723 mode or an integral mode, because we are guaranteed
5724 by pass_by_reference that their size is not greater
5725 than 16 bytes and TImode is 16 bytes wide. */
5726 gcc_assert (mode != BLKmode);
5728 /* Vector integers are handled like floats according to
5730 mclass = MODE_FLOAT;
5737 case MODE_COMPLEX_FLOAT:
5738 case MODE_VECTOR_INT:
5739 if (TARGET_ARCH64 && TARGET_FPU && named)
5741 if (slotno >= SPARC_FP_ARG_MAX)
5743 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5744 /* Arguments filling only one single FP register are
5745 right-justified in the outer double FP register. */
5746 if (GET_MODE_SIZE (mode) <= 4)
5753 case MODE_COMPLEX_INT:
5754 if (slotno >= SPARC_INT_ARG_MAX)
5756 regno = regbase + slotno;
5760 if (mode == VOIDmode)
5761 /* MODE is VOIDmode when generating the actual call. */
5764 gcc_assert (mode == BLKmode);
5768 || (TREE_CODE (type) != VECTOR_TYPE
5769 && TREE_CODE (type) != RECORD_TYPE))
5771 if (slotno >= SPARC_INT_ARG_MAX)
5773 regno = regbase + slotno;
5775 else /* TARGET_ARCH64 && type */
5777 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5779 /* First see what kinds of registers we would need. */
5780 if (TREE_CODE (type) == VECTOR_TYPE)
5783 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5785 /* The ABI obviously doesn't specify how packed structures
5786 are passed. These are defined to be passed in int regs
5787 if possible, otherwise memory. */
5788 if (packed_p || !named)
5789 fpregs_p = 0, intregs_p = 1;
5791 /* If all arg slots are filled, then we must pass on the stack. */
5792 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5795 /* If there are only int args and all int arg slots are filled,
5796 then we must pass on the stack. */
5797 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5800 /* Note that even if all int arg slots are filled, fp members may
5801 still be passed in regs if such regs are available.
5802 *PREGNO isn't set because there may be more than one, it's up
5803 to the caller to compute them. */
5816 /* Handle recursive register counting for structure field layout. */
5818 struct function_arg_record_value_parms
5820 rtx ret; /* return expression being built. */
5821 int slotno; /* slot number of the argument. */
5822 int named; /* whether the argument is named. */
5823 int regbase; /* regno of the base register. */
5824 int stack; /* 1 if part of the argument is on the stack. */
5825 int intoffset; /* offset of the first pending integer field. */
5826 unsigned int nregs; /* number of words passed in registers. */
5829 static void function_arg_record_value_3
5830 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5831 static void function_arg_record_value_2
5832 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5833 static void function_arg_record_value_1
5834 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5835 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5836 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5838 /* A subroutine of function_arg_record_value. Traverse the structure
5839 recursively and determine how many registers will be required. */
5842 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5843 struct function_arg_record_value_parms *parms,
5848 /* We need to compute how many registers are needed so we can
5849 allocate the PARALLEL but before we can do that we need to know
5850 whether there are any packed fields. The ABI obviously doesn't
5851 specify how structures are passed in this case, so they are
5852 defined to be passed in int regs if possible, otherwise memory,
5853 regardless of whether there are fp values present. */
5856 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5858 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5865 /* Compute how many registers we need. */
5866 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5868 if (TREE_CODE (field) == FIELD_DECL)
5870 HOST_WIDE_INT bitpos = startbitpos;
5872 if (DECL_SIZE (field) != 0)
5874 if (integer_zerop (DECL_SIZE (field)))
5877 if (host_integerp (bit_position (field), 1))
5878 bitpos += int_bit_position (field);
5881 /* ??? FIXME: else assume zero offset. */
5883 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5884 function_arg_record_value_1 (TREE_TYPE (field),
5888 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5889 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5894 if (parms->intoffset != -1)
5896 unsigned int startbit, endbit;
5897 int intslots, this_slotno;
5899 startbit = parms->intoffset & -BITS_PER_WORD;
5900 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5902 intslots = (endbit - startbit) / BITS_PER_WORD;
5903 this_slotno = parms->slotno + parms->intoffset
5906 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5908 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5909 /* We need to pass this field on the stack. */
5913 parms->nregs += intslots;
5914 parms->intoffset = -1;
5917 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5918 If it wasn't true we wouldn't be here. */
5919 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5920 && DECL_MODE (field) == BLKmode)
5921 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5922 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5929 if (parms->intoffset == -1)
5930 parms->intoffset = bitpos;
5936 /* A subroutine of function_arg_record_value. Assign the bits of the
5937 structure between parms->intoffset and bitpos to integer registers. */
5940 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5941 struct function_arg_record_value_parms *parms)
5943 enum machine_mode mode;
5945 unsigned int startbit, endbit;
5946 int this_slotno, intslots, intoffset;
5949 if (parms->intoffset == -1)
5952 intoffset = parms->intoffset;
5953 parms->intoffset = -1;
5955 startbit = intoffset & -BITS_PER_WORD;
5956 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5957 intslots = (endbit - startbit) / BITS_PER_WORD;
5958 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5960 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5964 /* If this is the trailing part of a word, only load that much into
5965 the register. Otherwise load the whole register. Note that in
5966 the latter case we may pick up unwanted bits. It's not a problem
5967 at the moment but we may wish to revisit it. */
5969 if (intoffset % BITS_PER_WORD != 0)
5970 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5975 intoffset /= BITS_PER_UNIT;
5978 regno = parms->regbase + this_slotno;
5979 reg = gen_rtx_REG (mode, regno);
5980 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5981 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5984 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5989 while (intslots > 0);
5992 /* A subroutine of function_arg_record_value. Traverse the structure
5993 recursively and assign bits to floating point registers. Track which
5994 bits in between need integer registers; invoke function_arg_record_value_3
5995 to make that happen. */
5998 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5999 struct function_arg_record_value_parms *parms,
6005 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6007 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
6014 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6016 if (TREE_CODE (field) == FIELD_DECL)
6018 HOST_WIDE_INT bitpos = startbitpos;
6020 if (DECL_SIZE (field) != 0)
6022 if (integer_zerop (DECL_SIZE (field)))
6025 if (host_integerp (bit_position (field), 1))
6026 bitpos += int_bit_position (field);
6029 /* ??? FIXME: else assume zero offset. */
6031 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
6032 function_arg_record_value_2 (TREE_TYPE (field),
6036 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
6037 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
6042 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
6043 int regno, nregs, pos;
6044 enum machine_mode mode = DECL_MODE (field);
6047 function_arg_record_value_3 (bitpos, parms);
6049 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
6052 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6053 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
6055 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
6057 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
6063 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
6064 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
6066 reg = gen_rtx_REG (mode, regno);
6067 pos = bitpos / BITS_PER_UNIT;
6068 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6069 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6073 regno += GET_MODE_SIZE (mode) / 4;
6074 reg = gen_rtx_REG (mode, regno);
6075 pos += GET_MODE_SIZE (mode);
6076 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
6077 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
6083 if (parms->intoffset == -1)
6084 parms->intoffset = bitpos;
6090 /* Used by function_arg and sparc_function_value_1 to implement the complex
6091 conventions of the 64-bit ABI for passing and returning structures.
6092 Return an expression valid as a return value for the FUNCTION_ARG
6093 and TARGET_FUNCTION_VALUE.
6095 TYPE is the data type of the argument (as a tree).
6096 This is null for libcalls where that information may
6098 MODE is the argument's machine mode.
6099 SLOTNO is the index number of the argument's slot in the parameter array.
6100 NAMED is nonzero if this argument is a named parameter
6101 (otherwise it is an extra parameter matching an ellipsis).
6102 REGBASE is the regno of the base register for the parameter array. */
6105 function_arg_record_value (const_tree type, enum machine_mode mode,
6106 int slotno, int named, int regbase)
6108 HOST_WIDE_INT typesize = int_size_in_bytes (type);
6109 struct function_arg_record_value_parms parms;
6112 parms.ret = NULL_RTX;
6113 parms.slotno = slotno;
6114 parms.named = named;
6115 parms.regbase = regbase;
6118 /* Compute how many registers we need. */
6120 parms.intoffset = 0;
6121 function_arg_record_value_1 (type, 0, &parms, false);
6123 /* Take into account pending integer fields. */
6124 if (parms.intoffset != -1)
6126 unsigned int startbit, endbit;
6127 int intslots, this_slotno;
6129 startbit = parms.intoffset & -BITS_PER_WORD;
6130 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
6131 intslots = (endbit - startbit) / BITS_PER_WORD;
6132 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
6134 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
6136 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
6137 /* We need to pass this field on the stack. */
6141 parms.nregs += intslots;
6143 nregs = parms.nregs;
6145 /* Allocate the vector and handle some annoying special cases. */
6148 /* ??? Empty structure has no value? Duh? */
6151 /* Though there's nothing really to store, return a word register
6152 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
6153 leads to breakage due to the fact that there are zero bytes to
6155 return gen_rtx_REG (mode, regbase);
6159 /* ??? C++ has structures with no fields, and yet a size. Give up
6160 for now and pass everything back in integer registers. */
6161 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6163 if (nregs + slotno > SPARC_INT_ARG_MAX)
6164 nregs = SPARC_INT_ARG_MAX - slotno;
6166 gcc_assert (nregs != 0);
6168 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
6170 /* If at least one field must be passed on the stack, generate
6171 (parallel [(expr_list (nil) ...) ...]) so that all fields will
6172 also be passed on the stack. We can't do much better because the
6173 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
6174 of structures for which the fields passed exclusively in registers
6175 are not at the beginning of the structure. */
6177 XVECEXP (parms.ret, 0, 0)
6178 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6180 /* Fill in the entries. */
6182 parms.intoffset = 0;
6183 function_arg_record_value_2 (type, 0, &parms, false);
6184 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
6186 gcc_assert (parms.nregs == nregs);
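/* Illustrative example, assuming the 64-bit ABI and an outgoing named
   argument starting in slot 0: for

	struct { int i; double d; }

   the code above builds roughly

	(parallel [(expr_list (reg:DI %o0) (const_int 0))
		   (expr_list (reg:DF %f2) (const_int 8))])

   i.e. the integer field travels in %o0 and the double in the FP register
   pair of the second slot (%d2), matching the table further up.  */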
6191 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6192 of the 64-bit ABI for passing and returning unions.
6193 Return an expression valid as a return value for the FUNCTION_ARG
6194 and TARGET_FUNCTION_VALUE.
6196 SIZE is the size in bytes of the union.
6197 MODE is the argument's machine mode.
6198 REGNO is the hard register the union will be passed in. */
6201 function_arg_union_value (int size, enum machine_mode mode, int slotno,
6204 int nwords = ROUND_ADVANCE (size), i;
6207 /* See comment in previous function for empty structures. */
6209 return gen_rtx_REG (mode, regno);
6211 if (slotno == SPARC_INT_ARG_MAX - 1)
6214 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
6216 for (i = 0; i < nwords; i++)
6218 /* Unions are passed left-justified. */
6219 XVECEXP (regs, 0, i)
6220 = gen_rtx_EXPR_LIST (VOIDmode,
6221 gen_rtx_REG (word_mode, regno),
6222 GEN_INT (UNITS_PER_WORD * i));
6229 /* Used by function_arg and sparc_function_value_1 to implement the conventions
6230 for passing and returning large (BLKmode) vectors.
6231 Return an expression valid as a return value for the FUNCTION_ARG
6232 and TARGET_FUNCTION_VALUE.
6234 SIZE is the size in bytes of the vector (at least 8 bytes).
6235 REGNO is the FP hard register the vector will be passed in. */
6238 function_arg_vector_value (int size, int regno)
6240 int i, nregs = size / 8;
6243 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
6245 for (i = 0; i < nregs; i++)
6247 XVECEXP (regs, 0, i)
6248 = gen_rtx_EXPR_LIST (VOIDmode,
6249 gen_rtx_REG (DImode, regno + 2*i),
6256 /* Determine where to put an argument to a function.
6257 Value is zero to push the argument on the stack,
6258 or a hard register in which to store the argument.
6260 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6261 the preceding args and about the function being called.
6262 MODE is the argument's machine mode.
6263 TYPE is the data type of the argument (as a tree).
6264 This is null for libcalls where that information may
6266 NAMED is true if this argument is a named parameter
6267 (otherwise it is an extra parameter matching an ellipsis).
6268 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6269 TARGET_FUNCTION_INCOMING_ARG. */
6272 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6273 const_tree type, bool named, bool incoming_p)
6275 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6277 int regbase = (incoming_p
6278 ? SPARC_INCOMING_INT_ARG_FIRST
6279 : SPARC_OUTGOING_INT_ARG_FIRST);
6280 int slotno, regno, padding;
6281 enum mode_class mclass = GET_MODE_CLASS (mode);
6283 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
6288 /* Vector types deserve special treatment because they are polymorphic wrt
6289 their mode, depending upon whether VIS instructions are enabled. */
6290 if (type && TREE_CODE (type) == VECTOR_TYPE)
6292 HOST_WIDE_INT size = int_size_in_bytes (type);
6293 gcc_assert ((TARGET_ARCH32 && size <= 8)
6294 || (TARGET_ARCH64 && size <= 16));
6296 if (mode == BLKmode)
6297 return function_arg_vector_value (size,
6298 SPARC_FP_ARG_FIRST + 2*slotno);
6300 mclass = MODE_FLOAT;
6304 return gen_rtx_REG (mode, regno);
6306 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6307 and are promoted to registers if possible. */
6308 if (type && TREE_CODE (type) == RECORD_TYPE)
6310 HOST_WIDE_INT size = int_size_in_bytes (type);
6311 gcc_assert (size <= 16);
6313 return function_arg_record_value (type, mode, slotno, named, regbase);
6316 /* Unions up to 16 bytes in size are passed in integer registers. */
6317 else if (type && TREE_CODE (type) == UNION_TYPE)
6319 HOST_WIDE_INT size = int_size_in_bytes (type);
6320 gcc_assert (size <= 16);
6322 return function_arg_union_value (size, mode, slotno, regno);
6325 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6326 but also have the slot allocated for them.
6327 If no prototype is in scope fp values in register slots get passed
6328 in two places, either fp regs and int regs or fp regs and memory. */
6329 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6330 && SPARC_FP_REG_P (regno))
6332 rtx reg = gen_rtx_REG (mode, regno);
6333 if (cum->prototype_p || cum->libcall_p)
6335 /* "* 2" because fp reg numbers are recorded in 4 byte
6338 /* ??? This will cause the value to be passed in the fp reg and
6339 in the stack. When a prototype exists we want to pass the
6340 value in the reg but reserve space on the stack. That's an
6341 optimization, and is deferred [for a bit]. */
6342 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6343 return gen_rtx_PARALLEL (mode,
6345 gen_rtx_EXPR_LIST (VOIDmode,
6346 NULL_RTX, const0_rtx),
6347 gen_rtx_EXPR_LIST (VOIDmode,
6351 /* ??? It seems that passing back a register even when past
6352 the area declared by REG_PARM_STACK_SPACE will allocate
6353 space appropriately, and will not copy the data onto the
6354 stack, exactly as we desire.
6356 This is due to locate_and_pad_parm being called in
6357 expand_call whenever reg_parm_stack_space > 0, which
6358 while beneficial to our example here, would seem to be
6359 in error from what had been intended. Ho hum... -- r~ */
6367 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6371 /* On incoming, we don't need to know that the value
6372 is passed in %f0 and %i0, and it confuses other parts
6373 causing needless spillage even on the simplest cases. */
6377 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6378 + (regno - SPARC_FP_ARG_FIRST) / 2);
6380 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6381 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6383 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6387 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6388 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6389 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6394 /* All other aggregate types are passed in an integer register in a mode
6395 corresponding to the size of the type. */
6396 else if (type && AGGREGATE_TYPE_P (type))
6398 HOST_WIDE_INT size = int_size_in_bytes (type);
6399 gcc_assert (size <= 16);
6401 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6404 return gen_rtx_REG (mode, regno);
6407 /* Handle the TARGET_FUNCTION_ARG target hook. */
6410 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6411 const_tree type, bool named)
6413 return sparc_function_arg_1 (cum, mode, type, named, false);
6416 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6419 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6420 const_tree type, bool named)
6422 return sparc_function_arg_1 (cum, mode, type, named, true);
6425 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
6428 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6430 return ((TARGET_ARCH64
6431 && (GET_MODE_ALIGNMENT (mode) == 128
6432 || (type && TYPE_ALIGN (type) == 128)))
6437 /* For an arg passed partly in registers and partly in memory,
6438 this is the number of bytes of registers used.
6439 For args passed entirely in registers or entirely in memory, zero.
6441 Any arg that starts in the first 6 regs but won't entirely fit in them
6442 needs partial registers on v8. On v9, structures with integer
6443 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6444 values that begin in the last fp reg [where "last fp reg" varies with the
6445 mode] will be split between that reg and memory. */
6448 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6449 tree type, bool named)
6451 int slotno, regno, padding;
6453 /* We pass false for incoming_p here; it doesn't matter. */
6454 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6455 false, &regno, &padding);
6462 if ((slotno + (mode == BLKmode
6463 ? ROUND_ADVANCE (int_size_in_bytes (type))
6464 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6465 > SPARC_INT_ARG_MAX)
6466 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6470 /* We are guaranteed by pass_by_reference that the size of the
6471 argument is not greater than 16 bytes, so we only need to return
6472 one word if the argument is partially passed in registers. */
6474 if (type && AGGREGATE_TYPE_P (type))
6476 int size = int_size_in_bytes (type);
6478 if (size > UNITS_PER_WORD
6479 && slotno == SPARC_INT_ARG_MAX - 1)
6480 return UNITS_PER_WORD;
6482 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6483 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6484 && ! (TARGET_FPU && named)))
6486 /* The complex types are passed as packed types. */
6487 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6488 && slotno == SPARC_INT_ARG_MAX - 1)
6489 return UNITS_PER_WORD;
6491 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6493 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6495 return UNITS_PER_WORD;
6502 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6503 Specify whether to pass the argument by reference. */
6506 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6507 enum machine_mode mode, const_tree type,
6508 bool named ATTRIBUTE_UNUSED)
6511 /* Original SPARC 32-bit ABI says that structures and unions,
6512 and quad-precision floats are passed by reference. For Pascal,
     also pass arrays by reference.  All other base types are passed
     in registers.
6516 Extended ABI (as implemented by the Sun compiler) says that all
6517 complex floats are passed by reference. Pass complex integers
6518 in registers up to 8 bytes. More generally, enforce the 2-word
6519 cap for passing arguments in registers.
6521 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6522 integers are passed like floats of the same size, that is in
6523 registers up to 8 bytes. Pass all vector floats by reference
     like structures and unions.  */
6525 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6527 /* Catch CDImode, TFmode, DCmode and TCmode. */
6528 || GET_MODE_SIZE (mode) > 8
6530 && TREE_CODE (type) == VECTOR_TYPE
6531 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6533 /* Original SPARC 64-bit ABI says that structures and unions
6534 smaller than 16 bytes are passed in registers, as well as
6535 all other base types.
6537 Extended ABI (as implemented by the Sun compiler) says that
6538 complex floats are passed in registers up to 16 bytes. Pass
6539 all complex integers in registers up to 16 bytes. More generally,
6540 enforce the 2-word cap for passing arguments in registers.
6542 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6543 integers are passed like floats of the same size, that is in
     registers (up to 16 bytes).  Pass all vector floats like structures
     and unions.  */
  return ((type
6547 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6548 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6549 /* Catch CTImode and TCmode. */
6550 || GET_MODE_SIZE (mode) > 16);
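  /* Two concrete cases (illustrative): on ARCH32 a TFmode 'long double'
     is 16 bytes > 8 and therefore goes by reference; on ARCH64 a 24-byte
     struct (> 16) goes by reference while a 16-byte struct is still
     passed in registers.  */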
6553 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6554 Update the data in CUM to advance over an argument
6555 of mode MODE and data type TYPE.
6556 TYPE is null for libcalls where that information may not be available. */
6559 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6560 const_tree type, bool named)
6562 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  /* We pass false for incoming_p here; it doesn't matter.  */
  function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
  /* If the argument requires leading padding, add it.  */
6569 cum->words += padding;
6573 cum->words += (mode != BLKmode
6574 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6575 : ROUND_ADVANCE (int_size_in_bytes (type)));
6579 if (type && AGGREGATE_TYPE_P (type))
6581 int size = int_size_in_bytes (type);
6585 else if (size <= 16)
6587 else /* passed by reference */
6592 cum->words += (mode != BLKmode
6593 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6594 : ROUND_ADVANCE (int_size_in_bytes (type)));
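  /* For example, with UNITS_PER_WORD == 8 a 12-byte BLKmode aggregate
     advances ROUND_ADVANCE (12) == 2 words; ROUND_ADVANCE rounds a byte
     count up to whole words.  */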
6599 /* Handle the FUNCTION_ARG_PADDING macro.
   For the 64-bit ABI, structs are always stored left-shifted in their
   argument slot.  */
6604 function_arg_padding (enum machine_mode mode, const_tree type)
6606 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
6609 /* Fall back to the default. */
6610 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6613 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6614 Specify whether to return the return value in memory. */
6617 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6620 /* Original SPARC 32-bit ABI says that structures and unions,
6621 and quad-precision floats are returned in memory. All other
6622 base types are returned in registers.
6624 Extended ABI (as implemented by the Sun compiler) says that
6625 all complex floats are returned in registers (8 FP registers
6626 at most for '_Complex long double'). Return all complex integers
6627 in registers (4 at most for '_Complex long long').
6629 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6630 integers are returned like floats of the same size, that is in
6631 registers up to 8 bytes and in memory otherwise. Return all
     vector floats in memory like structures and unions; note that
6633 they always have BLKmode like the latter. */
6634 return (TYPE_MODE (type) == BLKmode
6635 || TYPE_MODE (type) == TFmode
6636 || (TREE_CODE (type) == VECTOR_TYPE
6637 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6639 /* Original SPARC 64-bit ABI says that structures and unions
6640 smaller than 32 bytes are returned in registers, as well as
6641 all other base types.
6643 Extended ABI (as implemented by the Sun compiler) says that all
6644 complex floats are returned in registers (8 FP registers at most
6645 for '_Complex long double'). Return all complex integers in
6646 registers (4 at most for '_Complex TItype').
6648 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6649 integers are returned like floats of the same size, that is in
     registers.  Return all vector floats like structures and unions;
6651 note that they always have BLKmode like the latter. */
6652 return (TYPE_MODE (type) == BLKmode
6653 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6656 /* Handle the TARGET_STRUCT_VALUE target hook.
6657 Return where to find the structure return value address. */
6660 sparc_struct_value_rtx (tree fndecl, int incoming)
6669 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6670 STRUCT_VALUE_OFFSET));
6672 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6673 STRUCT_VALUE_OFFSET));
6675 /* Only follow the SPARC ABI for fixed-size structure returns.
     Variable-sized structure returns are handled per the normal
     procedures in GCC.  This is enabled by -mstd-struct-return.  */
6679 && sparc_std_struct_return
6680 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6681 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6683 /* We must check and adjust the return address, as it is
	     optional as to whether the return object is really
	     provided or not.  */
6686 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6687 rtx scratch = gen_reg_rtx (SImode);
6688 rtx endlab = gen_label_rtx ();
6690 /* Calculate the return object size */
6691 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6692 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6693 /* Construct a temporary return value */
6695 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6697 /* Implement SPARC 32-bit psABI callee return struct checking:
6699 Fetch the instruction where we will return to and see if
	     it's an unimp instruction (the most significant 10 bits
	     will be zero).  */
6702 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6703 plus_constant (ret_reg, 8)));
6704 /* Assume the size is valid and pre-adjust */
6705 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6706 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6708 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6709 /* Write the address of the memory pointed to by temp_val into
	     the memory pointed to by mem.  */
6711 emit_move_insn (mem, XEXP (temp_val, 0));
6712 emit_label (endlab);
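	  /* The sequence emitted above amounts to something like this
	     (an illustrative sketch, not literal compiler output):

		ld	[%i7+8], %tmp	! fetch the insn after the delay slot
		add	%i7, 4, %i7	! assume a valid unimp is there
		cmp	%tmp, size	! does its low field match the size?
		be	endlab		! yes: keep the adjusted address
		 nop
		sub	%i7, 4, %i7	! no: undo the adjustment
	     endlab:  */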
6719 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6720 For v9, function return values are subject to the same rules as arguments,
6721 except that up to 32 bytes may be returned in registers. */
6724 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6727 /* Beware that the two values are swapped here wrt function_arg. */
6728 int regbase = (outgoing
6729 ? SPARC_INCOMING_INT_ARG_FIRST
6730 : SPARC_OUTGOING_INT_ARG_FIRST);
6731 enum mode_class mclass = GET_MODE_CLASS (mode);
6734 /* Vector types deserve special treatment because they are polymorphic wrt
6735 their mode, depending upon whether VIS instructions are enabled. */
6736 if (type && TREE_CODE (type) == VECTOR_TYPE)
6738 HOST_WIDE_INT size = int_size_in_bytes (type);
6739 gcc_assert ((TARGET_ARCH32 && size <= 8)
6740 || (TARGET_ARCH64 && size <= 32));
6742 if (mode == BLKmode)
6743 return function_arg_vector_value (size,
6744 SPARC_FP_ARG_FIRST);
6746 mclass = MODE_FLOAT;
6749 if (TARGET_ARCH64 && type)
6751 /* Structures up to 32 bytes in size are returned in registers. */
6752 if (TREE_CODE (type) == RECORD_TYPE)
6754 HOST_WIDE_INT size = int_size_in_bytes (type);
6755 gcc_assert (size <= 32);
6757 return function_arg_record_value (type, mode, 0, 1, regbase);
6760 /* Unions up to 32 bytes in size are returned in integer registers. */
6761 else if (TREE_CODE (type) == UNION_TYPE)
6763 HOST_WIDE_INT size = int_size_in_bytes (type);
6764 gcc_assert (size <= 32);
6766 return function_arg_union_value (size, mode, 0, regbase);
6769 /* Objects that require it are returned in FP registers. */
6770 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6773 /* All other aggregate types are returned in an integer register in a
6774 mode corresponding to the size of the type. */
6775 else if (AGGREGATE_TYPE_P (type))
6777 /* All other aggregate types are passed in an integer register
6778 in a mode corresponding to the size of the type. */
6779 HOST_WIDE_INT size = int_size_in_bytes (type);
6780 gcc_assert (size <= 32);
6782 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6784 /* ??? We probably should have made the same ABI change in
6785 3.4.0 as the one we made for unions. The latter was
6786 required by the SCD though, while the former is not
6787 specified, so we favored compatibility and efficiency.
6789 Now we're stuck for aggregates larger than 16 bytes,
6790 because OImode vanished in the meantime. Let's not
6791 try to be unduly clever, and simply follow the ABI
6792 for unions in that case. */
6793 if (mode == BLKmode)
6794 return function_arg_union_value (size, mode, 0, regbase);
6799 /* We should only have pointer and integer types at this point. This
6800 must match sparc_promote_function_mode. */
6801 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6805 /* We should only have pointer and integer types at this point. This must
6806 match sparc_promote_function_mode. */
6807 else if (TARGET_ARCH32
6808 && mclass == MODE_INT
6809 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6812 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6813 regno = SPARC_FP_ARG_FIRST;
6817 return gen_rtx_REG (mode, regno);
6820 /* Handle TARGET_FUNCTION_VALUE.
6821 On the SPARC, the value is found in the first "output" register, but the
6822 called function leaves it in the first "input" register. */
6825 sparc_function_value (const_tree valtype,
6826 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6829 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6832 /* Handle TARGET_LIBCALL_VALUE. */
6835 sparc_libcall_value (enum machine_mode mode,
6836 const_rtx fun ATTRIBUTE_UNUSED)
6838 return sparc_function_value_1 (NULL_TREE, mode, false);
6841 /* Handle FUNCTION_VALUE_REGNO_P.
6842 On the SPARC, the first "output" reg is used for integer values, and the
6843 first floating point register is used for floating point values. */
6846 sparc_function_value_regno_p (const unsigned int regno)
6848 return (regno == 8 || regno == 32);
6851 /* Do what is necessary for `va_start'. We look at the current function
6852 to determine if stdarg or varargs is used and return the address of
6853 the first unnamed parameter. */
6856 sparc_builtin_saveregs (void)
6858 int first_reg = crtl->args.info.words;
6862 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6863 emit_move_insn (gen_rtx_MEM (word_mode,
6864 gen_rtx_PLUS (Pmode,
6866 GEN_INT (FIRST_PARM_OFFSET (0)
6869 gen_rtx_REG (word_mode,
6870 SPARC_INCOMING_INT_ARG_FIRST + regno));
6872 address = gen_rtx_PLUS (Pmode,
6874 GEN_INT (FIRST_PARM_OFFSET (0)
6875 + UNITS_PER_WORD * first_reg));
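  /* For example, for a 32-bit varargs function with two named argument
     words, first_reg == 2: the loop above stores %i2..%i5 into their
     argument slots and ADDRESS points at the %i2 slot, the first
     anonymous argument (FIRST_PARM_OFFSET (0) being 68 there: the
     16-word register window save area plus the struct-return slot).  */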
6880 /* Implement `va_start' for stdarg. */
6883 sparc_va_start (tree valist, rtx nextarg)
6885 nextarg = expand_builtin_saveregs ();
6886 std_expand_builtin_va_start (valist, nextarg);
6889 /* Implement `va_arg' for stdarg. */
6892 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6895 HOST_WIDE_INT size, rsize, align;
6898 tree ptrtype = build_pointer_type (type);
6900 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6903 size = rsize = UNITS_PER_WORD;
6909 size = int_size_in_bytes (type);
6910 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6915 /* For SPARC64, objects requiring 16-byte alignment get it. */
6916 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6917 align = 2 * UNITS_PER_WORD;
6919 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6920 are left-justified in their slots. */
6921 if (AGGREGATE_TYPE_P (type))
6924 size = rsize = UNITS_PER_WORD;
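  /* When ALIGN is set, first round the running arg pointer up to the
     slot boundary; the GIMPLE built below amounts to
     incr = (incr + align - 1) & -align.  */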
6934 incr = fold_build_pointer_plus_hwi (incr, align - 1);
6935 incr = fold_convert (sizetype, incr);
6936 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6938 incr = fold_convert (ptr_type_node, incr);
6941 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6944 if (BYTES_BIG_ENDIAN && size < rsize)
6945 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
6949 addr = fold_convert (build_pointer_type (ptrtype), addr);
6950 addr = build_va_arg_indirect_ref (addr);
6953 /* If the address isn't aligned properly for the type, we need a temporary.
     FIXME: This is inefficient; usually we can do this in registers.  */
6955 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6957 tree tmp = create_tmp_var (type, "va_arg_tmp");
6958 tree dest_addr = build_fold_addr_expr (tmp);
6959 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
6960 3, dest_addr, addr, size_int (rsize));
6961 TREE_ADDRESSABLE (tmp) = 1;
6962 gimplify_and_add (copy, pre_p);
6967 addr = fold_convert (ptrtype, addr);
6969 incr = fold_build_pointer_plus_hwi (incr, rsize);
6970 gimplify_assign (valist, incr, post_p);
6972 return build_va_arg_indirect_ref (addr);
6975 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6976 Specify whether the vector mode is supported by the hardware. */
6979 sparc_vector_mode_supported_p (enum machine_mode mode)
6981 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
6984 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6986 static enum machine_mode
6987 sparc_preferred_simd_mode (enum machine_mode mode)
7005 /* Return the string to output an unconditional branch to LABEL, which is
7006 the operand number of the label.
7008 DEST is the destination insn (i.e. the label), INSN is the source. */
7011 output_ubranch (rtx dest, int label, rtx insn)
7013 static char string[64];
7014 bool v9_form = false;
7017 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
7019 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7020 - INSN_ADDRESSES (INSN_UID (insn)));
7021 /* Leave some instructions for "slop". */
7022 if (delta >= -260000 && delta < 260000)
7027 strcpy (string, "ba%*,pt\t%%xcc, ");
7029 strcpy (string, "b%*\t");
7031 p = strchr (string, '\0');
7042 /* Return the string to output a conditional branch to LABEL, which is
7043 the operand number of the label. OP is the conditional expression.
7044 XEXP (OP, 0) is assumed to be a condition code register (integer or
7045 floating point) and its mode specifies what kind of comparison we made.
7047 DEST is the destination insn (i.e. the label), INSN is the source.
7049 REVERSED is nonzero if we should reverse the sense of the comparison.
7051 ANNUL is nonzero if we should generate an annulling branch. */
7054 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
7057 static char string[64];
7058 enum rtx_code code = GET_CODE (op);
7059 rtx cc_reg = XEXP (op, 0);
7060 enum machine_mode mode = GET_MODE (cc_reg);
7061 const char *labelno, *branch;
7062 int spaces = 8, far;
7065 /* v9 branches are limited to +-1MB. If it is too far away,
7078 fbne,a,pn %fcc2, .LC29
7086 far = TARGET_V9 && (get_attr_length (insn) >= 3);
7089 /* Reversal of FP compares takes care -- an ordered compare
7090 becomes an unordered compare and vice versa. */
7091 if (mode == CCFPmode || mode == CCFPEmode)
7092 code = reverse_condition_maybe_unordered (code);
7094 code = reverse_condition (code);
7097 /* Start by writing the branch condition. */
7098 if (mode == CCFPmode || mode == CCFPEmode)
7149 /* ??? !v9: FP branches cannot be preceded by another floating point
7150 insn. Because there is currently no concept of pre-delay slots,
     we can fix this only by always emitting a nop before a floating
     point branch.  */
7156 strcpy (string, "nop\n\t");
7157 strcat (string, branch);
7170 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7182 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
7203 strcpy (string, branch);
7205 spaces -= strlen (branch);
7206 p = strchr (string, '\0');
7208 /* Now add the annulling, the label, and a possible noop. */
7221 if (! far && insn && INSN_ADDRESSES_SET_P ())
7223 int delta = (INSN_ADDRESSES (INSN_UID (dest))
7224 - INSN_ADDRESSES (INSN_UID (insn)));
7225 /* Leave some instructions for "slop". */
7226 if (delta < -260000 || delta >= 260000)
7230 if (mode == CCFPmode || mode == CCFPEmode)
7232 static char v9_fcc_labelno[] = "%%fccX, ";
7233 /* Set the char indicating the number of the fcc reg to use. */
7234 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
7235 labelno = v9_fcc_labelno;
7238 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
7242 else if (mode == CCXmode || mode == CCX_NOOVmode)
7244 labelno = "%%xcc, ";
7249 labelno = "%%icc, ";
7254 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7257 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7270 strcpy (p, labelno);
7271 p = strchr (p, '\0');
7274 strcpy (p, ".+12\n\t nop\n\tb\t");
7275 /* Skip the next insn if requested or
7276 if we know that it will be a nop. */
7277 if (annul || ! final_sequence)
7291 /* Emit a library call comparison between floating point X and Y.
7292 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7293 Return the new operator to be used in the comparison sequence.
7295 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
   values as arguments instead of the TFmode registers themselves;
   that's why we cannot call emit_float_lib_cmp.  */
7300 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7303 rtx slot0, slot1, result, tem, tem2, libfunc;
7304 enum machine_mode mode;
7305 enum rtx_code new_comparison;
7310 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7314 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7318 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7322 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7326 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7330 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7341 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7354 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
7355 emit_move_insn (slot0, x);
7362 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
7363 emit_move_insn (slot1, y);
7366 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7367 emit_library_call (libfunc, LCT_NORMAL,
7369 XEXP (slot0, 0), Pmode,
7370 XEXP (slot1, 0), Pmode);
7375 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7376 emit_library_call (libfunc, LCT_NORMAL,
7378 x, TFmode, y, TFmode);
7383 /* Immediately move the result of the libcall into a pseudo
7384 register so reload doesn't clobber the value if it needs
7385 the return register for a spill reg. */
7386 result = gen_reg_rtx (mode);
7387 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7392 return gen_rtx_NE (VOIDmode, result, const0_rtx);
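  /* The _Qp_cmp/_Q_cmp result is decoded below assuming the usual
     fcmp-style convention: 0 = equal, 1 = less, 2 = greater,
     3 = unordered.  */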
7395 new_comparison = (comparison == UNORDERED ? EQ : NE);
7396 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
7399 new_comparison = (comparison == UNGT ? GT : NE);
7400 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7402 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7404 tem = gen_reg_rtx (mode);
7406 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7408 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7409 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7412 tem = gen_reg_rtx (mode);
7414 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7416 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7417 tem2 = gen_reg_rtx (mode);
7419 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7421 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7422 new_comparison = (comparison == UNEQ ? EQ : NE);
7423 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
7429 /* Generate an unsigned DImode to FP conversion. This is the same code
7430 optabs would emit if we didn't have TFmode patterns. */
7433 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7435 rtx neglab, donelab, i0, i1, f0, in, out;
7438 in = force_reg (DImode, operands[1]);
7439 neglab = gen_label_rtx ();
7440 donelab = gen_label_rtx ();
7441 i0 = gen_reg_rtx (DImode);
7442 i1 = gen_reg_rtx (DImode);
7443 f0 = gen_reg_rtx (mode);
7445 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7447 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7448 emit_jump_insn (gen_jump (donelab));
7451 emit_label (neglab);
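  /* IN has its sign bit set, i.e. it is at least 2^63 as an unsigned
     value.  Halve it, folding the discarded low bit back in as a sticky
     bit so the final rounding is still correct, convert, then double:
     out = (fp) ((in >> 1) | (in & 1)) * 2.0.  */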
7453 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7454 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7455 emit_insn (gen_iordi3 (i0, i0, i1));
7456 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7457 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7459 emit_label (donelab);
7462 /* Generate an FP to unsigned DImode conversion. This is the same code
7463 optabs would emit if we didn't have TFmode patterns. */
7466 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7468 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7471 in = force_reg (mode, operands[1]);
7472 neglab = gen_label_rtx ();
7473 donelab = gen_label_rtx ();
7474 i0 = gen_reg_rtx (DImode);
7475 i1 = gen_reg_rtx (DImode);
7476 limit = gen_reg_rtx (mode);
7477 f0 = gen_reg_rtx (mode);
7479 emit_move_insn (limit,
7480 CONST_DOUBLE_FROM_REAL_VALUE (
7481 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7482 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7484 emit_insn (gen_rtx_SET (VOIDmode,
7486 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7487 emit_jump_insn (gen_jump (donelab));
7490 emit_label (neglab);
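  /* IN is at least 2^63, too big for a signed conversion.  Subtract
     2^63 first, convert, then set bit 63 of the integer result again
     with an xor: out = (DImode) (in - 2^63) ^ ((DImode) 1 << 63).  */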
7492 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7493 emit_insn (gen_rtx_SET (VOIDmode,
7495 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7496 emit_insn (gen_movdi (i1, const1_rtx));
7497 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7498 emit_insn (gen_xordi3 (out, i0, i1));
7500 emit_label (donelab);
7503 /* Return the string to output a conditional branch to LABEL, testing
7504 register REG. LABEL is the operand number of the label; REG is the
7505 operand number of the reg. OP is the conditional expression. The mode
7506 of REG says what kind of comparison we made.
7508 DEST is the destination insn (i.e. the label), INSN is the source.
7510 REVERSED is nonzero if we should reverse the sense of the comparison.
7512 ANNUL is nonzero if we should generate an annulling branch. */
7515 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7516 int annul, rtx insn)
7518 static char string[64];
7519 enum rtx_code code = GET_CODE (op);
7520 enum machine_mode mode = GET_MODE (XEXP (op, 0));
  /* Branch-on-register instructions are limited to +-128KB.  If it is too far away,
7538 brgez,a,pn %o1, .LC29
7544 ba,pt %xcc, .LC29 */
7546 far = get_attr_length (insn) >= 3;
7548 /* If not floating-point or if EQ or NE, we can just reverse the code. */
7550 code = reverse_condition (code);
7552 /* Only 64 bit versions of these instructions exist. */
7553 gcc_assert (mode == DImode);
7555 /* Start by writing the branch condition. */
7560 strcpy (string, "brnz");
7564 strcpy (string, "brz");
7568 strcpy (string, "brgez");
7572 strcpy (string, "brlz");
7576 strcpy (string, "brlez");
7580 strcpy (string, "brgz");
7587 p = strchr (string, '\0');
7589 /* Now add the annulling, reg, label, and nop. */
7596 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7599 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7604 *p = p < string + 8 ? '\t' : ' ';
7612 int veryfar = 1, delta;
7614 if (INSN_ADDRESSES_SET_P ())
7616 delta = (INSN_ADDRESSES (INSN_UID (dest))
7617 - INSN_ADDRESSES (INSN_UID (insn)));
7618 /* Leave some instructions for "slop". */
7619 if (delta >= -260000 && delta < 260000)
7623 strcpy (p, ".+12\n\t nop\n\t");
7624 /* Skip the next insn if requested or
7625 if we know that it will be a nop. */
7626 if (annul || ! final_sequence)
7636 strcpy (p, "ba,pt\t%%xcc, ");
/* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
   Such instructions cannot be used in the delay slot of a return insn on v9.
7652 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
7656 epilogue_renumber (register rtx *where, int test)
7658 register const char *fmt;
7660 register enum rtx_code code;
7665 code = GET_CODE (*where);
7670 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7672 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7673 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
7681 /* Do not replace the frame pointer with the stack pointer because
7682 it can cause the delayed instruction to load below the stack.
7683 This occurs when instructions like:
7685 (set (reg/i:SI 24 %i0)
7686 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7687 (const_int -20 [0xffffffec])) 0))
7689 are in the return delayed slot. */
7691 if (GET_CODE (XEXP (*where, 0)) == REG
7692 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7693 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7694 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7699 if (SPARC_STACK_BIAS
7700 && GET_CODE (XEXP (*where, 0)) == REG
7701 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7709 fmt = GET_RTX_FORMAT (code);
7711 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7716 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7717 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7720 else if (fmt[i] == 'e'
7721 && epilogue_renumber (&(XEXP (*where, i)), test))
7727 /* Leaf functions and non-leaf functions have different needs. */
7730 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7733 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7735 static const int *const reg_alloc_orders[] = {
7736 reg_leaf_alloc_order,
7737 reg_nonleaf_alloc_order};
7740 order_regs_for_local_alloc (void)
7742 static int last_order_nonleaf = 1;
7744 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7746 last_order_nonleaf = !last_order_nonleaf;
7747 memcpy ((char *) reg_alloc_order,
7748 (const char *) reg_alloc_orders[last_order_nonleaf],
7749 FIRST_PSEUDO_REGISTER * sizeof (int));
7753 /* Return 1 if REG and MEM are legitimate enough to allow the various
7754 mem<-->reg splits to be run. */
7757 sparc_splitdi_legitimate (rtx reg, rtx mem)
7759 /* Punt if we are here by mistake. */
7760 gcc_assert (reload_completed);
7762 /* We must have an offsettable memory reference. */
7763 if (! offsettable_memref_p (mem))
7766 /* If we have legitimate args for ldd/std, we do not want
7767 the split to happen. */
7768 if ((REGNO (reg) % 2) == 0
7769 && mem_min_alignment (mem, 8))
7776 /* Like sparc_splitdi_legitimate but for REG <--> REG moves. */
7779 sparc_split_regreg_legitimate (rtx reg1, rtx reg2)
7783 if (GET_CODE (reg1) == SUBREG)
7784 reg1 = SUBREG_REG (reg1);
7785 if (GET_CODE (reg1) != REG)
7787 regno1 = REGNO (reg1);
7789 if (GET_CODE (reg2) == SUBREG)
7790 reg2 = SUBREG_REG (reg2);
7791 if (GET_CODE (reg2) != REG)
7793 regno2 = REGNO (reg2);
7795 if (SPARC_INT_REG_P (regno1) && SPARC_INT_REG_P (regno2))
7800 if ((SPARC_INT_REG_P (regno1) && SPARC_FP_REG_P (regno2))
7801 || (SPARC_FP_REG_P (regno1) && SPARC_INT_REG_P (regno2)))
7808 /* Return 1 if x and y are some kind of REG and they refer to
7809 different hard registers. This test is guaranteed to be
7810 run after reload. */
7813 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7815 if (GET_CODE (x) != REG)
7817 if (GET_CODE (y) != REG)
7819 if (REGNO (x) == REGNO (y))
7824 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7825 This makes them candidates for using ldd and std insns.
7827 Note reg1 and reg2 *must* be hard registers. */
7830 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7832 /* We might have been passed a SUBREG. */
7833 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7836 if (REGNO (reg1) % 2 != 0)
  /* Integer ldd is deprecated in SPARC V9.  */
7840 if (TARGET_V9 && SPARC_INT_REG_P (REGNO (reg1)))
7843 return (REGNO (reg1) == REGNO (reg2) - 1);
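  /* For example, %o0/%o1 form a valid pair while %o1/%o2 do not (odd
     first register), and on V9 only fp register pairs are accepted, per
     the deprecation check above.  */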
7846 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7849 This can only happen when addr1 and addr2, the addresses in mem1
7850 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7851 addr1 must also be aligned on a 64-bit boundary.
7853 Also iff dependent_reg_rtx is not null it should not be used to
7854 compute the address for mem1, i.e. we cannot optimize a sequence
7866 But, note that the transformation from:
7871 is perfectly fine. Thus, the peephole2 patterns always pass us
7872 the destination register of the first load, never the second one.
7874 For stores we don't have a similar problem, so dependent_reg_rtx is
7878 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7882 HOST_WIDE_INT offset1;
7884 /* The mems cannot be volatile. */
7885 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7888 /* MEM1 should be aligned on a 64-bit boundary. */
7889 if (MEM_ALIGN (mem1) < 64)
7892 addr1 = XEXP (mem1, 0);
7893 addr2 = XEXP (mem2, 0);
7895 /* Extract a register number and offset (if used) from the first addr. */
7896 if (GET_CODE (addr1) == PLUS)
7898 /* If not a REG, return zero. */
7899 if (GET_CODE (XEXP (addr1, 0)) != REG)
7903 reg1 = REGNO (XEXP (addr1, 0));
7904 /* The offset must be constant! */
7905 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7907 offset1 = INTVAL (XEXP (addr1, 1));
7910 else if (GET_CODE (addr1) != REG)
7914 reg1 = REGNO (addr1);
7915 /* This was a simple (mem (reg)) expression. Offset is 0. */
  /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
7920 if (GET_CODE (addr2) != PLUS)
7923 if (GET_CODE (XEXP (addr2, 0)) != REG
7924 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7927 if (reg1 != REGNO (XEXP (addr2, 0)))
7930 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7933 /* The first offset must be evenly divisible by 8 to ensure the
7934 address is 64 bit aligned. */
7935 if (offset1 % 8 != 0)
  /* The offset for the second addr must be 4 more than that of the first addr.  */
7939 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
  /* All the tests passed.  addr1 and addr2 are valid for ldd and std
     insns.  */
  return 1;
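/* For example, ld [%o0+8], %o2 followed by ld [%o0+12], %o3 can be
   combined into ldd [%o0+8], %o2, whereas offsets 4 and 8 would be
   rejected above because 4 is not a multiple of 8.  */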
7947 /* Return 1 if reg is a pseudo, or is the first register in
7948 a hard register pair. This makes it suitable for use in
7949 ldd and std insns. */
7952 register_ok_for_ldd (rtx reg)
7954 /* We might have been passed a SUBREG. */
7958 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7959 return (REGNO (reg) % 2 == 0);
7964 /* Return 1 if OP is a memory whose address is known to be
   aligned to an 8-byte boundary, or a pseudo during reload.
7966 This makes it suitable for use in ldd and std insns. */
7969 memory_ok_for_ldd (rtx op)
7973 /* In 64-bit mode, we assume that the address is word-aligned. */
7974 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7977 if (! can_create_pseudo_p ()
7978 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7981 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7983 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7992 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7995 sparc_print_operand_punct_valid_p (unsigned char code)
8008 /* Implement TARGET_PRINT_OPERAND.
8009 Print operand X (an rtx) in assembler syntax to file FILE.
8010 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
8011 For `%' followed by punctuation, CODE is the punctuation and X is null. */
8014 sparc_print_operand (FILE *file, rtx x, int code)
8019 /* Output an insn in a delay slot. */
8021 sparc_indent_opcode = 1;
8023 fputs ("\n\t nop", file);
8026 /* Output an annul flag if there's nothing for the delay slot and we
8027 are optimizing. This is always used with '(' below.
8028 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
8029 this is a dbx bug. So, we only do this when optimizing.
8030 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
8031 Always emit a nop in case the next instruction is a branch. */
8032 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
8036 /* Output a 'nop' if there's nothing for the delay slot and we are
8037 not optimizing. This is always used with '*' above. */
8038 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
8039 fputs ("\n\t nop", file);
8040 else if (final_sequence)
8041 sparc_indent_opcode = 1;
8044 /* Output the right displacement from the saved PC on function return.
8045 The caller may have placed an "unimp" insn immediately after the call
8046 so we have to account for it. This insn is used in the 32-bit ABI
8047 when calling a function that returns a non zero-sized structure. The
8048 64-bit ABI doesn't have it. Be careful to have this test be the same
8049 as that for the call. The exception is when sparc_std_struct_return
8050 is enabled, the psABI is followed exactly and the adjustment is made
8051 by the code in sparc_struct_value_rtx. The call emitted is the same
8052 when sparc_std_struct_return is enabled. */
8054 && cfun->returns_struct
8055 && !sparc_std_struct_return
8056 && DECL_SIZE (DECL_RESULT (current_function_decl))
8057 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
8059 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
8065 /* Output the Embedded Medium/Anywhere code model base register. */
8066 fputs (EMBMEDANY_BASE_REG, file);
8069 /* Print some local dynamic TLS name. */
8070 assemble_name (file, get_some_local_dynamic_name ());
8074 /* Adjust the operand to take into account a RESTORE operation. */
8075 if (GET_CODE (x) == CONST_INT)
8077 else if (GET_CODE (x) != REG)
8078 output_operand_lossage ("invalid %%Y operand");
8079 else if (REGNO (x) < 8)
8080 fputs (reg_names[REGNO (x)], file);
8081 else if (REGNO (x) >= 24 && REGNO (x) < 32)
8082 fputs (reg_names[REGNO (x)-16], file);
8084 output_operand_lossage ("invalid %%Y operand");
8087 /* Print out the low order register name of a register pair. */
8088 if (WORDS_BIG_ENDIAN)
8089 fputs (reg_names[REGNO (x)+1], file);
8091 fputs (reg_names[REGNO (x)], file);
8094 /* Print out the high order register name of a register pair. */
8095 if (WORDS_BIG_ENDIAN)
8096 fputs (reg_names[REGNO (x)], file);
8098 fputs (reg_names[REGNO (x)+1], file);
8101 /* Print out the second register name of a register pair or quad.
8102 I.e., R (%o0) => %o1. */
8103 fputs (reg_names[REGNO (x)+1], file);
8106 /* Print out the third register name of a register quad.
8107 I.e., S (%o0) => %o2. */
8108 fputs (reg_names[REGNO (x)+2], file);
8111 /* Print out the fourth register name of a register quad.
8112 I.e., T (%o0) => %o3. */
8113 fputs (reg_names[REGNO (x)+3], file);
8116 /* Print a condition code register. */
8117 if (REGNO (x) == SPARC_ICC_REG)
	  /* We don't handle CC[X]_NOOVmode because they're not supposed
	     to occur here.  */
8121 if (GET_MODE (x) == CCmode)
8122 fputs ("%icc", file);
8123 else if (GET_MODE (x) == CCXmode)
8124 fputs ("%xcc", file);
8129 /* %fccN register */
8130 fputs (reg_names[REGNO (x)], file);
8133 /* Print the operand's address only. */
8134 output_address (XEXP (x, 0));
8137 /* In this case we need a register. Use %g0 if the
8138 operand is const0_rtx. */
8140 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
8142 fputs ("%g0", file);
8149 switch (GET_CODE (x))
8151 case IOR: fputs ("or", file); break;
8152 case AND: fputs ("and", file); break;
8153 case XOR: fputs ("xor", file); break;
8154 default: output_operand_lossage ("invalid %%A operand");
8159 switch (GET_CODE (x))
8161 case IOR: fputs ("orn", file); break;
8162 case AND: fputs ("andn", file); break;
8163 case XOR: fputs ("xnor", file); break;
8164 default: output_operand_lossage ("invalid %%B operand");
8168 /* This is used by the conditional move instructions. */
8171 enum rtx_code rc = GET_CODE (x);
8175 case NE: fputs ("ne", file); break;
8176 case EQ: fputs ("e", file); break;
8177 case GE: fputs ("ge", file); break;
8178 case GT: fputs ("g", file); break;
8179 case LE: fputs ("le", file); break;
8180 case LT: fputs ("l", file); break;
8181 case GEU: fputs ("geu", file); break;
8182 case GTU: fputs ("gu", file); break;
8183 case LEU: fputs ("leu", file); break;
8184 case LTU: fputs ("lu", file); break;
8185 case LTGT: fputs ("lg", file); break;
8186 case UNORDERED: fputs ("u", file); break;
8187 case ORDERED: fputs ("o", file); break;
8188 case UNLT: fputs ("ul", file); break;
8189 case UNLE: fputs ("ule", file); break;
8190 case UNGT: fputs ("ug", file); break;
8191 case UNGE: fputs ("uge", file); break;
8192 case UNEQ: fputs ("ue", file); break;
8193 default: output_operand_lossage ("invalid %%C operand");
      /* These are used by the movr instruction pattern.  */
8201 enum rtx_code rc = GET_CODE (x);
8204 case NE: fputs ("ne", file); break;
8205 case EQ: fputs ("e", file); break;
8206 case GE: fputs ("gez", file); break;
8207 case LT: fputs ("lz", file); break;
8208 case LE: fputs ("lez", file); break;
8209 case GT: fputs ("gz", file); break;
8210 default: output_operand_lossage ("invalid %%D operand");
8217 /* Print a sign-extended character. */
8218 int i = trunc_int_for_mode (INTVAL (x), QImode);
8219 fprintf (file, "%d", i);
8224 /* Operand must be a MEM; write its address. */
8225 if (GET_CODE (x) != MEM)
8226 output_operand_lossage ("invalid %%f operand");
8227 output_address (XEXP (x, 0));
8232 /* Print a sign-extended 32-bit value. */
8234 if (GET_CODE(x) == CONST_INT)
8236 else if (GET_CODE(x) == CONST_DOUBLE)
8237 i = CONST_DOUBLE_LOW (x);
8240 output_operand_lossage ("invalid %%s operand");
8243 i = trunc_int_for_mode (i, SImode);
8244 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
8249 /* Do nothing special. */
8253 /* Undocumented flag. */
8254 output_operand_lossage ("invalid operand output code");
8257 if (GET_CODE (x) == REG)
8258 fputs (reg_names[REGNO (x)], file);
8259 else if (GET_CODE (x) == MEM)
8262 /* Poor Sun assembler doesn't understand absolute addressing. */
8263 if (CONSTANT_P (XEXP (x, 0)))
8264 fputs ("%g0+", file);
8265 output_address (XEXP (x, 0));
8268 else if (GET_CODE (x) == HIGH)
8270 fputs ("%hi(", file);
8271 output_addr_const (file, XEXP (x, 0));
8274 else if (GET_CODE (x) == LO_SUM)
8276 sparc_print_operand (file, XEXP (x, 0), 0);
8277 if (TARGET_CM_MEDMID)
8278 fputs ("+%l44(", file);
8280 fputs ("+%lo(", file);
8281 output_addr_const (file, XEXP (x, 1));
8284 else if (GET_CODE (x) == CONST_DOUBLE
8285 && (GET_MODE (x) == VOIDmode
8286 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8288 if (CONST_DOUBLE_HIGH (x) == 0)
8289 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8290 else if (CONST_DOUBLE_HIGH (x) == -1
8291 && CONST_DOUBLE_LOW (x) < 0)
8292 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8294 output_operand_lossage ("long long constant not a valid immediate operand");
8296 else if (GET_CODE (x) == CONST_DOUBLE)
8297 output_operand_lossage ("floating point constant not a valid immediate operand");
8298 else { output_addr_const (file, x); }
8301 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8304 sparc_print_operand_address (FILE *file, rtx x)
8306 register rtx base, index = 0;
8308 register rtx addr = x;
8311 fputs (reg_names[REGNO (addr)], file);
8312 else if (GET_CODE (addr) == PLUS)
8314 if (CONST_INT_P (XEXP (addr, 0)))
8315 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8316 else if (CONST_INT_P (XEXP (addr, 1)))
8317 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8319 base = XEXP (addr, 0), index = XEXP (addr, 1);
8320 if (GET_CODE (base) == LO_SUM)
8322 gcc_assert (USE_AS_OFFSETABLE_LO10
8324 && ! TARGET_CM_MEDMID);
8325 output_operand (XEXP (base, 0), 0);
8326 fputs ("+%lo(", file);
8327 output_address (XEXP (base, 1));
8328 fprintf (file, ")+%d", offset);
8332 fputs (reg_names[REGNO (base)], file);
8334 fprintf (file, "%+d", offset);
8335 else if (REG_P (index))
8336 fprintf (file, "+%s", reg_names[REGNO (index)]);
8337 else if (GET_CODE (index) == SYMBOL_REF
8338 || GET_CODE (index) == LABEL_REF
8339 || GET_CODE (index) == CONST)
8340 fputc ('+', file), output_addr_const (file, index);
8341 else gcc_unreachable ();
8344 else if (GET_CODE (addr) == MINUS
8345 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8347 output_addr_const (file, XEXP (addr, 0));
8349 output_addr_const (file, XEXP (addr, 1));
8350 fputs ("-.)", file);
8352 else if (GET_CODE (addr) == LO_SUM)
8354 output_operand (XEXP (addr, 0), 0);
8355 if (TARGET_CM_MEDMID)
8356 fputs ("+%l44(", file);
8358 fputs ("+%lo(", file);
8359 output_address (XEXP (addr, 1));
8363 && GET_CODE (addr) == CONST
8364 && GET_CODE (XEXP (addr, 0)) == MINUS
8365 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8366 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8367 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8369 addr = XEXP (addr, 0);
8370 output_addr_const (file, XEXP (addr, 0));
      /* Group the args of the second CONST in parentheses.  */
8373 /* Skip past the second CONST--it does nothing for us. */
8374 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8375 /* Close the parenthesis. */
8380 output_addr_const (file, addr);
8384 /* Target hook for assembling integer objects. The sparc version has
8385 special handling for aligned DI-mode objects. */
8388 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8390 /* ??? We only output .xword's for symbols and only then in environments
8391 where the assembler can handle them. */
8392 if (aligned_p && size == 8
8393 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8397 assemble_integer_with_op ("\t.xword\t", x);
8402 assemble_aligned_integer (4, const0_rtx);
8403 assemble_aligned_integer (4, x);
8407 return default_assemble_integer (x, size, aligned_p);
8410 /* Return the value of a code used in the .proc pseudo-op that says
8411 what kind of result this function returns. For non-C types, we pick
8412 the closest C type. */
8414 #ifndef SHORT_TYPE_SIZE
8415 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8418 #ifndef INT_TYPE_SIZE
8419 #define INT_TYPE_SIZE BITS_PER_WORD
8422 #ifndef LONG_TYPE_SIZE
8423 #define LONG_TYPE_SIZE BITS_PER_WORD
8426 #ifndef LONG_LONG_TYPE_SIZE
8427 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8430 #ifndef FLOAT_TYPE_SIZE
8431 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8434 #ifndef DOUBLE_TYPE_SIZE
8435 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8438 #ifndef LONG_DOUBLE_TYPE_SIZE
8439 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8443 sparc_type_code (register tree type)
8445 register unsigned long qualifiers = 0;
8446 register unsigned shift;
8448 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8449 setting more, since some assemblers will give an error for this. Also,
8450 we must be careful to avoid shifts of 32 bits or more to avoid getting
8451 unpredictable results. */
8453 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8455 switch (TREE_CODE (type))
8461 qualifiers |= (3 << shift);
8466 qualifiers |= (2 << shift);
8470 case REFERENCE_TYPE:
8472 qualifiers |= (1 << shift);
8476 return (qualifiers | 8);
8479 case QUAL_UNION_TYPE:
8480 return (qualifiers | 9);
8483 return (qualifiers | 10);
8486 return (qualifiers | 16);
8489 /* If this is a range type, consider it to be the underlying
8491 if (TREE_TYPE (type) != 0)
8494 /* Carefully distinguish all the standard types of C,
8495 without messing up if the language is not C. We do this by
8496 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8497 look at both the names and the above fields, but that's redundant.
8498 Any type whose size is between two C types will be considered
8499 to be the wider of the two types. Also, we do not have a
8500 special code to use for "long long", so anything wider than
8501 long is treated the same. Note that we can't distinguish
8502 between "int" and "long" in this code if they are the same
8503 size, but that's fine, since neither can the assembler. */
8505 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8506 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8508 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8509 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8511 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8512 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8515 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8518 /* If this is a range type, consider it to be the underlying
8520 if (TREE_TYPE (type) != 0)
8523 /* Carefully distinguish all the standard types of C,
8524 without messing up if the language is not C. */
8526 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8527 return (qualifiers | 6);
8530 return (qualifiers | 7);
8532 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8533 /* ??? We need to distinguish between double and float complex types,
8534 but I don't know how yet because I can't reach this code from
8535 existing front-ends. */
8536 return (qualifiers | 7); /* Who knows? */
8539 case BOOLEAN_TYPE: /* Boolean truth value type. */
8545 gcc_unreachable (); /* Not a type! */
8552 /* Nested function support. */
8554 /* Emit RTL insns to initialize the variable parts of a trampoline.
8555 FNADDR is an RTX for the address of the function's pure code.
8556 CXT is an RTX for the static chain value for the function.
8558 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8559 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8560 (to store insns). This is a bit excessive. Perhaps a different
8561 mechanism would be better here.
8563 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8566 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8568 /* SPARC 32-bit trampoline:
8571 sethi %hi(static), %g2
8573 or %g2, %lo(static), %g2
8575 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
8576 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
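     Each word stored below is an opcode template OR'ed with a piece of
     the target address (an illustrative decoding):

	0x03000000 | (fnaddr >> 10)	sethi	%hi(fnaddr), %g1
	0x05000000 | (cxt >> 10)	sethi	%hi(cxt), %g2
	0x81c06000 | (fnaddr & 0x3ff)	jmp	%g1 + %lo(fnaddr)
	0x8410a000 | (cxt & 0x3ff)	or	%g2, %lo(cxt), %g2  */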
8580 (adjust_address (m_tramp, SImode, 0),
8581 expand_binop (SImode, ior_optab,
8582 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8583 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8584 NULL_RTX, 1, OPTAB_DIRECT));
8587 (adjust_address (m_tramp, SImode, 4),
8588 expand_binop (SImode, ior_optab,
8589 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8590 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8591 NULL_RTX, 1, OPTAB_DIRECT));
8594 (adjust_address (m_tramp, SImode, 8),
8595 expand_binop (SImode, ior_optab,
8596 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8597 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8598 NULL_RTX, 1, OPTAB_DIRECT));
8601 (adjust_address (m_tramp, SImode, 12),
8602 expand_binop (SImode, ior_optab,
8603 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8604 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8605 NULL_RTX, 1, OPTAB_DIRECT));
8607 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
     aligned on a 16-byte boundary so one flush clears it all.  */
8609 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8610 if (sparc_cpu != PROCESSOR_ULTRASPARC
8611 && sparc_cpu != PROCESSOR_ULTRASPARC3
8612 && sparc_cpu != PROCESSOR_NIAGARA
8613 && sparc_cpu != PROCESSOR_NIAGARA2
8614 && sparc_cpu != PROCESSOR_NIAGARA3
8615 && sparc_cpu != PROCESSOR_NIAGARA4)
8616 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8618 /* Call __enable_execute_stack after writing onto the stack to make sure
8619 the stack address is accessible. */
8620 #ifdef HAVE_ENABLE_EXECUTE_STACK
8621 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8622 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8627 /* The 64-bit version is simpler because it makes more sense to load the
8628 values as "immediate" data out of the trampoline. It's also easier since
8629 we can read the PC without clobbering a register. */
8632 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8634 /* SPARC 64-bit trampoline:
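	rd	%pc, %g1
	ldx	[%g1+24], %g5
	jmp	%g5
	ldx	[%g1+16], %g5
	+16 bytes data

     (a sketch of what the four opcode words stored below decode to; the
     static chain and the function address are the two DImode words
     written at offsets 16 and 24).  */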
8643 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8644 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8645 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8646 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8647 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8648 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8649 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8650 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8651 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8652 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8653 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8655 if (sparc_cpu != PROCESSOR_ULTRASPARC
8656 && sparc_cpu != PROCESSOR_ULTRASPARC3
8657 && sparc_cpu != PROCESSOR_NIAGARA
8658 && sparc_cpu != PROCESSOR_NIAGARA2
8659 && sparc_cpu != PROCESSOR_NIAGARA3
8660 && sparc_cpu != PROCESSOR_NIAGARA4)
8661 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8663 /* Call __enable_execute_stack after writing onto the stack to make sure
8664 the stack address is accessible. */
8665 #ifdef HAVE_ENABLE_EXECUTE_STACK
8666 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8667 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8671 /* Worker for TARGET_TRAMPOLINE_INIT. */
8674 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8676 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8677 cxt = force_reg (Pmode, cxt);
8679 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8681 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8684 /* Adjust the cost of a scheduling dependency. Return the new cost of
8685 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
8688 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8690 enum attr_type insn_type;
8692 if (! recog_memoized (insn))
8695 insn_type = get_attr_type (insn);
8697 if (REG_NOTE_KIND (link) == 0)
      /* Data dependency; DEP_INSN writes a register that INSN reads
	 some cycles later.  */
      /* If a load, then the dependence must be on the memory address;
	 add an extra "cycle".  Note that the cost could be two cycles
	 if the reg was written late in an instruction group; we cannot
	 tell here.  */
8706 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8709 /* Get the delay only if the address of the store is the dependence. */
8710 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8712 rtx pat = PATTERN(insn);
8713 rtx dep_pat = PATTERN (dep_insn);
8715 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8716 return cost; /* This should not happen! */
8718 /* The dependency between the two instructions was on the data that
8719 is being stored. Assume that this implies that the address of the
8720 store is not dependent. */
8721 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8724 return cost + 3; /* An approximation. */
8727 /* A shift instruction cannot receive its data from an instruction
8728 in the same cycle; add a one cycle penalty. */
8729 if (insn_type == TYPE_SHIFT)
8730 return cost + 3; /* Split before cascade into shift. */
8734 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8735 INSN writes some cycles later. */
      /* These are only significant for the fpu unit; writing an fp reg before
8738 the fpu has finished with it stalls the processor. */
8740 /* Reusing an integer register causes no problems. */
8741 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8749 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8751 enum attr_type insn_type, dep_type;
8752 rtx pat = PATTERN(insn);
8753 rtx dep_pat = PATTERN (dep_insn);
8755 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8758 insn_type = get_attr_type (insn);
8759 dep_type = get_attr_type (dep_insn);
8761 switch (REG_NOTE_KIND (link))
      /* Data dependency; DEP_INSN writes a register that INSN reads some
	 cycles later.  */
8771 /* Get the delay iff the address of the store is the dependence. */
8772 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8775 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8782 /* If a load, then the dependence must be on the memory address. If
	 the addresses aren't equal, then it might be a false dependency.  */
8784 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8786 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8787 || GET_CODE (SET_DEST (dep_pat)) != MEM
8788 || GET_CODE (SET_SRC (pat)) != MEM
8789 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8790 XEXP (SET_SRC (pat), 0)))
8798 /* Compare to branch latency is 0. There is no benefit from
8799 separating compare and branch. */
8800 if (dep_type == TYPE_COMPARE)
8802 /* Floating point compare to branch latency is less than
8803 compare to conditional move. */
8804 if (dep_type == TYPE_FPCMP)
8813 /* Anti-dependencies only penalize the fpu unit. */
8814 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8826 sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
8830 case PROCESSOR_SUPERSPARC:
8831 cost = supersparc_adjust_cost (insn, link, dep, cost);
8833 case PROCESSOR_HYPERSPARC:
8834 case PROCESSOR_SPARCLITE86X:
8835 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8844 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8845 int sched_verbose ATTRIBUTE_UNUSED,
8846 int max_ready ATTRIBUTE_UNUSED)
8850 sparc_use_sched_lookahead (void)
8852 if (sparc_cpu == PROCESSOR_NIAGARA
8853 || sparc_cpu == PROCESSOR_NIAGARA2
8854 || sparc_cpu == PROCESSOR_NIAGARA3
8855 || sparc_cpu == PROCESSOR_NIAGARA4)
8857 if (sparc_cpu == PROCESSOR_ULTRASPARC
8858 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8860 if ((1 << sparc_cpu) &
8861 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8862 (1 << PROCESSOR_SPARCLITE86X)))
8868 sparc_issue_rate (void)
8872 case PROCESSOR_NIAGARA:
8873 case PROCESSOR_NIAGARA2:
8874 case PROCESSOR_NIAGARA3:
8875 case PROCESSOR_NIAGARA4:
8879 /* Assume V9 processors are capable of at least dual-issue. */
8881 case PROCESSOR_SUPERSPARC:
8883 case PROCESSOR_HYPERSPARC:
8884 case PROCESSOR_SPARCLITE86X:
8886 case PROCESSOR_ULTRASPARC:
8887 case PROCESSOR_ULTRASPARC3:
8893 set_extends (rtx insn)
8895 register rtx pat = PATTERN (insn);
8897 switch (GET_CODE (SET_SRC (pat)))
8899 /* Load and some shift instructions zero extend. */
      /* sethi clears the high bits.  */
      /* LO_SUM is used with sethi; sethi cleared the high
	 bits and the values used with lo_sum are positive.  */
      /* Store flag stores 0 or 1.  */
8917 rtx op0 = XEXP (SET_SRC (pat), 0);
8918 rtx op1 = XEXP (SET_SRC (pat), 1);
8919 if (GET_CODE (op1) == CONST_INT)
8920 return INTVAL (op1) >= 0;
8921 if (GET_CODE (op0) != REG)
8923 if (sparc_check_64 (op0, insn) == 1)
8925 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8930 rtx op0 = XEXP (SET_SRC (pat), 0);
8931 rtx op1 = XEXP (SET_SRC (pat), 1);
8932 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8934 if (GET_CODE (op1) == CONST_INT)
8935 return INTVAL (op1) >= 0;
8936 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8939 return GET_MODE (SET_SRC (pat)) == SImode;
8940 /* Positive integers leave the high bits zero. */
8942 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8944 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8947 return - (GET_MODE (SET_SRC (pat)) == SImode);
8949 return sparc_check_64 (SET_SRC (pat), insn);
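/* For illustration: on SPARC-V9 a 32-bit load such as lduw zero-extends
   into the full 64-bit register, which is why a single set coming from a
   load lets sparc_check_64 below report that the high 32 bits are zero.  */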
8955 /* We _ought_ to have only one kind per function, but... */
8956 static GTY(()) rtx sparc_addr_diff_list;
8957 static GTY(()) rtx sparc_addr_list;
8960 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8962 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8964 sparc_addr_diff_list
8965 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8967 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8971 sparc_output_addr_vec (rtx vec)
8973 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8974 int idx, vlen = XVECLEN (body, 0);
8976 #ifdef ASM_OUTPUT_ADDR_VEC_START
8977 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8980 #ifdef ASM_OUTPUT_CASE_LABEL
8981 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8984 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8987 for (idx = 0; idx < vlen; idx++)
8989 ASM_OUTPUT_ADDR_VEC_ELT
8990 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8993 #ifdef ASM_OUTPUT_ADDR_VEC_END
8994 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8999 sparc_output_addr_diff_vec (rtx vec)
9001 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
9002 rtx base = XEXP (XEXP (body, 0), 0);
9003 int idx, vlen = XVECLEN (body, 1);
9005 #ifdef ASM_OUTPUT_ADDR_VEC_START
9006 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
9009 #ifdef ASM_OUTPUT_CASE_LABEL
9010 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
9013 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
9016 for (idx = 0; idx < vlen; idx++)
9018 ASM_OUTPUT_ADDR_DIFF_ELT
9021 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
9022 CODE_LABEL_NUMBER (base));
9025 #ifdef ASM_OUTPUT_ADDR_VEC_END
9026 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
9031 sparc_output_deferred_case_vectors (void)
9036 if (sparc_addr_list == NULL_RTX
9037 && sparc_addr_diff_list == NULL_RTX)
9040 /* Align to cache line in the function's code section. */
9041 switch_to_section (current_function_section ());
9043 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9045 ASM_OUTPUT_ALIGN (asm_out_file, align);
9047 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
9048 sparc_output_addr_vec (XEXP (t, 0));
9049 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
9050 sparc_output_addr_diff_vec (XEXP (t, 0));
9052 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
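/* For illustration only (the exact directives and label spelling depend on
   the ASM_OUTPUT_* macros of the configured assembler): a deferred
   difference vector for a three-way switch is emitted after the function
   body roughly as

       .align  4
   .L42:
       .word   .L43-.L42
       .word   .L44-.L42
       .word   .L45-.L42

   while a plain address vector emits one absolute .word per case label.  */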
9055 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
9056 unknown. Return 1 if the high bits are zero, -1 if the register is sign extended. */
9059 sparc_check_64 (rtx x, rtx insn)
9061 /* If a register is set only once it is safe to ignore insns this
9062 code does not know how to handle. The loop will either recognize
9063 the single set and return the correct value or fail to recognize it and return 0. */
9068 gcc_assert (GET_CODE (x) == REG);
9070 if (GET_MODE (x) == DImode)
9071 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
9073 if (flag_expensive_optimizations
9074 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
9080 insn = get_last_insn_anywhere ();
9085 while ((insn = PREV_INSN (insn)))
9087 switch (GET_CODE (insn))
9100 rtx pat = PATTERN (insn);
9101 if (GET_CODE (pat) != SET)
9103 if (rtx_equal_p (x, SET_DEST (pat)))
9104 return set_extends (insn);
9105 if (y && rtx_equal_p (y, SET_DEST (pat)))
9106 return set_extends (insn);
9107 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
9115 /* Returns assembly code to perform a DImode shift using
9116 a 64-bit global or out register on SPARC-V8+. */
9118 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
9120 static char asm_code[60];
9122 /* The scratch register is only required when the destination
9123 register is not a 64-bit global or out register. */
9124 if (which_alternative != 2)
9125 operands[3] = operands[0];
9127 /* We can only shift by constants <= 63. */
9128 if (GET_CODE (operands[2]) == CONST_INT)
9129 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
9131 if (GET_CODE (operands[1]) == CONST_INT)
9133 output_asm_insn ("mov\t%1, %3", operands);
9137 output_asm_insn ("sllx\t%H1, 32, %3", operands);
9138 if (sparc_check_64 (operands[1], insn) <= 0)
9139 output_asm_insn ("srl\t%L1, 0, %L1", operands);
9140 output_asm_insn ("or\t%L1, %3, %3", operands);
9143 strcpy(asm_code, opcode);
9145 if (which_alternative != 2)
9146 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
9148 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
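/* For illustration, a sketch assembled from the strings above (not an
   exhaustive listing of the alternatives): when the 64-bit input still
   lives in a 32-bit register pair, the value is first glued together and
   then shifted, roughly as

       sllx     %H1, 32, %3       high word into the upper half
       srl      %L1, 0, %L1       clear any stale upper half of the low word
       or       %L1, %3, %3       full 64-bit value now in the scratch
       <opcode> %3, %2, %L0       the shift proper (sllx, srlx or srax)
       srlx     %L0, 32, %H0      split the result back into the pair

   where the scratch aliases the destination when no separate scratch
   register is needed.  */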
9151 /* Output rtl to increment the profiler label LABELNO
9152 for profiling a function entry. */
9155 sparc_profile_hook (int labelno)
9160 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
9161 if (NO_PROFILE_COUNTERS)
9163 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
9167 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9168 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
9169 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
9173 #ifdef TARGET_SOLARIS
9174 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
9177 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
9178 tree decl ATTRIBUTE_UNUSED)
9180 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
9182 solaris_elf_asm_comdat_section (name, flags, decl);
9186 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
9188 if (!(flags & SECTION_DEBUG))
9189 fputs (",#alloc", asm_out_file);
9190 if (flags & SECTION_WRITE)
9191 fputs (",#write", asm_out_file);
9192 if (flags & SECTION_TLS)
9193 fputs (",#tls", asm_out_file);
9194 if (flags & SECTION_CODE)
9195 fputs (",#execinstr", asm_out_file);
9197 /* ??? Handle SECTION_BSS. */
9199 fputc ('\n', asm_out_file);
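/* For illustration: for a hypothetical writable data section named
   ".mydata" (and no comdat group), the code above produces

	.section	".mydata",#alloc,#write

   and a code section additionally gets the ",#execinstr" flag.  */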
9201 #endif /* TARGET_SOLARIS */
9203 /* We do not allow indirect calls to be optimized into sibling calls.
9205 We cannot use sibling calls when delayed branches are disabled
9206 because they will likely require the call delay slot to be filled.
9208 Also, on SPARC 32-bit we cannot emit a sibling call when the
9209 current function returns a structure. This is because the "unimp
9210 after call" convention would cause the callee to return to the
9211 wrong place. The generic code already disallows cases where the
9212 function being called returns a structure.
9214 It may seem strange how this last case could occur. Usually there
9215 is code after the call which jumps to epilogue code which dumps the
9216 return value into the struct return area. That ought to invalidate
9217 the sibling call right? Well, in the C++ case we can end up passing
9218 the pointer to the struct return area to a constructor (which returns
9219 void) and then nothing else happens. Such a sibling call would look
9220 valid without the added check here.
9222 VxWorks PIC PLT entries require the global pointer to be initialized
9223 on entry. We therefore can't emit sibling calls to them. */
9225 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9228 && flag_delayed_branch
9229 && (TARGET_ARCH64 || ! cfun->returns_struct)
9230 && !(TARGET_VXWORKS_RTP
9232 && !targetm.binds_local_p (decl)));
9235 /* libfunc renaming. */
9238 sparc_init_libfuncs (void)
9242 /* Use the subroutines that Sun's library provides for integer
9243 multiply and divide. The `*' prevents an underscore from
9244 being prepended by the compiler. .umul is a little faster than .mul. */
9246 set_optab_libfunc (smul_optab, SImode, "*.umul");
9247 set_optab_libfunc (sdiv_optab, SImode, "*.div");
9248 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
9249 set_optab_libfunc (smod_optab, SImode, "*.rem");
9250 set_optab_libfunc (umod_optab, SImode, "*.urem");
9252 /* TFmode arithmetic. These names are part of the SPARC 32bit ABI. */
9253 set_optab_libfunc (add_optab, TFmode, "_Q_add");
9254 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
9255 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
9256 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
9257 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
9259 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
9260 is because with soft-float, the SFmode and DFmode sqrt
9261 instructions will be absent, and the compiler will notice and
9262 try to use the TFmode sqrt instruction for calls to the
9263 builtin function sqrt, but this fails. */
9265 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
9267 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
9268 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
9269 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
9270 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
9271 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
9272 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
9274 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
9275 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9276 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9277 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9279 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9280 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9281 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9282 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
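/* For illustration (assuming a configuration where long double is the
   128-bit TFmode type): with the mappings above, a 32-bit compilation of

     long double mul (long double x, long double y) { return x * y; }

   is expected to call _Q_mul instead of emitting inline code, and long
   double comparisons go through _Q_feq, _Q_flt and friends.  */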
9284 if (DITF_CONVERSION_LIBFUNCS)
9286 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9287 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9288 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9289 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9292 if (SUN_CONVERSION_LIBFUNCS)
9294 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9295 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9296 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9297 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9302 /* In the SPARC 64bit ABI, SImode multiply and divide functions
9303 do not exist in the library. Make sure the compiler does not
9304 emit calls to them by accident. (It should always use the
9305 hardware instructions.) */
9306 set_optab_libfunc (smul_optab, SImode, 0);
9307 set_optab_libfunc (sdiv_optab, SImode, 0);
9308 set_optab_libfunc (udiv_optab, SImode, 0);
9309 set_optab_libfunc (smod_optab, SImode, 0);
9310 set_optab_libfunc (umod_optab, SImode, 0);
9312 if (SUN_INTEGER_MULTIPLY_64)
9314 set_optab_libfunc (smul_optab, DImode, "__mul64");
9315 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9316 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9317 set_optab_libfunc (smod_optab, DImode, "__rem64");
9318 set_optab_libfunc (umod_optab, DImode, "__urem64");
9321 if (SUN_CONVERSION_LIBFUNCS)
9323 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9324 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9325 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9326 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
9331 static tree def_builtin(const char *name, int code, tree type)
9333 return add_builtin_function(name, type, code, BUILT_IN_MD, NULL,
9337 static tree def_builtin_const(const char *name, int code, tree type)
9339 tree t = def_builtin(name, code, type);
9342 TREE_READONLY (t) = 1;
9347 /* Implement the TARGET_INIT_BUILTINS target hook.
9348 Create builtin functions for special SPARC instructions. */
9351 sparc_init_builtins (void)
9354 sparc_vis_init_builtins ();
9357 /* Create builtin functions for VIS 1.0 instructions. */
9360 sparc_vis_init_builtins (void)
9362 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9363 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9364 tree v4hi = build_vector_type (intHI_type_node, 4);
9365 tree v2hi = build_vector_type (intHI_type_node, 2);
9366 tree v2si = build_vector_type (intSI_type_node, 2);
9367 tree v1si = build_vector_type (intSI_type_node, 1);
9369 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9370 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9371 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9372 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9373 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
9374 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
9375 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
9376 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
9377 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
9378 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
9379 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
9380 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
9381 tree v2hi_ftype_v2hi_v2hi = build_function_type_list (v2hi, v2hi, v2hi, 0);
9382 tree v1si_ftype_v1si_v1si = build_function_type_list (v1si, v1si, v1si, 0);
9383 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
9385 intDI_type_node, 0);
9386 tree di_ftype_v8qi_v8qi = build_function_type_list (intDI_type_node,
9388 tree si_ftype_v8qi_v8qi = build_function_type_list (intSI_type_node,
9390 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
9392 intDI_type_node, 0);
9393 tree si_ftype_si_si = build_function_type_list (intSI_type_node,
9395 intSI_type_node, 0);
9396 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
9398 intSI_type_node, 0);
9399 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
9401 intDI_type_node, 0);
9402 tree si_ftype_ptr_ptr = build_function_type_list (intSI_type_node,
9405 tree di_ftype_ptr_ptr = build_function_type_list (intDI_type_node,
9408 tree si_ftype_v4hi_v4hi = build_function_type_list (intSI_type_node,
9410 tree si_ftype_v2si_v2si = build_function_type_list (intSI_type_node,
9412 tree di_ftype_v4hi_v4hi = build_function_type_list (intDI_type_node,
9414 tree di_ftype_v2si_v2si = build_function_type_list (intDI_type_node,
9416 tree void_ftype_di = build_function_type_list (void_type_node,
9417 intDI_type_node, 0);
9418 tree di_ftype_void = build_function_type_list (intDI_type_node,
9420 tree void_ftype_si = build_function_type_list (void_type_node,
9421 intSI_type_node, 0);
9422 tree sf_ftype_sf_sf = build_function_type_list (float_type_node,
9424 float_type_node, 0);
9425 tree df_ftype_df_df = build_function_type_list (double_type_node,
9427 double_type_node, 0);
9429 /* Packing and expanding vectors. */
9430 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis,
9432 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
9433 v8qi_ftype_v2si_v8qi);
9434 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
9436 def_builtin_const ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis,
9438 def_builtin_const ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
9439 v8qi_ftype_v4qi_v4qi);
9441 /* Multiplications. */
9442 def_builtin_const ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
9443 v4hi_ftype_v4qi_v4hi);
9444 def_builtin_const ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
9445 v4hi_ftype_v4qi_v2hi);
9446 def_builtin_const ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
9447 v4hi_ftype_v4qi_v2hi);
9448 def_builtin_const ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
9449 v4hi_ftype_v8qi_v4hi);
9450 def_builtin_const ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
9451 v4hi_ftype_v8qi_v4hi);
9452 def_builtin_const ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
9453 v2si_ftype_v4qi_v2hi);
9454 def_builtin_const ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
9455 v2si_ftype_v4qi_v2hi);
9457 /* Data aligning. */
9458 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
9459 v4hi_ftype_v4hi_v4hi);
9460 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
9461 v8qi_ftype_v8qi_v8qi);
9462 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
9463 v2si_ftype_v2si_v2si);
9464 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
9467 def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
9469 def_builtin ("__builtin_vis_read_gsr", CODE_FOR_rdgsr_vis,
9474 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
9476 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrldi_vis,
9481 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
9483 def_builtin ("__builtin_vis_alignaddrl", CODE_FOR_alignaddrlsi_vis,
9487 /* Pixel distance. */
9488 def_builtin_const ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
9489 di_ftype_v8qi_v8qi_di);
9491 /* Edge handling. */
9494 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8di_vis,
9496 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8ldi_vis,
9498 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16di_vis,
9500 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16ldi_vis,
9502 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32di_vis,
9504 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32ldi_vis,
9508 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8ndi_vis,
9510 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lndi_vis,
9512 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16ndi_vis,
9514 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lndi_vis,
9516 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32ndi_vis,
9518 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lndi_vis,
9524 def_builtin_const ("__builtin_vis_edge8", CODE_FOR_edge8si_vis,
9526 def_builtin_const ("__builtin_vis_edge8l", CODE_FOR_edge8lsi_vis,
9528 def_builtin_const ("__builtin_vis_edge16", CODE_FOR_edge16si_vis,
9530 def_builtin_const ("__builtin_vis_edge16l", CODE_FOR_edge16lsi_vis,
9532 def_builtin_const ("__builtin_vis_edge32", CODE_FOR_edge32si_vis,
9534 def_builtin_const ("__builtin_vis_edge32l", CODE_FOR_edge32lsi_vis,
9538 def_builtin_const ("__builtin_vis_edge8n", CODE_FOR_edge8nsi_vis,
9540 def_builtin_const ("__builtin_vis_edge8ln", CODE_FOR_edge8lnsi_vis,
9542 def_builtin_const ("__builtin_vis_edge16n", CODE_FOR_edge16nsi_vis,
9544 def_builtin_const ("__builtin_vis_edge16ln", CODE_FOR_edge16lnsi_vis,
9546 def_builtin_const ("__builtin_vis_edge32n", CODE_FOR_edge32nsi_vis,
9548 def_builtin_const ("__builtin_vis_edge32ln", CODE_FOR_edge32lnsi_vis,
9553 /* Pixel compare. */
9556 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16di_vis,
9557 di_ftype_v4hi_v4hi);
9558 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32di_vis,
9559 di_ftype_v2si_v2si);
9560 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16di_vis,
9561 di_ftype_v4hi_v4hi);
9562 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32di_vis,
9563 di_ftype_v2si_v2si);
9564 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16di_vis,
9565 di_ftype_v4hi_v4hi);
9566 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32di_vis,
9567 di_ftype_v2si_v2si);
9568 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16di_vis,
9569 di_ftype_v4hi_v4hi);
9570 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32di_vis,
9571 di_ftype_v2si_v2si);
9575 def_builtin_const ("__builtin_vis_fcmple16", CODE_FOR_fcmple16si_vis,
9576 si_ftype_v4hi_v4hi);
9577 def_builtin_const ("__builtin_vis_fcmple32", CODE_FOR_fcmple32si_vis,
9578 si_ftype_v2si_v2si);
9579 def_builtin_const ("__builtin_vis_fcmpne16", CODE_FOR_fcmpne16si_vis,
9580 si_ftype_v4hi_v4hi);
9581 def_builtin_const ("__builtin_vis_fcmpne32", CODE_FOR_fcmpne32si_vis,
9582 si_ftype_v2si_v2si);
9583 def_builtin_const ("__builtin_vis_fcmpgt16", CODE_FOR_fcmpgt16si_vis,
9584 si_ftype_v4hi_v4hi);
9585 def_builtin_const ("__builtin_vis_fcmpgt32", CODE_FOR_fcmpgt32si_vis,
9586 si_ftype_v2si_v2si);
9587 def_builtin_const ("__builtin_vis_fcmpeq16", CODE_FOR_fcmpeq16si_vis,
9588 si_ftype_v4hi_v4hi);
9589 def_builtin_const ("__builtin_vis_fcmpeq32", CODE_FOR_fcmpeq32si_vis,
9590 si_ftype_v2si_v2si);
9593 /* Addition and subtraction. */
9594 def_builtin_const ("__builtin_vis_fpadd16", CODE_FOR_addv4hi3,
9595 v4hi_ftype_v4hi_v4hi);
9596 def_builtin_const ("__builtin_vis_fpadd16s", CODE_FOR_addv2hi3,
9597 v2hi_ftype_v2hi_v2hi);
9598 def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
9599 v2si_ftype_v2si_v2si);
9600 def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
9601 v1si_ftype_v1si_v1si);
9602 def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
9603 v4hi_ftype_v4hi_v4hi);
9604 def_builtin_const ("__builtin_vis_fpsub16s", CODE_FOR_subv2hi3,
9605 v2hi_ftype_v2hi_v2hi);
9606 def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
9607 v2si_ftype_v2si_v2si);
9608 def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
9609 v1si_ftype_v1si_v1si);
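/* A minimal usage sketch for the builtins defined above (assumes -mvis;
   the typedef belongs to user code, not to this file):

     typedef short vec16 __attribute__ ((vector_size (8)));

     vec16 add4 (vec16 a, vec16 b)
     {
       return __builtin_vis_fpadd16 (a, b);
     }

   The call is expanded by sparc_expand_builtin below into a single
   fpadd16 instruction.  */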
9611 /* Three-dimensional array addressing. */
9614 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8di_vis,
9616 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16di_vis,
9618 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32di_vis,
9623 def_builtin_const ("__builtin_vis_array8", CODE_FOR_array8si_vis,
9625 def_builtin_const ("__builtin_vis_array16", CODE_FOR_array16si_vis,
9627 def_builtin_const ("__builtin_vis_array32", CODE_FOR_array32si_vis,
9633 /* Byte mask and shuffle */
9635 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmaskdi_vis,
9638 def_builtin ("__builtin_vis_bmask", CODE_FOR_bmasksi_vis,
9640 def_builtin ("__builtin_vis_bshufflev4hi", CODE_FOR_bshufflev4hi_vis,
9641 v4hi_ftype_v4hi_v4hi);
9642 def_builtin ("__builtin_vis_bshufflev8qi", CODE_FOR_bshufflev8qi_vis,
9643 v8qi_ftype_v8qi_v8qi);
9644 def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
9645 v2si_ftype_v2si_v2si);
9646 def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
9654 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8di_vis,
9656 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16di_vis,
9658 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32di_vis,
9663 def_builtin ("__builtin_vis_cmask8", CODE_FOR_cmask8si_vis,
9665 def_builtin ("__builtin_vis_cmask16", CODE_FOR_cmask16si_vis,
9667 def_builtin ("__builtin_vis_cmask32", CODE_FOR_cmask32si_vis,
9671 def_builtin_const ("__builtin_vis_fchksm16", CODE_FOR_fchksm16_vis,
9672 v4hi_ftype_v4hi_v4hi);
9674 def_builtin_const ("__builtin_vis_fsll16", CODE_FOR_vashlv4hi3,
9675 v4hi_ftype_v4hi_v4hi);
9676 def_builtin_const ("__builtin_vis_fslas16", CODE_FOR_vssashlv4hi3,
9677 v4hi_ftype_v4hi_v4hi);
9678 def_builtin_const ("__builtin_vis_fsrl16", CODE_FOR_vlshrv4hi3,
9679 v4hi_ftype_v4hi_v4hi);
9680 def_builtin_const ("__builtin_vis_fsra16", CODE_FOR_vashrv4hi3,
9681 v4hi_ftype_v4hi_v4hi);
9682 def_builtin_const ("__builtin_vis_fsll32", CODE_FOR_vashlv2si3,
9683 v2si_ftype_v2si_v2si);
9684 def_builtin_const ("__builtin_vis_fslas32", CODE_FOR_vssashlv2si3,
9685 v2si_ftype_v2si_v2si);
9686 def_builtin_const ("__builtin_vis_fsrl32", CODE_FOR_vlshrv2si3,
9687 v2si_ftype_v2si_v2si);
9688 def_builtin_const ("__builtin_vis_fsra32", CODE_FOR_vashrv2si3,
9689 v2si_ftype_v2si_v2si);
9692 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistndi_vis,
9693 di_ftype_v8qi_v8qi);
9695 def_builtin_const ("__builtin_vis_pdistn", CODE_FOR_pdistnsi_vis,
9696 si_ftype_v8qi_v8qi);
9698 def_builtin_const ("__builtin_vis_fmean16", CODE_FOR_fmean16_vis,
9699 v4hi_ftype_v4hi_v4hi);
9700 def_builtin_const ("__builtin_vis_fpadd64", CODE_FOR_fpadd64_vis,
9702 def_builtin_const ("__builtin_vis_fpsub64", CODE_FOR_fpsub64_vis,
9705 def_builtin_const ("__builtin_vis_fpadds16", CODE_FOR_ssaddv4hi3,
9706 v4hi_ftype_v4hi_v4hi);
9707 def_builtin_const ("__builtin_vis_fpadds16s", CODE_FOR_ssaddv2hi3,
9708 v2hi_ftype_v2hi_v2hi);
9709 def_builtin_const ("__builtin_vis_fpsubs16", CODE_FOR_sssubv4hi3,
9710 v4hi_ftype_v4hi_v4hi);
9711 def_builtin_const ("__builtin_vis_fpsubs16s", CODE_FOR_sssubv2hi3,
9712 v2hi_ftype_v2hi_v2hi);
9713 def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
9714 v2si_ftype_v2si_v2si);
9715 def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
9716 v1si_ftype_v1si_v1si);
9717 def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
9718 v2si_ftype_v2si_v2si);
9719 def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
9720 v1si_ftype_v1si_v1si);
9724 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8di_vis,
9725 di_ftype_v8qi_v8qi);
9726 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8di_vis,
9727 di_ftype_v8qi_v8qi);
9728 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8di_vis,
9729 di_ftype_v8qi_v8qi);
9730 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8di_vis,
9731 di_ftype_v8qi_v8qi);
9735 def_builtin_const ("__builtin_vis_fucmple8", CODE_FOR_fucmple8si_vis,
9736 si_ftype_v8qi_v8qi);
9737 def_builtin_const ("__builtin_vis_fucmpne8", CODE_FOR_fucmpne8si_vis,
9738 si_ftype_v8qi_v8qi);
9739 def_builtin_const ("__builtin_vis_fucmpgt8", CODE_FOR_fucmpgt8si_vis,
9740 si_ftype_v8qi_v8qi);
9741 def_builtin_const ("__builtin_vis_fucmpeq8", CODE_FOR_fucmpeq8si_vis,
9742 si_ftype_v8qi_v8qi);
9745 def_builtin_const ("__builtin_vis_fhadds", CODE_FOR_fhaddsf_vis,
9747 def_builtin_const ("__builtin_vis_fhaddd", CODE_FOR_fhadddf_vis,
9749 def_builtin_const ("__builtin_vis_fhsubs", CODE_FOR_fhsubsf_vis,
9751 def_builtin_const ("__builtin_vis_fhsubd", CODE_FOR_fhsubdf_vis,
9753 def_builtin_const ("__builtin_vis_fnhadds", CODE_FOR_fnhaddsf_vis,
9755 def_builtin_const ("__builtin_vis_fnhaddd", CODE_FOR_fnhadddf_vis,
9758 def_builtin_const ("__builtin_vis_umulxhi", CODE_FOR_umulxhi_vis,
9760 def_builtin_const ("__builtin_vis_xmulx", CODE_FOR_xmulx_vis,
9762 def_builtin_const ("__builtin_vis_xmulxhi", CODE_FOR_xmulxhi_vis,
9767 /* Handle TARGET_EXPAND_BUILTIN target hook.
9768 Expand builtin functions for SPARC intrinsics. */
9771 sparc_expand_builtin (tree exp, rtx target,
9772 rtx subtarget ATTRIBUTE_UNUSED,
9773 enum machine_mode tmode ATTRIBUTE_UNUSED,
9774 int ignore ATTRIBUTE_UNUSED)
9777 call_expr_arg_iterator iter;
9778 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9779 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
9784 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
9788 enum machine_mode tmode = insn_data[icode].operand[0].mode;
9790 || GET_MODE (target) != tmode
9791 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9792 op[0] = gen_reg_rtx (tmode);
9796 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
9798 const struct insn_operand_data *insn_op;
9801 if (arg == error_mark_node)
9805 idx = arg_count - !nonvoid;
9806 insn_op = &insn_data[icode].operand[idx];
9807 op[arg_count] = expand_normal (arg);
9809 if (insn_op->mode == V1DImode
9810 && GET_MODE (op[arg_count]) == DImode)
9811 op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
9812 else if (insn_op->mode == V1SImode
9813 && GET_MODE (op[arg_count]) == SImode)
9814 op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
9816 if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
9818 op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
9824 pat = GEN_FCN (icode) (op[0]);
9828 pat = GEN_FCN (icode) (op[0], op[1]);
9830 pat = GEN_FCN (icode) (op[1]);
9833 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
9836 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
9854 sparc_vis_mul8x16 (int e8, int e16)
9856 return (e8 * e16 + 128) / 256;
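/* Worked example of the rounding above: with e8 = 128 and e16 = 1000,
   (128 * 1000 + 128) / 256 = 500, i.e. the product is scaled down by 256
   with the +128 term rounding the discarded low byte.  */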
9859 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
9860 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
9861 constants. A tree list with the results of the multiplications is returned,
9862 and each element in the list is of INNER_TYPE. */
9865 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
9867 tree n_elts = NULL_TREE;
9872 case CODE_FOR_fmul8x16_vis:
9873 for (; elts0 && elts1;
9874 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
9877 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9878 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
9879 n_elts = tree_cons (NULL_TREE,
9880 build_int_cst (inner_type, val),
9885 case CODE_FOR_fmul8x16au_vis:
9886 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
9888 for (; elts0; elts0 = TREE_CHAIN (elts0))
9891 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9893 n_elts = tree_cons (NULL_TREE,
9894 build_int_cst (inner_type, val),
9899 case CODE_FOR_fmul8x16al_vis:
9900 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
9902 for (; elts0; elts0 = TREE_CHAIN (elts0))
9905 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
9907 n_elts = tree_cons (NULL_TREE,
9908 build_int_cst (inner_type, val),
9917 return nreverse (n_elts);
9920 /* Handle TARGET_FOLD_BUILTIN target hook.
9921 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
9922 result of the function call is ignored. NULL_TREE is returned if the
9923 function could not be folded. */
9926 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
9927 tree *args, bool ignore)
9929 tree arg0, arg1, arg2;
9930 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
9931 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
9935 /* Note that a switch statement instead of the sequence of tests would
9936 be incorrect as many of the CODE_FOR values could be CODE_FOR_nothing
9937 and that would yield multiple alternatives with identical values. */
9938 if (icode == CODE_FOR_alignaddrsi_vis
9939 || icode == CODE_FOR_alignaddrdi_vis
9940 || icode == CODE_FOR_wrgsr_vis
9941 || icode == CODE_FOR_bmasksi_vis
9942 || icode == CODE_FOR_bmaskdi_vis
9943 || icode == CODE_FOR_cmask8si_vis
9944 || icode == CODE_FOR_cmask8di_vis
9945 || icode == CODE_FOR_cmask16si_vis
9946 || icode == CODE_FOR_cmask16di_vis
9947 || icode == CODE_FOR_cmask32si_vis
9948 || icode == CODE_FOR_cmask32di_vis)
9951 return build_zero_cst (rtype);
9956 case CODE_FOR_fexpand_vis:
9960 if (TREE_CODE (arg0) == VECTOR_CST)
9962 tree inner_type = TREE_TYPE (rtype);
9963 tree elts = TREE_VECTOR_CST_ELTS (arg0);
9964 tree n_elts = NULL_TREE;
9966 for (; elts; elts = TREE_CHAIN (elts))
9968 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
9969 n_elts = tree_cons (NULL_TREE,
9970 build_int_cst (inner_type, val),
9973 return build_vector (rtype, nreverse (n_elts));
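/* Worked example: folding __builtin_vis_fexpand on the constant vector
   { 1, 2, 3, 250 } yields { 16, 32, 48, 4000 }, each byte shifted left
   by four bits into its 16-bit lane.  */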
9977 case CODE_FOR_fmul8x16_vis:
9978 case CODE_FOR_fmul8x16au_vis:
9979 case CODE_FOR_fmul8x16al_vis:
9985 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
9987 tree inner_type = TREE_TYPE (rtype);
9988 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
9989 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
9990 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
9993 return build_vector (rtype, n_elts);
9997 case CODE_FOR_fpmerge_vis:
10003 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
10005 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
10006 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
10007 tree n_elts = NULL_TREE;
10009 for (; elts0 && elts1;
10010 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
10012 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
10013 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
10016 return build_vector (rtype, nreverse (n_elts));
10020 case CODE_FOR_pdist_vis:
10028 if (TREE_CODE (arg0) == VECTOR_CST
10029 && TREE_CODE (arg1) == VECTOR_CST
10030 && TREE_CODE (arg2) == INTEGER_CST)
10033 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
10034 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
10035 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
10036 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
10038 for (; elts0 && elts1;
10039 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
10041 unsigned HOST_WIDE_INT
10042 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
10043 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
10044 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
10045 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
10047 unsigned HOST_WIDE_INT l;
10050 overflow |= neg_double (low1, high1, &l, &h);
10051 overflow |= add_double (low0, high0, l, h, &l, &h);
10053 overflow |= neg_double (l, h, &l, &h);
10055 overflow |= add_double (low, high, l, h, &low, &high);
10058 gcc_assert (overflow == 0);
10060 return build_int_cst_wide (rtype, low, high);
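/* Worked example: with elts0 = { 3, 10, 0, ... }, elts1 = { 7, 2, 0, ... }
   and an accumulator argument of 100, the fold above computes
   100 + |3 - 7| + |10 - 2| = 112.  */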
10070 /* ??? This duplicates information provided to the compiler by the
10071 ??? scheduler description. Some day, teach genautomata to output
10072 ??? the latencies and then CSE will just use that. */
10075 sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
10076 int *total, bool speed ATTRIBUTE_UNUSED)
10078 enum machine_mode mode = GET_MODE (x);
10079 bool float_mode_p = FLOAT_MODE_P (mode);
10084 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
10102 if (GET_MODE (x) == VOIDmode
10103 && ((CONST_DOUBLE_HIGH (x) == 0
10104 && CONST_DOUBLE_LOW (x) < 0x1000)
10105 || (CONST_DOUBLE_HIGH (x) == -1
10106 && CONST_DOUBLE_LOW (x) < 0
10107 && CONST_DOUBLE_LOW (x) >= -0x1000)))
10114 /* If outer-code was a sign or zero extension, a cost
10115 of COSTS_N_INSNS (1) was already added in. This is
10116 why we are subtracting it back out. */
10117 if (outer_code == ZERO_EXTEND)
10119 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
10121 else if (outer_code == SIGN_EXTEND)
10123 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
10125 else if (float_mode_p)
10127 *total = sparc_costs->float_load;
10131 *total = sparc_costs->int_load;
10139 *total = sparc_costs->float_plusminus;
10141 *total = COSTS_N_INSNS (1);
10148 gcc_assert (float_mode_p);
10149 *total = sparc_costs->float_mul;
10152 if (GET_CODE (sub) == NEG)
10153 sub = XEXP (sub, 0);
10154 *total += rtx_cost (sub, FMA, 0, speed);
10157 if (GET_CODE (sub) == NEG)
10158 sub = XEXP (sub, 0);
10159 *total += rtx_cost (sub, FMA, 2, speed);
10165 *total = sparc_costs->float_mul;
10166 else if (! TARGET_HARD_MUL)
10167 *total = COSTS_N_INSNS (25);
10173 if (sparc_costs->int_mul_bit_factor)
10177 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
10179 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
10180 for (nbits = 0; value != 0; value &= value - 1)
10183 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
10184 && GET_MODE (XEXP (x, 1)) == VOIDmode)
10186 rtx x1 = XEXP (x, 1);
10187 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
10188 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
10190 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
10192 for (; value2 != 0; value2 &= value2 - 1)
10200 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
10201 bit_cost = COSTS_N_INSNS (bit_cost);
10204 if (mode == DImode)
10205 *total = sparc_costs->int_mulX + bit_cost;
10207 *total = sparc_costs->int_mul + bit_cost;
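/* Worked example: a multiplication by the constant 0x0fff (12 bits set)
   gives nbits = 12, so COSTS_N_INSNS ((12 - 3) / int_mul_bit_factor) is
   added on top of int_mul (int_mulX for a DImode multiply).  */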
10214 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
10223 if (mode == DFmode)
10224 *total = sparc_costs->float_div_df;
10226 *total = sparc_costs->float_div_sf;
10230 if (mode == DImode)
10231 *total = sparc_costs->int_divX;
10233 *total = sparc_costs->int_div;
10238 if (! float_mode_p)
10240 *total = COSTS_N_INSNS (1);
10247 case UNSIGNED_FLOAT:
10251 case FLOAT_TRUNCATE:
10252 *total = sparc_costs->float_move;
10256 if (mode == DFmode)
10257 *total = sparc_costs->float_sqrt_df;
10259 *total = sparc_costs->float_sqrt_sf;
10264 *total = sparc_costs->float_cmp;
10266 *total = COSTS_N_INSNS (1);
10271 *total = sparc_costs->float_cmove;
10273 *total = sparc_costs->int_cmove;
10277 /* Handle the NAND vector patterns. */
10278 if (sparc_vector_mode_supported_p (GET_MODE (x))
10279 && GET_CODE (XEXP (x, 0)) == NOT
10280 && GET_CODE (XEXP (x, 1)) == NOT)
10282 *total = COSTS_N_INSNS (1);
10293 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
10296 general_or_i64_p (reg_class_t rclass)
10298 return (rclass == GENERAL_REGS || rclass == I64_REGS);
10301 /* Implement TARGET_REGISTER_MOVE_COST. */
10304 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10305 reg_class_t from, reg_class_t to)
10307 bool need_memory = false;
10309 if (from == FPCC_REGS || to == FPCC_REGS)
10310 need_memory = true;
10311 else if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
10312 || (general_or_i64_p (from) && FP_REG_CLASS_P (to)))
10316 int size = GET_MODE_SIZE (mode);
10317 if (size == 8 || size == 4)
10319 if (! TARGET_ARCH32 || size == 4)
10325 need_memory = true;
10330 if (sparc_cpu == PROCESSOR_ULTRASPARC
10331 || sparc_cpu == PROCESSOR_ULTRASPARC3
10332 || sparc_cpu == PROCESSOR_NIAGARA
10333 || sparc_cpu == PROCESSOR_NIAGARA2
10334 || sparc_cpu == PROCESSOR_NIAGARA3
10335 || sparc_cpu == PROCESSOR_NIAGARA4)
10344 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
10345 This is achieved by means of a manual dynamic stack space allocation in
10346 the current frame. We make the assumption that SEQ doesn't contain any
10347 function calls, with the possible exception of calls to the GOT helper. */
10350 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
10352 /* We must preserve the lowest 16 words for the register save area. */
10353 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
10354 /* We really need only 2 words of fresh stack space. */
10355 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
10358 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
10359 SPARC_STACK_BIAS + offset));
10361 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
10362 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
10364 emit_insn (gen_rtx_SET (VOIDmode,
10365 adjust_address (slot, word_mode, UNITS_PER_WORD),
10369 emit_insn (gen_rtx_SET (VOIDmode,
10371 adjust_address (slot, word_mode, UNITS_PER_WORD)));
10372 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
10373 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
10376 /* Output the assembler code for a thunk function. THUNK_DECL is the
10377 declaration for the thunk function itself, FUNCTION is the decl for
10378 the target function. DELTA is an immediate constant offset to be
10379 added to THIS. If VCALL_OFFSET is nonzero, the word at address
10380 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
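/* As a rough C-level sketch (illustrative only, using byte offsets), the
   adjustment performed by the emitted thunk is

     this = (char *) this + DELTA;
     if (VCALL_OFFSET != 0)
       this = (char *) this + *(long *) (*(char **) this + VCALL_OFFSET);

   followed by a tail call to FUNCTION with the adjusted pointer.  */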
10383 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10384 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10387 rtx this_rtx, insn, funexp;
10388 unsigned int int_arg_first;
10390 reload_completed = 1;
10391 epilogue_completed = 1;
10393 emit_note (NOTE_INSN_PROLOGUE_END);
10397 sparc_leaf_function_p = 1;
10399 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10401 else if (flag_delayed_branch)
10403 /* We will emit a regular sibcall below, so we need to instruct
10404 output_sibcall that we are in a leaf function. */
10405 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
10407 /* This will cause final.c to invoke leaf_renumber_regs so we
10408 must behave as if we were in a not-yet-leafified function. */
10409 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
10413 /* We will emit the sibcall manually below, so we will need to
10414 manually spill non-leaf registers. */
10415 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
10417 /* We really are in a leaf function. */
10418 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
10421 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
10422 returns a structure, the structure return pointer is there instead. */
10424 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10425 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
10427 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
10429 /* Add DELTA. When possible use a plain add, otherwise load it into
10430 a register first. */
10433 rtx delta_rtx = GEN_INT (delta);
10435 if (! SPARC_SIMM13_P (delta))
10437 rtx scratch = gen_rtx_REG (Pmode, 1);
10438 emit_move_insn (scratch, delta_rtx);
10439 delta_rtx = scratch;
10442 /* THIS_RTX += DELTA. */
10443 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
10446 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
10449 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10450 rtx scratch = gen_rtx_REG (Pmode, 1);
10452 gcc_assert (vcall_offset < 0);
10454 /* SCRATCH = *THIS_RTX. */
10455 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
10457 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
10458 may not have any available scratch register at this point. */
10459 if (SPARC_SIMM13_P (vcall_offset))
10461 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
10462 else if (! fixed_regs[5]
10463 /* The below sequence is made up of at least 2 insns,
10464 while the default method may need only one. */
10465 && vcall_offset < -8192)
10467 rtx scratch2 = gen_rtx_REG (Pmode, 5);
10468 emit_move_insn (scratch2, vcall_offset_rtx);
10469 vcall_offset_rtx = scratch2;
10473 rtx increment = GEN_INT (-4096);
10475 /* VCALL_OFFSET is a negative number whose typical range can be
10476 estimated as -32768..0 in 32-bit mode. In almost all cases
10477 it is therefore cheaper to emit multiple add insns than
10478 spilling and loading the constant into a register (at least
10480 while (! SPARC_SIMM13_P (vcall_offset))
10482 emit_insn (gen_add2_insn (scratch, increment));
10483 vcall_offset += 4096;
10485 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
10488 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
10489 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
10490 gen_rtx_PLUS (Pmode,
10492 vcall_offset_rtx)));
10494 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
10495 emit_insn (gen_add2_insn (this_rtx, scratch));
10498 /* Generate a tail call to the target function. */
10499 if (! TREE_USED (function))
10501 assemble_external (function);
10502 TREE_USED (function) = 1;
10504 funexp = XEXP (DECL_RTL (function), 0);
10506 if (flag_delayed_branch)
10508 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10509 insn = emit_call_insn (gen_sibcall (funexp));
10510 SIBLING_CALL_P (insn) = 1;
10514 /* The hoops we have to jump through in order to generate a sibcall
10515 without using delay slots... */
10516 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
10520 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
10522 load_got_register (); /* clobbers %o7 */
10523 scratch = sparc_legitimize_pic_address (funexp, scratch);
10524 seq = get_insns ();
10526 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
10528 else if (TARGET_ARCH32)
10530 emit_insn (gen_rtx_SET (VOIDmode,
10532 gen_rtx_HIGH (SImode, funexp)));
10533 emit_insn (gen_rtx_SET (VOIDmode,
10535 gen_rtx_LO_SUM (SImode, scratch, funexp)));
10537 else /* TARGET_ARCH64 */
10539 switch (sparc_cmodel)
10543 /* The destination can serve as a temporary. */
10544 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
10549 /* The destination cannot serve as a temporary. */
10550 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
10552 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
10553 seq = get_insns ();
10555 emit_and_preserve (seq, spill_reg, 0);
10559 gcc_unreachable ();
10563 emit_jump_insn (gen_indirect_jump (scratch));
10568 /* Run just enough of rest_of_compilation to get the insns emitted.
10569 There's not really enough bulk here to make other passes such as
10570 instruction scheduling worthwhile. Note that use_thunk calls
10571 assemble_start_function and assemble_end_function. */
10572 insn = get_insns ();
10573 insn_locators_alloc ();
10574 shorten_branches (insn);
10575 final_start_function (insn, file, 1);
10576 final (insn, file, 1);
10577 final_end_function ();
10579 reload_completed = 0;
10580 epilogue_completed = 0;
10583 /* Return true if sparc_output_mi_thunk would be able to output the
10584 assembler code for the thunk function specified by the arguments
10585 it is passed, and false otherwise. */
10587 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
10588 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
10589 HOST_WIDE_INT vcall_offset,
10590 const_tree function ATTRIBUTE_UNUSED)
10592 /* Bound the loop used in the default method above. */
10593 return (vcall_offset >= -32768 || ! fixed_regs[5]);
10596 /* We use the machine specific reorg pass to enable workarounds for errata. */
10603 /* The only erratum we handle for now is that of the AT697F processor. */
10604 if (!sparc_fix_at697f)
10607 /* We need to have the (essentially) final form of the insn stream in order
10608 to properly detect the various hazards. Run delay slot scheduling. */
10609 if (optimize > 0 && flag_delayed_branch)
10610 dbr_schedule (get_insns ());
10612 /* Now look for specific patterns in the insn stream. */
10613 for (insn = get_insns (); insn; insn = next)
10615 bool insert_nop = false;
10618 /* Look for a single-word load into an odd-numbered FP register. */
10619 if (NONJUMP_INSN_P (insn)
10620 && (set = single_set (insn)) != NULL_RTX
10621 && GET_MODE_SIZE (GET_MODE (SET_SRC (set))) == 4
10622 && MEM_P (SET_SRC (set))
10623 && REG_P (SET_DEST (set))
10624 && REGNO (SET_DEST (set)) > 31
10625 && REGNO (SET_DEST (set)) % 2 != 0)
10627 /* The wrong dependency is on the enclosing double register. */
10628 unsigned int x = REGNO (SET_DEST (set)) - 1;
10629 unsigned int src1, src2, dest;
10632 /* If the insn has a delay slot, then it cannot be problematic. */
10633 next = next_active_insn (insn);
10634 if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
10638 extract_insn (next);
10639 code = INSN_CODE (next);
10644 case CODE_FOR_adddf3:
10645 case CODE_FOR_subdf3:
10646 case CODE_FOR_muldf3:
10647 case CODE_FOR_divdf3:
10648 dest = REGNO (recog_data.operand[0]);
10649 src1 = REGNO (recog_data.operand[1]);
10650 src2 = REGNO (recog_data.operand[2]);
10654 ld [address], %fx+1
10655 FPOPd %f{x,y}, %f{y,x}, %f{x,y} */
10656 if ((src1 == x || src2 == x)
10657 && (dest == src1 || dest == src2))
10663 ld [address], %fx+1
10664 FPOPd %fx, %fx, %fx */
10667 && (code == CODE_FOR_adddf3 || code == CODE_FOR_muldf3))
10672 case CODE_FOR_sqrtdf2:
10673 dest = REGNO (recog_data.operand[0]);
10674 src1 = REGNO (recog_data.operand[1]);
10676 ld [address], %fx+1
10678 if (src1 == x && dest == src1)
10687 next = NEXT_INSN (insn);
10690 emit_insn_after (gen_nop (), insn);
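/* For illustration, an assumed instance of the hazard handled above:

       ld    [%o0], %f3
       faddd %f2, %f4, %f2

   The single-word load writes the odd register %f3 while the following
   double-precision add reads and writes the enclosing pair starting at
   %f2, so a nop is inserted between the two instructions.  */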
10694 /* How to allocate a 'struct machine_function'. */
10696 static struct machine_function *
10697 sparc_init_machine_status (void)
10699 return ggc_alloc_cleared_machine_function ();
10702 /* Locate some local-dynamic symbol still in use by this function
10703 so that we can print its name in local-dynamic base patterns. */
10705 static const char *
10706 get_some_local_dynamic_name (void)
10710 if (cfun->machine->some_ld_name)
10711 return cfun->machine->some_ld_name;
10713 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
10715 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
10716 return cfun->machine->some_ld_name;
10718 gcc_unreachable ();
10722 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
10727 && GET_CODE (x) == SYMBOL_REF
10728 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
10730 cfun->machine->some_ld_name = XSTR (x, 0);
10737 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10738 We need to emit DTP-relative relocations. */
10741 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
10746 fputs ("\t.word\t%r_tls_dtpoff32(", file);
10749 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
10752 gcc_unreachable ();
10754 output_addr_const (file, x);
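/* For illustration: for a local-dynamic TLS symbol tls_var (a hypothetical
   name), the 4-byte case emits

	.word	%r_tls_dtpoff32(tls_var)

   and the 8-byte case the corresponding .xword %r_tls_dtpoff64 form.  */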
10758 /* Do whatever processing is required at the end of a file. */
10761 sparc_file_end (void)
10763 /* If we need to emit the special GOT helper function, do so now. */
10764 if (got_helper_rtx)
10766 const char *name = XSTR (got_helper_rtx, 0);
10767 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
10768 #ifdef DWARF2_UNWIND_INFO
10772 if (USE_HIDDEN_LINKONCE)
10774 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
10775 get_identifier (name),
10776 build_function_type_list (void_type_node,
10778 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
10779 NULL_TREE, void_type_node);
10780 TREE_STATIC (decl) = 1;
10781 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
10782 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
10783 DECL_VISIBILITY_SPECIFIED (decl) = 1;
10784 resolve_unique_section (decl, 0, flag_function_sections);
10785 allocate_struct_function (decl, true);
10786 cfun->is_thunk = 1;
10787 current_function_decl = decl;
10788 init_varasm_status ();
10789 assemble_start_function (decl, name);
10793 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10794 switch_to_section (text_section);
10796 ASM_OUTPUT_ALIGN (asm_out_file, align);
10797 ASM_OUTPUT_LABEL (asm_out_file, name);
10800 #ifdef DWARF2_UNWIND_INFO
10801 do_cfi = dwarf2out_do_cfi_asm ();
10803 fprintf (asm_out_file, "\t.cfi_startproc\n");
10805 if (flag_delayed_branch)
10806 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
10807 reg_name, reg_name);
10809 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
10810 reg_name, reg_name);
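/* For illustration: with delayed branches enabled and the usual %l7 as the
   GOT register, the helper body emitted here is

	jmp	%o7+8
	 add	%o7, %l7, %l7

   i.e. the caller's PC in %o7 is folded into %l7 from the delay slot.  */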
10811 #ifdef DWARF2_UNWIND_INFO
10813 fprintf (asm_out_file, "\t.cfi_endproc\n");
10817 if (NEED_INDICATE_EXEC_STACK)
10818 file_end_indicate_exec_stack ();
10820 #ifdef TARGET_SOLARIS
10821 solaris_file_end ();
10825 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10826 /* Implement TARGET_MANGLE_TYPE. */
10828 static const char *
10829 sparc_mangle_type (const_tree type)
10832 && TYPE_MAIN_VARIANT (type) == long_double_type_node
10833 && TARGET_LONG_DOUBLE_128)
10836 /* For all other types, use normal C++ mangling. */
10841 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing a 32-bit
10842 compare and swap on the word containing the byte or half-word. */
10845 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
10847 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
10848 rtx addr = gen_reg_rtx (Pmode);
10849 rtx off = gen_reg_rtx (SImode);
10850 rtx oldv = gen_reg_rtx (SImode);
10851 rtx newv = gen_reg_rtx (SImode);
10852 rtx oldvalue = gen_reg_rtx (SImode);
10853 rtx newvalue = gen_reg_rtx (SImode);
10854 rtx res = gen_reg_rtx (SImode);
10855 rtx resv = gen_reg_rtx (SImode);
10856 rtx memsi, val, mask, end_label, loop_label, cc;
10858 emit_insn (gen_rtx_SET (VOIDmode, addr,
10859 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
10861 if (Pmode != SImode)
10862 addr1 = gen_lowpart (SImode, addr1);
10863 emit_insn (gen_rtx_SET (VOIDmode, off,
10864 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
10866 memsi = gen_rtx_MEM (SImode, addr);
10867 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
10868 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
10870 val = force_reg (SImode, memsi);
10872 emit_insn (gen_rtx_SET (VOIDmode, off,
10873 gen_rtx_XOR (SImode, off,
10874 GEN_INT (GET_MODE (mem) == QImode
10877 emit_insn (gen_rtx_SET (VOIDmode, off,
10878 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
10880 if (GET_MODE (mem) == QImode)
10881 mask = force_reg (SImode, GEN_INT (0xff));
10883 mask = force_reg (SImode, GEN_INT (0xffff));
10885 emit_insn (gen_rtx_SET (VOIDmode, mask,
10886 gen_rtx_ASHIFT (SImode, mask, off)));
10888 emit_insn (gen_rtx_SET (VOIDmode, val,
10889 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10892 oldval = gen_lowpart (SImode, oldval);
10893 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10894 gen_rtx_ASHIFT (SImode, oldval, off)));
10896 newval = gen_lowpart_common (SImode, newval);
10897 emit_insn (gen_rtx_SET (VOIDmode, newv,
10898 gen_rtx_ASHIFT (SImode, newval, off)));
10900 emit_insn (gen_rtx_SET (VOIDmode, oldv,
10901 gen_rtx_AND (SImode, oldv, mask)));
10903 emit_insn (gen_rtx_SET (VOIDmode, newv,
10904 gen_rtx_AND (SImode, newv, mask)));
10906 end_label = gen_label_rtx ();
10907 loop_label = gen_label_rtx ();
10908 emit_label (loop_label);
10910 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
10911 gen_rtx_IOR (SImode, oldv, val)));
10913 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
10914 gen_rtx_IOR (SImode, newv, val)));
10916 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
10918 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
10920 emit_insn (gen_rtx_SET (VOIDmode, resv,
10921 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
10924 cc = gen_compare_reg_1 (NE, resv, val);
10925 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
10927 /* Use cbranchcc4 to separate the compare and branch! */
10928 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
10929 cc, const0_rtx, loop_label));
10931 emit_label (end_label);
10933 emit_insn (gen_rtx_SET (VOIDmode, res,
10934 gen_rtx_AND (SImode, res, mask)));
10936 emit_insn (gen_rtx_SET (VOIDmode, res,
10937 gen_rtx_LSHIFTRT (SImode, res, off)));
10939 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
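/* A rough C-level equivalent of the expansion above (illustrative only;
   cas32 stands for the 32-bit compare-and-swap generated by
   gen_sync_compare_and_swapsi, and the fixed-width types are just for
   exposition):

     uint32_t *wp   = (uint32_t *) ((uintptr_t) p & ~(uintptr_t) 3);
     int       off  = (((uintptr_t) p & 3) ^ (byte_access ? 3 : 2)) * 8;
     uint32_t  mask = (byte_access ? 0xff : 0xffff) << off;
     uint32_t  rest = *wp & ~mask;
     uint32_t  oldv = ((uint32_t) oldval << off) & mask;
     uint32_t  newv = ((uint32_t) newval << off) & mask;
     uint32_t  res;

     for (;;)
       {
         res = cas32 (wp, oldv | rest, newv | rest);
         if (res == (oldv | rest))
           break;                          (swap succeeded)
         if ((res & ~mask) == rest)
           break;                          (only our sub-word differed)
         rest = res & ~mask;               (other bytes changed, retry)
       }
     result = (res & mask) >> off;

   The XOR with 3 (or 2 for half-words) accounts for big-endian byte
   numbering within the containing word.  */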
10943 sparc_expand_vec_perm_bmask (enum machine_mode vmode, rtx sel)
10947 sel = gen_lowpart (DImode, sel);
10951 /* inp = xxxxxxxAxxxxxxxB */
10952 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
10953 NULL_RTX, 1, OPTAB_DIRECT);
10954 /* t_1 = ....xxxxxxxAxxx. */
10955 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
10956 GEN_INT (3), NULL_RTX, 1, OPTAB_DIRECT);
10957 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
10958 GEN_INT (0x30000), NULL_RTX, 1, OPTAB_DIRECT);
10959 /* sel = .......B */
10960 /* t_1 = ...A.... */
10961 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
10962 /* sel = ...A...B */
10963 sel = expand_mult (SImode, sel, GEN_INT (0x4444), sel, 1);
10964 /* sel = AAAABBBB * 4 */
10965 t_1 = force_reg (SImode, GEN_INT (0x01230123));
10966 /* sel = { A*4, A*4+1, A*4+2, ... } */
10970 /* inp = xxxAxxxBxxxCxxxD */
10971 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
10972 NULL_RTX, 1, OPTAB_DIRECT);
10973 t_2 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
10974 NULL_RTX, 1, OPTAB_DIRECT);
10975 t_3 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (24),
10976 NULL_RTX, 1, OPTAB_DIRECT);
10977 /* t_1 = ..xxxAxxxBxxxCxx */
10978 /* t_2 = ....xxxAxxxBxxxC */
10979 /* t_3 = ......xxxAxxxBxx */
10980 sel = expand_simple_binop (SImode, AND, gen_lowpart (SImode, sel),
10982 NULL_RTX, 1, OPTAB_DIRECT);
10983 t_1 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_1),
10985 NULL_RTX, 1, OPTAB_DIRECT);
10986 t_2 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_2),
10987 GEN_INT (0x070000),
10988 NULL_RTX, 1, OPTAB_DIRECT);
10989 t_3 = expand_simple_binop (SImode, AND, gen_lowpart (SImode, t_3),
10990 GEN_INT (0x07000000),
10991 NULL_RTX, 1, OPTAB_DIRECT);
10992 /* sel = .......D */
10993 /* t_1 = .....C.. */
10994 /* t_2 = ...B.... */
10995 /* t_3 = .A...... */
10996 sel = expand_simple_binop (SImode, IOR, sel, t_1, sel, 1, OPTAB_DIRECT);
10997 t_2 = expand_simple_binop (SImode, IOR, t_2, t_3, t_2, 1, OPTAB_DIRECT);
10998 sel = expand_simple_binop (SImode, IOR, sel, t_2, sel, 1, OPTAB_DIRECT);
10999 /* sel = .A.B.C.D */
11000 sel = expand_mult (SImode, sel, GEN_INT (0x22), sel, 1);
11001 /* sel = AABBCCDD * 2 */
11002 t_1 = force_reg (SImode, GEN_INT (0x01010101));
11003 /* sel = { A*2, A*2+1, B*2, B*2+1, ... } */
11007 /* input = xAxBxCxDxExFxGxH */
11008 sel = expand_simple_binop (DImode, AND, sel,
11009 GEN_INT ((HOST_WIDE_INT)0x0f0f0f0f << 32
11011 NULL_RTX, 1, OPTAB_DIRECT);
11012 /* sel = .A.B.C.D.E.F.G.H */
11013 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (4),
11014 NULL_RTX, 1, OPTAB_DIRECT);
11015 /* t_1 = ..A.B.C.D.E.F.G. */
11016 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11017 NULL_RTX, 1, OPTAB_DIRECT);
11018 /* sel = .AABBCCDDEEFFGGH */
11019 sel = expand_simple_binop (DImode, AND, sel,
11020 GEN_INT ((HOST_WIDE_INT)0xff00ff << 32
11022 NULL_RTX, 1, OPTAB_DIRECT);
11023 /* sel = ..AB..CD..EF..GH */
11024 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (8),
11025 NULL_RTX, 1, OPTAB_DIRECT);
11026 /* t_1 = ....AB..CD..EF.. */
11027 sel = expand_simple_binop (DImode, IOR, sel, t_1,
11028 NULL_RTX, 1, OPTAB_DIRECT);
11029 /* sel = ..ABABCDCDEFEFGH */
11030 sel = expand_simple_binop (DImode, AND, sel,
11031 GEN_INT ((HOST_WIDE_INT)0xffff << 32 | 0xffff),
11032 NULL_RTX, 1, OPTAB_DIRECT);
11033 /* sel = ....ABCD....EFGH */
11034 t_1 = expand_simple_binop (DImode, LSHIFTRT, sel, GEN_INT (16),
11035 NULL_RTX, 1, OPTAB_DIRECT);
11036 /* t_1 = ........ABCD.... */
11037 sel = gen_lowpart (SImode, sel);
11038 t_1 = gen_lowpart (SImode, t_1);
11042 gcc_unreachable ();
11045 /* Always perform the final addition/merge within the bmask insn. */
11046 emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, t_1));
11049 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
11052 sparc_frame_pointer_required (void)
  /* If the stack pointer is dynamically modified in the function, it cannot
     serve as the frame pointer.  */
  if (cfun->calls_alloca)
    return true;

  /* If the function receives nonlocal gotos, it needs to save the frame
     pointer in the nonlocal_goto_save_area object.  */
  if (cfun->has_nonlocal_label)
    return true;

  /* In flat mode, that's it.  */
  if (TARGET_FLAT)
    return false;

  /* Otherwise, the frame pointer is required if the function isn't leaf.  */
  return !(current_function_is_leaf && only_leaf_regs_used ());
}

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
}

/* Return the hard frame pointer directly to bypass the stack bias.  */

static rtx
sparc_builtin_setjmp_frame_value (void)
{
  return hard_frame_pointer_rtx;
}

/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
   they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5,
     then honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      int regno;
      for (regno = SPARC_FIRST_V9_FP_REG;
           regno <= SPARC_LAST_V9_FP_REG;
           regno++)
        fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
           regno <= SPARC_LAST_V9_FCC_REG;
           regno++)
        fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      int regno;
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
        fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2,
     then honor it.  Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
  if (TARGET_FLAT)
    {
      int regno;
      /* Disable leaf functions.  */
      memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        leaf_reg_remap [regno] = regno;
    }
  if (TARGET_VIS)
    global_regs[SPARC_GSR_REG] = 1;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with a r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with a r/F constraint.
   - Try to reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  enum machine_mode mode = GET_MODE (x);
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
          || rclass == GENERAL_OR_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS
          || (GET_MODE_CLASS (mode) == MODE_FLOAT && ! TARGET_FPU)
          || (mode == TFmode && ! const_zero_operand (x, mode)))
        return NO_REGS;

      if (GET_MODE_CLASS (mode) == MODE_INT)
        return GENERAL_REGS;

      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          if (! FP_REG_CLASS_P (rclass)
              || !(const_zero_operand (x, mode)
                   || const_all_ones_operand (x, mode)))
            return NO_REGS;
        }
    }

  if (TARGET_VIS3
      && ! TARGET_ARCH64
      && (rclass == EXTRA_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS))
    {
      int regno = true_regnum (x);

      if (SPARC_INT_REG_P (regno))
        return (rclass == EXTRA_FP_REGS
                ? FP_REGS : GENERAL_OR_FP_REGS);
    }

  return rclass;
}
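
/* Added documentation (not in the original file): output the assembly for a
   64-bit multiply under the 32-bit V8+ ABI, where DImode operands live in
   register pairs (%H/%L).  NAME is the multiply mnemonic to emit, e.g.
   "mulx"; operands 3 and 4 are scratch registers.  The 32-bit pieces are
   merged into 64-bit registers with sllx/or, multiplied, and the product
   is split back into a pair with srlx/mov.  */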
const char *
output_v8plus_mult (rtx insn, rtx *operands, const char *name)
{
  char mulstr[32];

  gcc_assert (! TARGET_ARCH64);

  if (sparc_check_64 (operands[1], insn) <= 0)
    output_asm_insn ("srl\t%L1, 0, %L1", operands);
  if (which_alternative == 1)
    output_asm_insn ("sllx\t%H1, 32, %H1", operands);
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      if (which_alternative == 1)
        {
          output_asm_insn ("or\t%L1, %H1, %H1", operands);
          sprintf (mulstr, "%s\t%%H1, %%2, %%L0", name);
          output_asm_insn (mulstr, operands);
          return "srlx\t%L0, 32, %H0";
        }
      else
        {
          output_asm_insn ("sllx\t%H1, 32, %3", operands);
          output_asm_insn ("or\t%L1, %3, %3", operands);
          sprintf (mulstr, "%s\t%%3, %%2, %%3", name);
          output_asm_insn (mulstr, operands);
          output_asm_insn ("srlx\t%3, 32, %H0", operands);
          return "mov\t%3, %L0";
        }
    }
  else if (rtx_equal_p (operands[1], operands[2]))
    {
      if (which_alternative == 1)
        {
          output_asm_insn ("or\t%L1, %H1, %H1", operands);
          sprintf (mulstr, "%s\t%%H1, %%H1, %%L0", name);
          output_asm_insn (mulstr, operands);
          return "srlx\t%L0, 32, %H0";
        }
      else
        {
          output_asm_insn ("sllx\t%H1, 32, %3", operands);
          output_asm_insn ("or\t%L1, %3, %3", operands);
          sprintf (mulstr, "%s\t%%3, %%3, %%3", name);
          output_asm_insn (mulstr, operands);
          output_asm_insn ("srlx\t%3, 32, %H0", operands);
          return "mov\t%3, %L0";
        }
    }

  if (sparc_check_64 (operands[2], insn) <= 0)
    output_asm_insn ("srl\t%L2, 0, %L2", operands);
  if (which_alternative == 1)
    {
      output_asm_insn ("or\t%L1, %H1, %H1", operands);
      output_asm_insn ("sllx\t%H2, 32, %L1", operands);
      output_asm_insn ("or\t%L2, %L1, %L1", operands);
      sprintf (mulstr, "%s\t%%H1, %%L1, %%L0", name);
      output_asm_insn (mulstr, operands);
      return "srlx\t%L0, 32, %H0";
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      output_asm_insn ("sllx\t%H2, 32, %4", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
      output_asm_insn ("or\t%L2, %4, %4", operands);
      sprintf (mulstr, "%s\t%%3, %%4, %%3", name);
      output_asm_insn (mulstr, operands);
      output_asm_insn ("srlx\t%3, 32, %H0", operands);
      return "mov\t%3, %L0";
    }
}
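
/* Added documentation (not in the original file): expand the vector
   initializer VALS into TARGET.  An all-constant initializer becomes a
   single constant-vector move; otherwise the elements are stored one by
   one into a stack temporary and the vector is loaded back as a whole.  */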
void
sparc_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int i, n_var = 0;
  rtx mem;

  for (i = 0; i < n_elts; i++)
    {
      rtx x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
        n_var++;
    }

  if (n_var == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
                                       i * GET_MODE_SIZE (inner_mode)),
                    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
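
/* Implement TARGET_SECONDARY_RELOAD (added documentation, not in the
   original file): return an intermediate register class needed to copy X
   into a register of RCLASS in MODE, or NO_REGS if none is needed; SRI
   may instead be filled in with a special reload icode and extra cost.  */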
static reg_class_t
sparc_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                        enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  sri->icode = CODE_FOR_nothing;
  sri->extra_cost = 0;

  /* We need a temporary when loading/storing a HImode/QImode value
     between memory and the FPU registers.  This can happen when combine puts
     a paradoxical subreg in a float/fix conversion insn.  */
  if (FP_REG_CLASS_P (rclass)
      && (mode == HImode || mode == QImode)
      && (GET_CODE (x) == MEM
          || ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
              && true_regnum (x) == -1)))
    return GENERAL_REGS;

  /* On 32-bit we need a temporary when loading/storing a DFmode value
     between unaligned memory and the upper FPU registers.  */
  if (TARGET_ARCH32
      && rclass == EXTRA_FP_REGS
      && mode == DFmode
      && GET_CODE (x) == MEM
      && ! mem_min_alignment (x, 8))
    return FP_REGS;
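
  /* Added note (not in the original file): symbolic addresses in the
     medium/anywhere code models cannot be handled by ordinary move
     patterns; the reload_in/reload_out expanders referenced below supply
     the scratch register needed to construct the full address.  */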
  if (((TARGET_CM_MEDANY
        && symbolic_operand (x, mode))
       || (TARGET_CM_EMBMEDANY
           && text_segment_operand (x, mode)))
      && ! flag_pic)
    {
      if (in_p)
        sri->icode = direct_optab_handler (reload_in_optab, mode);
      else
        sri->icode = direct_optab_handler (reload_out_optab, mode);
      return NO_REGS;
    }

  if (TARGET_VIS3 && TARGET_ARCH32)
    {
      int regno = true_regnum (x);

      /* When using VIS3 fp<-->int register moves, on 32-bit we have
         to move 8-byte values in 4-byte pieces.  This only works via
         FP_REGS, and not via EXTRA_FP_REGS.  Therefore if we try to
         move between EXTRA_FP_REGS and GENERAL_REGS, we will need
         an FP_REGS intermediate move.  */
      if ((rclass == EXTRA_FP_REGS && SPARC_INT_REG_P (regno))
          || ((general_or_i64_p (rclass)
               || rclass == GENERAL_OR_FP_REGS)
              && SPARC_FP_REG_P (regno)))
        {
          sri->extra_cost = 2;
          return FP_REGS;
        }
    }

  return NO_REGS;
}
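
/* Added documentation (not in the original file): expand a conditional
   move.  OPERANDS[0] is the destination, OPERANDS[1] the comparison, and
   OPERANDS[2]/OPERANDS[3] the values for the true/false cases.  Return
   true on success; false if the combination cannot be handled directly
   (DImode comparisons on 32-bit).  */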
bool
sparc_expand_conditional_move (enum machine_mode mode, rtx *operands)
{
  enum rtx_code rc = GET_CODE (operands[1]);
  enum machine_mode cmp_mode;
  rtx cc_reg, dst, cmp;

  cmp = operands[1];
  cmp_mode = GET_MODE (XEXP (cmp, 0));
  if (cmp_mode == DImode && !TARGET_ARCH64)
    return false;

  dst = operands[0];
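
  /* Added note (not in the original file): the cases below arrange for DST
     to initially hold the "false" value, either by loading operands[3] or
     by swapping the operands and reversing the condition, so that a single
     conditional move of operands[2] completes the job.  */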
  if (! rtx_equal_p (operands[2], dst)
      && ! rtx_equal_p (operands[3], dst))
    {
      if (reg_overlap_mentioned_p (dst, cmp))
        dst = gen_reg_rtx (mode);

      emit_move_insn (dst, operands[3]);
    }
  else if (operands[2] == dst)
    {
      operands[2] = operands[3];

      if (GET_MODE_CLASS (cmp_mode) == MODE_FLOAT)
        rc = reverse_condition_maybe_unordered (rc);
      else
        rc = reverse_condition (rc);
    }

  if (cmp_mode == TFmode && !TARGET_HARD_QUAD)
    cmp = sparc_emit_float_lib_cmp (XEXP (cmp, 0), XEXP (cmp, 1), rc);

  if (XEXP (cmp, 1) == const0_rtx
      && GET_CODE (XEXP (cmp, 0)) == REG
      && cmp_mode == DImode
      && v9_regcmp_p (rc))
    cc_reg = XEXP (cmp, 0);
  else
    cc_reg = gen_compare_reg_1 (rc, XEXP (cmp, 0), XEXP (cmp, 1));

  cmp = gen_rtx_fmt_ee (rc, GET_MODE (cc_reg), cc_reg, const0_rtx);

  emit_insn (gen_rtx_SET (VOIDmode, dst,
                          gen_rtx_IF_THEN_ELSE (mode, cmp, operands[2], dst)));

  if (dst != operands[0])
    emit_move_insn (operands[0], dst);

  return true;
}

#include "gt-sparc.h"