1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5 Free Software Foundation, Inc.
6 Contributed by A. Lichnewsky, lich@inria.inria.fr.
7 Changes by Michael Meissner, meissner@osf.org.
8 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
9 Brendan Eich, brendan@microunity.com.
11 This file is part of GCC.
13 GCC is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
18 GCC is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GCC; see the file COPYING3. If not see
25 <http://www.gnu.org/licenses/>. */
29 #include "coretypes.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "conditions.h"
36 #include "insn-attr.h"
52 #include "target-def.h"
53 #include "integrate.h"
54 #include "langhooks.h"
55 #include "cfglayout.h"
56 #include "sched-int.h"
59 #include "diagnostic.h"
60 #include "target-globals.h"
63 /* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
64 #define UNSPEC_ADDRESS_P(X) \
65 (GET_CODE (X) == UNSPEC \
66 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
67 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
69 /* Extract the symbol or label from UNSPEC wrapper X. */
70 #define UNSPEC_ADDRESS(X) \
73 /* Extract the symbol type from UNSPEC wrapper X. */
74 #define UNSPEC_ADDRESS_TYPE(X) \
75 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
77 /* The maximum distance between the top of the stack frame and the
78 value $sp has when we save and restore registers.
80 The value for normal-mode code must be a SMALL_OPERAND and must
81 preserve the maximum stack alignment. We therefore use a value
82 of 0x7ff0 in this case.
84 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
85 up to 0x7f8 bytes and can usually save or restore all the registers
86 that we need to save or restore. (Note that we can only use these
87 instructions for o32, for which the stack alignment is 8 bytes.)
89 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
90 RESTORE are not available. We can then use unextended instructions
91 to save and restore registers, and to allocate and deallocate the top
93 #define MIPS_MAX_FIRST_STACK_STEP \
94 (!TARGET_MIPS16 ? 0x7ff0 \
95 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
96 : TARGET_64BIT ? 0x100 : 0x400)
98 /* True if INSN is a mips.md pattern or asm statement. */
99 #define USEFUL_INSN_P(INSN) \
100 (NONDEBUG_INSN_P (INSN) \
101 && GET_CODE (PATTERN (INSN)) != USE \
102 && GET_CODE (PATTERN (INSN)) != CLOBBER \
103 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
104 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
106 /* If INSN is a delayed branch sequence, return the first instruction
107 in the sequence, otherwise return INSN itself. */
108 #define SEQ_BEGIN(INSN) \
109 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
110 ? XVECEXP (PATTERN (INSN), 0, 0) \
113 /* Likewise for the last instruction in a delayed branch sequence. */
114 #define SEQ_END(INSN) \
115 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
116 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
119 /* Execute the following loop body with SUBINSN set to each instruction
120 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
121 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
122 for ((SUBINSN) = SEQ_BEGIN (INSN); \
123 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
124 (SUBINSN) = NEXT_INSN (SUBINSN))
126 /* True if bit BIT is set in VALUE. */
127 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
129 /* Return the opcode for a ptr_mode load of the form:
131 l[wd] DEST, OFFSET(BASE). */
132 #define MIPS_LOAD_PTR(DEST, OFFSET, BASE) \
133 (((ptr_mode == DImode ? 0x37 : 0x23) << 26) \
138 /* Return the opcode to move register SRC into register DEST. */
139 #define MIPS_MOVE(DEST, SRC) \
140 ((TARGET_64BIT ? 0x2d : 0x21) \
144 /* Return the opcode for:
147 #define MIPS_LUI(DEST, VALUE) \
148 ((0xf << 26) | ((DEST) << 16) | (VALUE))
150 /* Return the opcode to jump to register DEST. */
151 #define MIPS_JR(DEST) \
152 (((DEST) << 21) | 0x8)
154 /* Return the opcode for:
156 bal . + (1 + OFFSET) * 4. */
157 #define MIPS_BAL(OFFSET) \
158 ((0x1 << 26) | (0x11 << 16) | (OFFSET))
160 /* Return the usual opcode for a nop. */
163 /* Classifies an address.
166 A natural register + offset address. The register satisfies
167 mips_valid_base_register_p and the offset is a const_arith_operand.
170 A LO_SUM rtx. The first operand is a valid base register and
171 the second operand is a symbolic address.
174 A signed 16-bit constant address.
177 A constant symbolic address. */
178 enum mips_address_type {
185 /* Macros to create an enumeration identifier for a function prototype. */
186 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
187 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
188 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
189 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
191 /* Classifies the prototype of a built-in function. */
192 enum mips_function_type {
193 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
194 #include "config/mips/mips-ftypes.def"
195 #undef DEF_MIPS_FTYPE
199 /* Specifies how a built-in function should be converted into rtl. */
200 enum mips_builtin_type {
201 /* The function corresponds directly to an .md pattern. The return
202 value is mapped to operand 0 and the arguments are mapped to
203 operands 1 and above. */
206 /* The function corresponds directly to an .md pattern. There is no return
207 value and the arguments are mapped to operands 0 and above. */
208 MIPS_BUILTIN_DIRECT_NO_TARGET,
210 /* The function corresponds to a comparison instruction followed by
211 a mips_cond_move_tf_ps pattern. The first two arguments are the
212 values to compare and the second two arguments are the vector
213 operands for the movt.ps or movf.ps instruction (in assembly order). */
217 /* The function corresponds to a V2SF comparison instruction. Operand 0
218 of this instruction is the result of the comparison, which has mode
219 CCV2 or CCV4. The function arguments are mapped to operands 1 and
220 above. The function's return value is an SImode boolean that is
221 true under the following conditions:
223 MIPS_BUILTIN_CMP_ANY: one of the registers is true
224 MIPS_BUILTIN_CMP_ALL: all of the registers are true
225 MIPS_BUILTIN_CMP_LOWER: the first register is true
226 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
227 MIPS_BUILTIN_CMP_ANY,
228 MIPS_BUILTIN_CMP_ALL,
229 MIPS_BUILTIN_CMP_UPPER,
230 MIPS_BUILTIN_CMP_LOWER,
232 /* As above, but the instruction only sets a single $fcc register. */
233 MIPS_BUILTIN_CMP_SINGLE,
235 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
236 MIPS_BUILTIN_BPOSGE32
239 /* Invoke MACRO (COND) for each C.cond.fmt condition. */
240 #define MIPS_FP_CONDITIONS(MACRO) \
258 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
259 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
260 enum mips_fp_condition {
261 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
264 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
265 #define STRINGIFY(X) #X
266 static const char *const mips_fp_conditions[] = {
267 MIPS_FP_CONDITIONS (STRINGIFY)
270 /* Information about a function's frame layout. */
271 struct GTY(()) mips_frame_info {
272 /* The size of the frame in bytes. */
273 HOST_WIDE_INT total_size;
275 /* The number of bytes allocated to variables. */
276 HOST_WIDE_INT var_size;
278 /* The number of bytes allocated to outgoing function arguments. */
279 HOST_WIDE_INT args_size;
281 /* The number of bytes allocated to the .cprestore slot, or 0 if there
283 HOST_WIDE_INT cprestore_size;
285 /* Bit X is set if the function saves or restores GPR X. */
288 /* Likewise FPR X. */
291 /* Likewise doubleword accumulator X ($acX). */
292 unsigned int acc_mask;
294 /* The number of GPRs, FPRs, doubleword accumulators and COP0
298 unsigned int num_acc;
299 unsigned int num_cop0_regs;
301 /* The offset of the topmost GPR, FPR, accumulator and COP0-register
302 save slots from the top of the frame, or zero if no such slots are
304 HOST_WIDE_INT gp_save_offset;
305 HOST_WIDE_INT fp_save_offset;
306 HOST_WIDE_INT acc_save_offset;
307 HOST_WIDE_INT cop0_save_offset;
309 /* Likewise, but giving offsets from the bottom of the frame. */
310 HOST_WIDE_INT gp_sp_offset;
311 HOST_WIDE_INT fp_sp_offset;
312 HOST_WIDE_INT acc_sp_offset;
313 HOST_WIDE_INT cop0_sp_offset;
315 /* Similar, but the value passed to _mcount. */
316 HOST_WIDE_INT ra_fp_offset;
318 /* The offset of arg_pointer_rtx from the bottom of the frame. */
319 HOST_WIDE_INT arg_pointer_offset;
321 /* The offset of hard_frame_pointer_rtx from the bottom of the frame. */
322 HOST_WIDE_INT hard_frame_pointer_offset;
325 struct GTY(()) machine_function {
326 /* The register returned by mips16_gp_pseudo_reg; see there for details. */
327 rtx mips16_gp_pseudo_rtx;
329 /* The number of extra stack bytes taken up by register varargs.
330 This area is allocated by the callee at the very top of the frame. */
333 /* The current frame information, calculated by mips_compute_frame_info. */
334 struct mips_frame_info frame;
336 /* The register to use as the function's global pointer, or INVALID_REGNUM
337 if the function doesn't need one. */
338 unsigned int global_pointer;
340 /* How many instructions it takes to load a label into $AT, or 0 if
341 this property hasn't yet been calculated. */
342 unsigned int load_label_num_insns;
344 /* True if mips_adjust_insn_length should ignore an instruction's
346 bool ignore_hazard_length_p;
348 /* True if the whole function is suitable for .set noreorder and
350 bool all_noreorder_p;
352 /* True if the function has "inflexible" and "flexible" references
353 to the global pointer. See mips_cfun_has_inflexible_gp_ref_p
354 and mips_cfun_has_flexible_gp_ref_p for details. */
355 bool has_inflexible_gp_insn_p;
356 bool has_flexible_gp_insn_p;
358 /* True if the function's prologue must load the global pointer
359 value into pic_offset_table_rtx and store the same value in
360 the function's cprestore slot (if any). Even if this value
361 is currently false, we may decide to set it to true later;
362 see mips_must_initialize_gp_p () for details. */
363 bool must_initialize_gp_p;
365 /* True if the current function must restore $gp after any potential
366 clobber. This value is only meaningful during the first post-epilogue
367 split_insns pass; see mips_must_initialize_gp_p () for details. */
368 bool must_restore_gp_when_clobbered_p;
370 /* True if this is an interrupt handler. */
371 bool interrupt_handler_p;
373 /* True if this is an interrupt handler that uses shadow registers. */
374 bool use_shadow_register_set_p;
376 /* True if this is an interrupt handler that should keep interrupts
378 bool keep_interrupts_masked_p;
380 /* True if this is an interrupt handler that should use DERET
382 bool use_debug_exception_return_p;
385 /* Information about a single argument. */
386 struct mips_arg_info {
387 /* True if the argument is passed in a floating-point register, or
388 would have been if we hadn't run out of registers. */
391 /* The number of words passed in registers, rounded up. */
392 unsigned int reg_words;
394 /* For EABI, the offset of the first register from GP_ARG_FIRST or
395 FP_ARG_FIRST. For other ABIs, the offset of the first register from
396 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
397 comment for details).
399 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
401 unsigned int reg_offset;
403 /* The number of words that must be passed on the stack, rounded up. */
404 unsigned int stack_words;
406 /* The offset from the start of the stack overflow area of the argument's
407 first stack word. Only meaningful when STACK_WORDS is nonzero. */
408 unsigned int stack_offset;
411 /* Information about an address described by mips_address_type.
417 REG is the base register and OFFSET is the constant offset.
420 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
421 is the type of symbol it references.
424 SYMBOL_TYPE is the type of symbol that the address references. */
425 struct mips_address_info {
426 enum mips_address_type type;
429 enum mips_symbol_type symbol_type;
432 /* One stage in a constant building sequence. These sequences have
436 A = A CODE[1] VALUE[1]
437 A = A CODE[2] VALUE[2]
440 where A is an accumulator, each CODE[i] is a binary rtl operation
441 and each VALUE[i] is a constant integer. CODE[0] is undefined. */
442 struct mips_integer_op {
444 unsigned HOST_WIDE_INT value;
447 /* The largest number of operations needed to load an integer constant.
448 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
449 When the lowest bit is clear, we can try, but reject a sequence with
450 an extra SLL at the end. */
451 #define MIPS_MAX_INTEGER_OPS 7
453 /* Information about a MIPS16e SAVE or RESTORE instruction. */
454 struct mips16e_save_restore_info {
455 /* The number of argument registers saved by a SAVE instruction.
456 0 for RESTORE instructions. */
459 /* Bit X is set if the instruction saves or restores GPR X. */
462 /* The total number of bytes to allocate. */
466 /* Costs of various operations on the different architectures. */
468 struct mips_rtx_cost_data
470 unsigned short fp_add;
471 unsigned short fp_mult_sf;
472 unsigned short fp_mult_df;
473 unsigned short fp_div_sf;
474 unsigned short fp_div_df;
475 unsigned short int_mult_si;
476 unsigned short int_mult_di;
477 unsigned short int_div_si;
478 unsigned short int_div_di;
479 unsigned short branch_cost;
480 unsigned short memory_latency;
483 /* Global variables for machine-dependent things. */
485 /* The -G setting, or the configuration's default small-data limit if
486 no -G option is given. */
487 static unsigned int mips_small_data_threshold;
489 /* The number of file directives written by mips_output_filename. */
490 int num_source_filenames;
492 /* The name that appeared in the last .file directive written by
493 mips_output_filename, or "" if mips_output_filename hasn't
494 written anything yet. */
495 const char *current_function_file = "";
497 /* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END. */
500 /* Arrays that map GCC register numbers to debugger register numbers. */
501 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
502 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
504 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs. */
505 struct mips_asm_switch mips_noreorder = { "reorder", 0 };
506 struct mips_asm_switch mips_nomacro = { "macro", 0 };
507 struct mips_asm_switch mips_noat = { "at", 0 };
509 /* True if we're writing out a branch-likely instruction rather than a
511 static bool mips_branch_likely;
513 /* The current instruction-set architecture. */
514 enum processor mips_arch;
515 const struct mips_cpu_info *mips_arch_info;
517 /* The processor that we should tune the code for. */
518 enum processor mips_tune;
519 const struct mips_cpu_info *mips_tune_info;
521 /* The ISA level associated with mips_arch. */
524 /* The architecture selected by -mipsN, or null if -mipsN wasn't used. */
525 static const struct mips_cpu_info *mips_isa_option_info;
527 /* Which cost information to use. */
528 static const struct mips_rtx_cost_data *mips_cost;
530 /* The ambient target flags, excluding MASK_MIPS16. */
531 static int mips_base_target_flags;
533 /* True if MIPS16 is the default mode. */
534 bool mips_base_mips16;
536 /* The ambient values of other global variables. */
537 static int mips_base_schedule_insns; /* flag_schedule_insns */
538 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
539 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
540 static int mips_base_align_loops; /* align_loops */
541 static int mips_base_align_jumps; /* align_jumps */
542 static int mips_base_align_functions; /* align_functions */
544 /* Index [M][R] is true if register R is allowed to hold a value of mode M. */
545 bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
547 /* Index C is true if character C is a valid PRINT_OPERAND punctation
549 static bool mips_print_operand_punct[256];
551 static GTY (()) int mips_output_filename_first_time = 1;
553 /* mips_split_p[X] is true if symbols of type X can be split by
554 mips_split_symbol. */
555 bool mips_split_p[NUM_SYMBOL_TYPES];
557 /* mips_split_hi_p[X] is true if the high parts of symbols of type X
558 can be split by mips_split_symbol. */
559 bool mips_split_hi_p[NUM_SYMBOL_TYPES];
561 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
562 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
563 if they are matched by a special .md file pattern. */
564 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
566 /* Likewise for HIGHs. */
567 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
569 /* Target state for MIPS16. */
570 struct target_globals *mips16_globals;
572 /* Cached value of can_issue_more. This is cached in mips_variable_issue hook
573 and returned from mips_sched_reorder2. */
574 static int cached_can_issue_more;
576 /* Index R is the smallest register class that contains register R. */
577 const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
578 LEA_REGS, LEA_REGS, M16_REGS, V1_REG,
579 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
580 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
581 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
582 M16_REGS, M16_REGS, LEA_REGS, LEA_REGS,
583 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
584 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
585 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
586 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
587 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
588 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
589 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
590 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
591 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
592 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
593 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
594 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
595 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
596 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
597 NO_REGS, FRAME_REGS, FRAME_REGS, NO_REGS,
598 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
599 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
600 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
601 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
602 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
603 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
604 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
605 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
606 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
607 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
608 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
609 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
610 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
611 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
612 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
613 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
614 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
615 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
616 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
617 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
618 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
619 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
620 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
621 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
622 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
623 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
624 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
627 /* The value of TARGET_ATTRIBUTE_TABLE. */
628 static const struct attribute_spec mips_attribute_table[] = {
629 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
631 { "long_call", 0, 0, false, true, true, NULL, false },
632 { "far", 0, 0, false, true, true, NULL, false },
633 { "near", 0, 0, false, true, true, NULL, false },
634 /* We would really like to treat "mips16" and "nomips16" as type
635 attributes, but GCC doesn't provide the hooks we need to support
636 the right conversion rules. As declaration attributes, they affect
637 code generation but don't carry other semantics. */
638 { "mips16", 0, 0, true, false, false, NULL, false },
639 { "nomips16", 0, 0, true, false, false, NULL, false },
640 /* Allow functions to be specified as interrupt handlers */
641 { "interrupt", 0, 0, false, true, true, NULL, false },
642 { "use_shadow_register_set", 0, 0, false, true, true, NULL, false },
643 { "keep_interrupts_masked", 0, 0, false, true, true, NULL, false },
644 { "use_debug_exception_return", 0, 0, false, true, true, NULL, false },
645 { NULL, 0, 0, false, false, false, NULL, false }
648 /* A table describing all the processors GCC knows about. Names are
649 matched in the order listed. The first mention of an ISA level is
650 taken as the canonical name for that ISA.
652 To ease comparison, please keep this table in the same order
653 as GAS's mips_cpu_info_table. Please also make sure that
654 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
655 options correctly. */
656 static const struct mips_cpu_info mips_cpu_info_table[] = {
657 /* Entries for generic ISAs. */
658 { "mips1", PROCESSOR_R3000, 1, 0 },
659 { "mips2", PROCESSOR_R6000, 2, 0 },
660 { "mips3", PROCESSOR_R4000, 3, 0 },
661 { "mips4", PROCESSOR_R8000, 4, 0 },
662 /* Prefer not to use branch-likely instructions for generic MIPS32rX
663 and MIPS64rX code. The instructions were officially deprecated
664 in revisions 2 and earlier, but revision 3 is likely to downgrade
665 that to a recommendation to avoid the instructions in code that
666 isn't tuned to a specific processor. */
667 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
668 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
669 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
670 /* ??? For now just tune the generic MIPS64r2 for 5KC as well. */
671 { "mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY },
673 /* MIPS I processors. */
674 { "r3000", PROCESSOR_R3000, 1, 0 },
675 { "r2000", PROCESSOR_R3000, 1, 0 },
676 { "r3900", PROCESSOR_R3900, 1, 0 },
678 /* MIPS II processors. */
679 { "r6000", PROCESSOR_R6000, 2, 0 },
681 /* MIPS III processors. */
682 { "r4000", PROCESSOR_R4000, 3, 0 },
683 { "vr4100", PROCESSOR_R4100, 3, 0 },
684 { "vr4111", PROCESSOR_R4111, 3, 0 },
685 { "vr4120", PROCESSOR_R4120, 3, 0 },
686 { "vr4130", PROCESSOR_R4130, 3, 0 },
687 { "vr4300", PROCESSOR_R4300, 3, 0 },
688 { "r4400", PROCESSOR_R4000, 3, 0 },
689 { "r4600", PROCESSOR_R4600, 3, 0 },
690 { "orion", PROCESSOR_R4600, 3, 0 },
691 { "r4650", PROCESSOR_R4650, 3, 0 },
692 /* ST Loongson 2E/2F processors. */
693 { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
694 { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },
696 /* MIPS IV processors. */
697 { "r8000", PROCESSOR_R8000, 4, 0 },
698 { "r10000", PROCESSOR_R10000, 4, 0 },
699 { "r12000", PROCESSOR_R10000, 4, 0 },
700 { "r14000", PROCESSOR_R10000, 4, 0 },
701 { "r16000", PROCESSOR_R10000, 4, 0 },
702 { "vr5000", PROCESSOR_R5000, 4, 0 },
703 { "vr5400", PROCESSOR_R5400, 4, 0 },
704 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
705 { "rm7000", PROCESSOR_R7000, 4, 0 },
706 { "rm9000", PROCESSOR_R9000, 4, 0 },
708 /* MIPS32 processors. */
709 { "4kc", PROCESSOR_4KC, 32, 0 },
710 { "4km", PROCESSOR_4KC, 32, 0 },
711 { "4kp", PROCESSOR_4KP, 32, 0 },
712 { "4ksc", PROCESSOR_4KC, 32, 0 },
714 /* MIPS32 Release 2 processors. */
715 { "m4k", PROCESSOR_M4K, 33, 0 },
716 { "4kec", PROCESSOR_4KC, 33, 0 },
717 { "4kem", PROCESSOR_4KC, 33, 0 },
718 { "4kep", PROCESSOR_4KP, 33, 0 },
719 { "4ksd", PROCESSOR_4KC, 33, 0 },
721 { "24kc", PROCESSOR_24KC, 33, 0 },
722 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
723 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
724 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
725 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
726 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
728 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP. */
729 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
730 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
731 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
732 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
733 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
735 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP. */
736 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
737 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
738 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
739 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
740 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
742 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2. */
743 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
744 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
745 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
746 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
747 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
748 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
750 { "1004kc", PROCESSOR_24KC, 33, 0 }, /* 1004K with MT/DSP. */
751 { "1004kf2_1", PROCESSOR_24KF2_1, 33, 0 },
752 { "1004kf", PROCESSOR_24KF2_1, 33, 0 },
753 { "1004kf1_1", PROCESSOR_24KF1_1, 33, 0 },
755 /* MIPS64 processors. */
756 { "5kc", PROCESSOR_5KC, 64, 0 },
757 { "5kf", PROCESSOR_5KF, 64, 0 },
758 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
759 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
760 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
761 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
762 { "xlr", PROCESSOR_XLR, 64, 0 },
763 { "loongson3a", PROCESSOR_LOONGSON_3A, 64, PTF_AVOID_BRANCHLIKELY },
765 /* MIPS64 Release 2 processors. */
766 { "octeon", PROCESSOR_OCTEON, 65, PTF_AVOID_BRANCHLIKELY }
769 /* Default costs. If these are used for a processor we should look
770 up the actual costs. */
771 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
772 COSTS_N_INSNS (7), /* fp_mult_sf */ \
773 COSTS_N_INSNS (8), /* fp_mult_df */ \
774 COSTS_N_INSNS (23), /* fp_div_sf */ \
775 COSTS_N_INSNS (36), /* fp_div_df */ \
776 COSTS_N_INSNS (10), /* int_mult_si */ \
777 COSTS_N_INSNS (10), /* int_mult_di */ \
778 COSTS_N_INSNS (69), /* int_div_si */ \
779 COSTS_N_INSNS (69), /* int_div_di */ \
780 2, /* branch_cost */ \
781 4 /* memory_latency */
783 /* Floating-point costs for processors without an FPU. Just assume that
784 all floating-point libcalls are very expensive. */
785 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
786 COSTS_N_INSNS (256), /* fp_mult_sf */ \
787 COSTS_N_INSNS (256), /* fp_mult_df */ \
788 COSTS_N_INSNS (256), /* fp_div_sf */ \
789 COSTS_N_INSNS (256) /* fp_div_df */
791 /* Costs to use when optimizing for size. */
792 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
793 COSTS_N_INSNS (1), /* fp_add */
794 COSTS_N_INSNS (1), /* fp_mult_sf */
795 COSTS_N_INSNS (1), /* fp_mult_df */
796 COSTS_N_INSNS (1), /* fp_div_sf */
797 COSTS_N_INSNS (1), /* fp_div_df */
798 COSTS_N_INSNS (1), /* int_mult_si */
799 COSTS_N_INSNS (1), /* int_mult_di */
800 COSTS_N_INSNS (1), /* int_div_si */
801 COSTS_N_INSNS (1), /* int_div_di */
803 4 /* memory_latency */
806 /* Costs to use when optimizing for speed, indexed by processor. */
807 static const struct mips_rtx_cost_data
808 mips_rtx_cost_data[NUM_PROCESSOR_VALUES] = {
810 COSTS_N_INSNS (2), /* fp_add */
811 COSTS_N_INSNS (4), /* fp_mult_sf */
812 COSTS_N_INSNS (5), /* fp_mult_df */
813 COSTS_N_INSNS (12), /* fp_div_sf */
814 COSTS_N_INSNS (19), /* fp_div_df */
815 COSTS_N_INSNS (12), /* int_mult_si */
816 COSTS_N_INSNS (12), /* int_mult_di */
817 COSTS_N_INSNS (35), /* int_div_si */
818 COSTS_N_INSNS (35), /* int_div_di */
820 4 /* memory_latency */
824 COSTS_N_INSNS (6), /* int_mult_si */
825 COSTS_N_INSNS (6), /* int_mult_di */
826 COSTS_N_INSNS (36), /* int_div_si */
827 COSTS_N_INSNS (36), /* int_div_di */
829 4 /* memory_latency */
833 COSTS_N_INSNS (36), /* int_mult_si */
834 COSTS_N_INSNS (36), /* int_mult_di */
835 COSTS_N_INSNS (37), /* int_div_si */
836 COSTS_N_INSNS (37), /* int_div_di */
838 4 /* memory_latency */
842 COSTS_N_INSNS (4), /* int_mult_si */
843 COSTS_N_INSNS (11), /* int_mult_di */
844 COSTS_N_INSNS (36), /* int_div_si */
845 COSTS_N_INSNS (68), /* int_div_di */
847 4 /* memory_latency */
850 COSTS_N_INSNS (4), /* fp_add */
851 COSTS_N_INSNS (4), /* fp_mult_sf */
852 COSTS_N_INSNS (5), /* fp_mult_df */
853 COSTS_N_INSNS (17), /* fp_div_sf */
854 COSTS_N_INSNS (32), /* fp_div_df */
855 COSTS_N_INSNS (4), /* int_mult_si */
856 COSTS_N_INSNS (11), /* int_mult_di */
857 COSTS_N_INSNS (36), /* int_div_si */
858 COSTS_N_INSNS (68), /* int_div_di */
860 4 /* memory_latency */
863 COSTS_N_INSNS (4), /* fp_add */
864 COSTS_N_INSNS (4), /* fp_mult_sf */
865 COSTS_N_INSNS (5), /* fp_mult_df */
866 COSTS_N_INSNS (17), /* fp_div_sf */
867 COSTS_N_INSNS (32), /* fp_div_df */
868 COSTS_N_INSNS (4), /* int_mult_si */
869 COSTS_N_INSNS (7), /* int_mult_di */
870 COSTS_N_INSNS (42), /* int_div_si */
871 COSTS_N_INSNS (72), /* int_div_di */
873 4 /* memory_latency */
877 COSTS_N_INSNS (5), /* int_mult_si */
878 COSTS_N_INSNS (5), /* int_mult_di */
879 COSTS_N_INSNS (41), /* int_div_si */
880 COSTS_N_INSNS (41), /* int_div_di */
882 4 /* memory_latency */
885 COSTS_N_INSNS (8), /* fp_add */
886 COSTS_N_INSNS (8), /* fp_mult_sf */
887 COSTS_N_INSNS (10), /* fp_mult_df */
888 COSTS_N_INSNS (34), /* fp_div_sf */
889 COSTS_N_INSNS (64), /* fp_div_df */
890 COSTS_N_INSNS (5), /* int_mult_si */
891 COSTS_N_INSNS (5), /* int_mult_di */
892 COSTS_N_INSNS (41), /* int_div_si */
893 COSTS_N_INSNS (41), /* int_div_di */
895 4 /* memory_latency */
898 COSTS_N_INSNS (4), /* fp_add */
899 COSTS_N_INSNS (4), /* fp_mult_sf */
900 COSTS_N_INSNS (5), /* fp_mult_df */
901 COSTS_N_INSNS (17), /* fp_div_sf */
902 COSTS_N_INSNS (32), /* fp_div_df */
903 COSTS_N_INSNS (5), /* int_mult_si */
904 COSTS_N_INSNS (5), /* int_mult_di */
905 COSTS_N_INSNS (41), /* int_div_si */
906 COSTS_N_INSNS (41), /* int_div_di */
908 4 /* memory_latency */
912 COSTS_N_INSNS (5), /* int_mult_si */
913 COSTS_N_INSNS (5), /* int_mult_di */
914 COSTS_N_INSNS (41), /* int_div_si */
915 COSTS_N_INSNS (41), /* int_div_di */
917 4 /* memory_latency */
920 COSTS_N_INSNS (8), /* fp_add */
921 COSTS_N_INSNS (8), /* fp_mult_sf */
922 COSTS_N_INSNS (10), /* fp_mult_df */
923 COSTS_N_INSNS (34), /* fp_div_sf */
924 COSTS_N_INSNS (64), /* fp_div_df */
925 COSTS_N_INSNS (5), /* int_mult_si */
926 COSTS_N_INSNS (5), /* int_mult_di */
927 COSTS_N_INSNS (41), /* int_div_si */
928 COSTS_N_INSNS (41), /* int_div_di */
930 4 /* memory_latency */
933 COSTS_N_INSNS (4), /* fp_add */
934 COSTS_N_INSNS (4), /* fp_mult_sf */
935 COSTS_N_INSNS (5), /* fp_mult_df */
936 COSTS_N_INSNS (17), /* fp_div_sf */
937 COSTS_N_INSNS (32), /* fp_div_df */
938 COSTS_N_INSNS (5), /* int_mult_si */
939 COSTS_N_INSNS (5), /* int_mult_di */
940 COSTS_N_INSNS (41), /* int_div_si */
941 COSTS_N_INSNS (41), /* int_div_di */
943 4 /* memory_latency */
946 COSTS_N_INSNS (6), /* fp_add */
947 COSTS_N_INSNS (6), /* fp_mult_sf */
948 COSTS_N_INSNS (7), /* fp_mult_df */
949 COSTS_N_INSNS (25), /* fp_div_sf */
950 COSTS_N_INSNS (48), /* fp_div_df */
951 COSTS_N_INSNS (5), /* int_mult_si */
952 COSTS_N_INSNS (5), /* int_mult_di */
953 COSTS_N_INSNS (41), /* int_div_si */
954 COSTS_N_INSNS (41), /* int_div_di */
956 4 /* memory_latency */
973 COSTS_N_INSNS (5), /* int_mult_si */
974 COSTS_N_INSNS (5), /* int_mult_di */
975 COSTS_N_INSNS (72), /* int_div_si */
976 COSTS_N_INSNS (72), /* int_div_di */
978 4 /* memory_latency */
981 COSTS_N_INSNS (2), /* fp_add */
982 COSTS_N_INSNS (4), /* fp_mult_sf */
983 COSTS_N_INSNS (5), /* fp_mult_df */
984 COSTS_N_INSNS (12), /* fp_div_sf */
985 COSTS_N_INSNS (19), /* fp_div_df */
986 COSTS_N_INSNS (2), /* int_mult_si */
987 COSTS_N_INSNS (2), /* int_mult_di */
988 COSTS_N_INSNS (35), /* int_div_si */
989 COSTS_N_INSNS (35), /* int_div_di */
991 4 /* memory_latency */
994 COSTS_N_INSNS (3), /* fp_add */
995 COSTS_N_INSNS (5), /* fp_mult_sf */
996 COSTS_N_INSNS (6), /* fp_mult_df */
997 COSTS_N_INSNS (15), /* fp_div_sf */
998 COSTS_N_INSNS (16), /* fp_div_df */
999 COSTS_N_INSNS (17), /* int_mult_si */
1000 COSTS_N_INSNS (17), /* int_mult_di */
1001 COSTS_N_INSNS (38), /* int_div_si */
1002 COSTS_N_INSNS (38), /* int_div_di */
1003 2, /* branch_cost */
1004 6 /* memory_latency */
1007 COSTS_N_INSNS (6), /* fp_add */
1008 COSTS_N_INSNS (7), /* fp_mult_sf */
1009 COSTS_N_INSNS (8), /* fp_mult_df */
1010 COSTS_N_INSNS (23), /* fp_div_sf */
1011 COSTS_N_INSNS (36), /* fp_div_df */
1012 COSTS_N_INSNS (10), /* int_mult_si */
1013 COSTS_N_INSNS (10), /* int_mult_di */
1014 COSTS_N_INSNS (69), /* int_div_si */
1015 COSTS_N_INSNS (69), /* int_div_di */
1016 2, /* branch_cost */
1017 6 /* memory_latency */
1029 /* The only costs that appear to be updated here are
1030 integer multiplication. */
1032 COSTS_N_INSNS (4), /* int_mult_si */
1033 COSTS_N_INSNS (6), /* int_mult_di */
1034 COSTS_N_INSNS (69), /* int_div_si */
1035 COSTS_N_INSNS (69), /* int_div_di */
1036 1, /* branch_cost */
1037 4 /* memory_latency */
1049 COSTS_N_INSNS (6), /* fp_add */
1050 COSTS_N_INSNS (4), /* fp_mult_sf */
1051 COSTS_N_INSNS (5), /* fp_mult_df */
1052 COSTS_N_INSNS (23), /* fp_div_sf */
1053 COSTS_N_INSNS (36), /* fp_div_df */
1054 COSTS_N_INSNS (5), /* int_mult_si */
1055 COSTS_N_INSNS (5), /* int_mult_di */
1056 COSTS_N_INSNS (36), /* int_div_si */
1057 COSTS_N_INSNS (36), /* int_div_di */
1058 1, /* branch_cost */
1059 4 /* memory_latency */
1062 COSTS_N_INSNS (6), /* fp_add */
1063 COSTS_N_INSNS (5), /* fp_mult_sf */
1064 COSTS_N_INSNS (6), /* fp_mult_df */
1065 COSTS_N_INSNS (30), /* fp_div_sf */
1066 COSTS_N_INSNS (59), /* fp_div_df */
1067 COSTS_N_INSNS (3), /* int_mult_si */
1068 COSTS_N_INSNS (4), /* int_mult_di */
1069 COSTS_N_INSNS (42), /* int_div_si */
1070 COSTS_N_INSNS (74), /* int_div_di */
1071 1, /* branch_cost */
1072 4 /* memory_latency */
1075 COSTS_N_INSNS (6), /* fp_add */
1076 COSTS_N_INSNS (5), /* fp_mult_sf */
1077 COSTS_N_INSNS (6), /* fp_mult_df */
1078 COSTS_N_INSNS (30), /* fp_div_sf */
1079 COSTS_N_INSNS (59), /* fp_div_df */
1080 COSTS_N_INSNS (5), /* int_mult_si */
1081 COSTS_N_INSNS (9), /* int_mult_di */
1082 COSTS_N_INSNS (42), /* int_div_si */
1083 COSTS_N_INSNS (74), /* int_div_di */
1084 1, /* branch_cost */
1085 4 /* memory_latency */
1088 /* The only costs that are changed here are
1089 integer multiplication. */
1090 COSTS_N_INSNS (6), /* fp_add */
1091 COSTS_N_INSNS (7), /* fp_mult_sf */
1092 COSTS_N_INSNS (8), /* fp_mult_df */
1093 COSTS_N_INSNS (23), /* fp_div_sf */
1094 COSTS_N_INSNS (36), /* fp_div_df */
1095 COSTS_N_INSNS (5), /* int_mult_si */
1096 COSTS_N_INSNS (9), /* int_mult_di */
1097 COSTS_N_INSNS (69), /* int_div_si */
1098 COSTS_N_INSNS (69), /* int_div_di */
1099 1, /* branch_cost */
1100 4 /* memory_latency */
1106 /* The only costs that are changed here are
1107 integer multiplication. */
1108 COSTS_N_INSNS (6), /* fp_add */
1109 COSTS_N_INSNS (7), /* fp_mult_sf */
1110 COSTS_N_INSNS (8), /* fp_mult_df */
1111 COSTS_N_INSNS (23), /* fp_div_sf */
1112 COSTS_N_INSNS (36), /* fp_div_df */
1113 COSTS_N_INSNS (3), /* int_mult_si */
1114 COSTS_N_INSNS (8), /* int_mult_di */
1115 COSTS_N_INSNS (69), /* int_div_si */
1116 COSTS_N_INSNS (69), /* int_div_di */
1117 1, /* branch_cost */
1118 4 /* memory_latency */
1121 COSTS_N_INSNS (2), /* fp_add */
1122 COSTS_N_INSNS (2), /* fp_mult_sf */
1123 COSTS_N_INSNS (2), /* fp_mult_df */
1124 COSTS_N_INSNS (12), /* fp_div_sf */
1125 COSTS_N_INSNS (19), /* fp_div_df */
1126 COSTS_N_INSNS (5), /* int_mult_si */
1127 COSTS_N_INSNS (9), /* int_mult_di */
1128 COSTS_N_INSNS (34), /* int_div_si */
1129 COSTS_N_INSNS (66), /* int_div_di */
1130 1, /* branch_cost */
1131 4 /* memory_latency */
1134 /* These costs are the same as the SB-1A below. */
1135 COSTS_N_INSNS (4), /* fp_add */
1136 COSTS_N_INSNS (4), /* fp_mult_sf */
1137 COSTS_N_INSNS (4), /* fp_mult_df */
1138 COSTS_N_INSNS (24), /* fp_div_sf */
1139 COSTS_N_INSNS (32), /* fp_div_df */
1140 COSTS_N_INSNS (3), /* int_mult_si */
1141 COSTS_N_INSNS (4), /* int_mult_di */
1142 COSTS_N_INSNS (36), /* int_div_si */
1143 COSTS_N_INSNS (68), /* int_div_di */
1144 1, /* branch_cost */
1145 4 /* memory_latency */
1148 /* These costs are the same as the SB-1 above. */
1149 COSTS_N_INSNS (4), /* fp_add */
1150 COSTS_N_INSNS (4), /* fp_mult_sf */
1151 COSTS_N_INSNS (4), /* fp_mult_df */
1152 COSTS_N_INSNS (24), /* fp_div_sf */
1153 COSTS_N_INSNS (32), /* fp_div_df */
1154 COSTS_N_INSNS (3), /* int_mult_si */
1155 COSTS_N_INSNS (4), /* int_mult_di */
1156 COSTS_N_INSNS (36), /* int_div_si */
1157 COSTS_N_INSNS (68), /* int_div_di */
1158 1, /* branch_cost */
1159 4 /* memory_latency */
1166 COSTS_N_INSNS (8), /* int_mult_si */
1167 COSTS_N_INSNS (8), /* int_mult_di */
1168 COSTS_N_INSNS (72), /* int_div_si */
1169 COSTS_N_INSNS (72), /* int_div_di */
1170 1, /* branch_cost */
1171 4 /* memory_latency */
1175 static rtx mips_find_pic_call_symbol (rtx, rtx);
1176 static int mips_register_move_cost (enum machine_mode, reg_class_t,
1178 static unsigned int mips_function_arg_boundary (enum machine_mode, const_tree);
1180 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1181 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1182 struct GTY (()) mflip_mips16_entry {
1186 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1188 /* Hash table callbacks for mflip_mips16_htab. */
1191 mflip_mips16_htab_hash (const void *entry)
1193 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1197 mflip_mips16_htab_eq (const void *entry, const void *name)
1199 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1200 (const char *) name) == 0;
1203 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1204 mode, false if it should next add an attribute for the opposite mode. */
1205 static GTY(()) bool mips16_flipper;
1207 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1208 for -mflip-mips16. Return true if it should use "mips16" and false if
1209 it should use "nomips16". */
1212 mflip_mips16_use_mips16_p (tree decl)
1214 struct mflip_mips16_entry *entry;
1219 /* Use the opposite of the command-line setting for anonymous decls. */
1220 if (!DECL_NAME (decl))
1221 return !mips_base_mips16;
1223 if (!mflip_mips16_htab)
1224 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1225 mflip_mips16_htab_eq, NULL);
1227 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1228 hash = htab_hash_string (name);
1229 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1230 entry = (struct mflip_mips16_entry *) *slot;
1233 mips16_flipper = !mips16_flipper;
1234 entry = ggc_alloc_mflip_mips16_entry ();
1236 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1239 return entry->mips16_p;
1242 /* Predicates to test for presence of "near" and "far"/"long_call"
1243 attributes on the given TYPE. */
1246 mips_near_type_p (const_tree type)
1248 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1252 mips_far_type_p (const_tree type)
1254 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1255 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1258 /* Similar predicates for "mips16"/"nomips16" function attributes. */
1261 mips_mips16_decl_p (const_tree decl)
1263 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1267 mips_nomips16_decl_p (const_tree decl)
1269 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1272 /* Check if the interrupt attribute is set for a function. */
1275 mips_interrupt_type_p (tree type)
1277 return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL;
1280 /* Check if the attribute to use shadow register set is set for a function. */
1283 mips_use_shadow_register_set_p (tree type)
1285 return lookup_attribute ("use_shadow_register_set",
1286 TYPE_ATTRIBUTES (type)) != NULL;
1289 /* Check if the attribute to keep interrupts masked is set for a function. */
1292 mips_keep_interrupts_masked_p (tree type)
1294 return lookup_attribute ("keep_interrupts_masked",
1295 TYPE_ATTRIBUTES (type)) != NULL;
1298 /* Check if the attribute to use debug exception return is set for
1302 mips_use_debug_exception_return_p (tree type)
1304 return lookup_attribute ("use_debug_exception_return",
1305 TYPE_ATTRIBUTES (type)) != NULL;
1308 /* Return true if function DECL is a MIPS16 function. Return the ambient
1309 setting if DECL is null. */
1312 mips_use_mips16_mode_p (tree decl)
1316 /* Nested functions must use the same frame pointer as their
1317 parent and must therefore use the same ISA mode. */
1318 tree parent = decl_function_context (decl);
1321 if (mips_mips16_decl_p (decl))
1323 if (mips_nomips16_decl_p (decl))
1326 return mips_base_mips16;
1329 /* Implement TARGET_COMP_TYPE_ATTRIBUTES. */
1332 mips_comp_type_attributes (const_tree type1, const_tree type2)
1334 /* Disallow mixed near/far attributes. */
1335 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1337 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1342 /* Implement TARGET_INSERT_ATTRIBUTES. */
1345 mips_insert_attributes (tree decl, tree *attributes)
1348 bool mips16_p, nomips16_p;
1350 /* Check for "mips16" and "nomips16" attributes. */
1351 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1352 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1353 if (TREE_CODE (decl) != FUNCTION_DECL)
1356 error ("%qs attribute only applies to functions", "mips16");
1358 error ("%qs attribute only applies to functions", "nomips16");
1362 mips16_p |= mips_mips16_decl_p (decl);
1363 nomips16_p |= mips_nomips16_decl_p (decl);
1364 if (mips16_p || nomips16_p)
1366 /* DECL cannot be simultaneously "mips16" and "nomips16". */
1367 if (mips16_p && nomips16_p)
1368 error ("%qE cannot have both %<mips16%> and "
1369 "%<nomips16%> attributes",
1372 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1374 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1375 "mips16" attribute, arbitrarily pick one. We must pick the same
1376 setting for duplicate declarations of a function. */
1377 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1378 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1383 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1386 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1388 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1389 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1390 error ("%qE redeclared with conflicting %qs attributes",
1391 DECL_NAME (newdecl), "mips16");
1392 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1393 error ("%qE redeclared with conflicting %qs attributes",
1394 DECL_NAME (newdecl), "nomips16");
1396 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1397 DECL_ATTRIBUTES (newdecl));
1400 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1401 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1404 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1406 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
1408 *base_ptr = XEXP (x, 0);
1409 *offset_ptr = INTVAL (XEXP (x, 1));
1418 static unsigned int mips_build_integer (struct mips_integer_op *,
1419 unsigned HOST_WIDE_INT);
1421 /* A subroutine of mips_build_integer, with the same interface.
1422 Assume that the final action in the sequence should be a left shift. */
1425 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1427 unsigned int i, shift;
1429 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1430 since signed numbers are easier to load than unsigned ones. */
1432 while ((value & 1) == 0)
1433 value /= 2, shift++;
1435 i = mips_build_integer (codes, value);
1436 codes[i].code = ASHIFT;
1437 codes[i].value = shift;
1441 /* As for mips_build_shift, but assume that the final action will be
1442 an IOR or PLUS operation. */
1445 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1447 unsigned HOST_WIDE_INT high;
1450 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1451 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1453 /* The constant is too complex to load with a simple LUI/ORI pair,
1454 so we want to give the recursive call as many trailing zeros as
1455 possible. In this case, we know bit 16 is set and that the
1456 low 16 bits form a negative number. If we subtract that number
1457 from VALUE, we will clear at least the lowest 17 bits, maybe more. */
1458 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1459 codes[i].code = PLUS;
1460 codes[i].value = CONST_LOW_PART (value);
1464 /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1465 bits gives a value with at least 17 trailing zeros. */
1466 i = mips_build_integer (codes, high);
1467 codes[i].code = IOR;
1468 codes[i].value = value & 0xffff;
1473 /* Fill CODES with a sequence of rtl operations to load VALUE.
1474 Return the number of operations needed. */
1477 mips_build_integer (struct mips_integer_op *codes,
1478 unsigned HOST_WIDE_INT value)
1480 if (SMALL_OPERAND (value)
1481 || SMALL_OPERAND_UNSIGNED (value)
1482 || LUI_OPERAND (value))
1484 /* The value can be loaded with a single instruction. */
1485 codes[0].code = UNKNOWN;
1486 codes[0].value = value;
1489 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1491 /* Either the constant is a simple LUI/ORI combination or its
1492 lowest bit is set. We don't want to shift in this case. */
1493 return mips_build_lower (codes, value);
1495 else if ((value & 0xffff) == 0)
1497 /* The constant will need at least three actions. The lowest
1498 16 bits are clear, so the final action will be a shift. */
1499 return mips_build_shift (codes, value);
1503 /* The final action could be a shift, add or inclusive OR.
1504 Rather than use a complex condition to select the best
1505 approach, try both mips_build_shift and mips_build_lower
1506 and pick the one that gives the shortest sequence.
1507 Note that this case is only used once per constant. */
1508 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1509 unsigned int cost, alt_cost;
1511 cost = mips_build_shift (codes, value);
1512 alt_cost = mips_build_lower (alt_codes, value);
1513 if (alt_cost < cost)
1515 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1522 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
1525 mips_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1527 return mips_const_insns (x) > 0;
1530 /* Return true if symbols of type TYPE require a GOT access. */
1533 mips_got_symbol_type_p (enum mips_symbol_type type)
1537 case SYMBOL_GOT_PAGE_OFST:
1538 case SYMBOL_GOT_DISP:
1546 /* Return true if X is a thread-local symbol. */
1549 mips_tls_symbol_p (rtx x)
1551 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1554 /* Return true if SYMBOL_REF X is associated with a global symbol
1555 (in the STB_GLOBAL sense). */
1558 mips_global_symbol_p (const_rtx x)
1560 const_tree decl = SYMBOL_REF_DECL (x);
1563 return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);
1565 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1566 or weak symbols. Relocations in the object file will be against
1567 the target symbol, so it's that symbol's binding that matters here. */
1568 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1571 /* Return true if function X is a libgcc MIPS16 stub function. */
1574 mips16_stub_function_p (const_rtx x)
1576 return (GET_CODE (x) == SYMBOL_REF
1577 && strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
1580 /* Return true if function X is a locally-defined and locally-binding
1584 mips16_local_function_p (const_rtx x)
1586 return (GET_CODE (x) == SYMBOL_REF
1587 && SYMBOL_REF_LOCAL_P (x)
1588 && !SYMBOL_REF_EXTERNAL_P (x)
1589 && mips_use_mips16_mode_p (SYMBOL_REF_DECL (x)));
1592 /* Return true if SYMBOL_REF X binds locally. */
1595 mips_symbol_binds_local_p (const_rtx x)
1597 return (SYMBOL_REF_DECL (x)
1598 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1599 : SYMBOL_REF_LOCAL_P (x));
1602 /* Return true if rtx constants of mode MODE should be put into a small
1606 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1608 return (!TARGET_EMBEDDED_DATA
1609 && TARGET_LOCAL_SDATA
1610 && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
1613 /* Return true if X should not be moved directly into register $25.
1614 We need this because many versions of GAS will treat "la $25,foo" as
1615 part of a call sequence and so allow a global "foo" to be lazily bound. */
1618 mips_dangerous_for_la25_p (rtx x)
1620 return (!TARGET_EXPLICIT_RELOCS
1622 && GET_CODE (x) == SYMBOL_REF
1623 && mips_global_symbol_p (x));
1626 /* Return true if calls to X might need $25 to be valid on entry. */
1629 mips_use_pic_fn_addr_reg_p (const_rtx x)
1631 if (!TARGET_USE_PIC_FN_ADDR_REG)
1634 /* MIPS16 stub functions are guaranteed not to use $25. */
1635 if (mips16_stub_function_p (x))
1638 if (GET_CODE (x) == SYMBOL_REF)
1640 /* If PLTs and copy relocations are available, the static linker
1641 will make sure that $25 is valid on entry to the target function. */
1642 if (TARGET_ABICALLS_PIC0)
1645 /* Locally-defined functions use absolute accesses to set up
1646 the global pointer. */
1647 if (TARGET_ABSOLUTE_ABICALLS
1648 && mips_symbol_binds_local_p (x)
1649 && !SYMBOL_REF_EXTERNAL_P (x))
1656 /* Return the method that should be used to access SYMBOL_REF or
1657 LABEL_REF X in context CONTEXT. */
1659 static enum mips_symbol_type
1660 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1663 return SYMBOL_GOT_DISP;
1665 if (GET_CODE (x) == LABEL_REF)
1667 /* Only return SYMBOL_PC_RELATIVE if we are generating MIPS16
1668 code and if we know that the label is in the current function's
1669 text section. LABEL_REFs are used for jump tables as well as
1670 text labels, so we must check whether jump tables live in the
1672 if (TARGET_MIPS16_SHORT_JUMP_TABLES
1673 && !LABEL_REF_NONLOCAL_P (x))
1674 return SYMBOL_PC_RELATIVE;
1676 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1677 return SYMBOL_GOT_PAGE_OFST;
1679 return SYMBOL_ABSOLUTE;
1682 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1684 if (SYMBOL_REF_TLS_MODEL (x))
1687 if (CONSTANT_POOL_ADDRESS_P (x))
1689 if (TARGET_MIPS16_TEXT_LOADS)
1690 return SYMBOL_PC_RELATIVE;
1692 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1693 return SYMBOL_PC_RELATIVE;
1695 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1696 return SYMBOL_GP_RELATIVE;
1699 /* Do not use small-data accesses for weak symbols; they may end up
1701 if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
1702 return SYMBOL_GP_RELATIVE;
1704 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1706 if (TARGET_ABICALLS_PIC2
1707 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1709 /* There are three cases to consider:
1711 - o32 PIC (either with or without explicit relocs)
1712 - n32/n64 PIC without explicit relocs
1713 - n32/n64 PIC with explicit relocs
1715 In the first case, both local and global accesses will use an
1716 R_MIPS_GOT16 relocation. We must correctly predict which of
1717 the two semantics (local or global) the assembler and linker
1718 will apply. The choice depends on the symbol's binding rather
1719 than its visibility.
1721 In the second case, the assembler will not use R_MIPS_GOT16
1722 relocations, but it chooses between local and global accesses
1723 in the same way as for o32 PIC.
1725 In the third case we have more freedom since both forms of
1726 access will work for any kind of symbol. However, there seems
1727 little point in doing things differently. */
1728 if (mips_global_symbol_p (x))
1729 return SYMBOL_GOT_DISP;
1731 return SYMBOL_GOT_PAGE_OFST;
1734 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1735 return SYMBOL_FORCE_TO_MEM;
1737 return SYMBOL_ABSOLUTE;
1740 /* Classify the base of symbolic expression X, given that X appears in
1743 static enum mips_symbol_type
1744 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1748 split_const (x, &x, &offset);
1749 if (UNSPEC_ADDRESS_P (x))
1750 return UNSPEC_ADDRESS_TYPE (x);
1752 return mips_classify_symbol (x, context);
1755 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1756 is the alignment in bytes of SYMBOL_REF X. */
1759 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1761 HOST_WIDE_INT align;
1763 align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1764 return IN_RANGE (offset, 0, align - 1);
1767 /* Return true if X is a symbolic constant that can be used in context
1768 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1771 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1772 enum mips_symbol_type *symbol_type)
1776 split_const (x, &x, &offset);
1777 if (UNSPEC_ADDRESS_P (x))
1779 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1780 x = UNSPEC_ADDRESS (x);
1782 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1784 *symbol_type = mips_classify_symbol (x, context);
1785 if (*symbol_type == SYMBOL_TLS)
1791 if (offset == const0_rtx)
1794 /* Check whether a nonzero offset is valid for the underlying
1796 switch (*symbol_type)
1798 case SYMBOL_ABSOLUTE:
1799 case SYMBOL_FORCE_TO_MEM:
1800 case SYMBOL_32_HIGH:
1801 case SYMBOL_64_HIGH:
1804 /* If the target has 64-bit pointers and the object file only
1805 supports 32-bit symbols, the values of those symbols will be
1806 sign-extended. In this case we can't allow an arbitrary offset
1807 in case the 32-bit value X + OFFSET has a different sign from X. */
1808 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1809 return offset_within_block_p (x, INTVAL (offset));
1811 /* In other cases the relocations can handle any offset. */
1814 case SYMBOL_PC_RELATIVE:
1815 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1816 In this case, we no longer have access to the underlying constant,
1817 but the original symbol-based access was known to be valid. */
1818 if (GET_CODE (x) == LABEL_REF)
1823 case SYMBOL_GP_RELATIVE:
1824 /* Make sure that the offset refers to something within the
1825 same object block. This should guarantee that the final
1826 PC- or GP-relative offset is within the 16-bit limit. */
1827 return offset_within_block_p (x, INTVAL (offset));
1829 case SYMBOL_GOT_PAGE_OFST:
1830 case SYMBOL_GOTOFF_PAGE:
1831 /* If the symbol is global, the GOT entry will contain the symbol's
1832 address, and we will apply a 16-bit offset after loading it.
1833 If the symbol is local, the linker should provide enough local
1834 GOT entries for a 16-bit offset, but larger offsets may lead
1836 return SMALL_INT (offset);
1840 /* There is no carry between the HI and LO REL relocations, so the
1841 offset is only valid if we know it won't lead to such a carry. */
1842 return mips_offset_within_alignment_p (x, INTVAL (offset));
1844 case SYMBOL_GOT_DISP:
1845 case SYMBOL_GOTOFF_DISP:
1846 case SYMBOL_GOTOFF_CALL:
1847 case SYMBOL_GOTOFF_LOADGP:
1850 case SYMBOL_GOTTPREL:
1858 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1859 single instruction. We rely on the fact that, in the worst case,
1860 all instructions involved in a MIPS16 address calculation are usually
1864 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1868 case SYMBOL_ABSOLUTE:
1869 /* When using 64-bit symbols, we need 5 preparatory instructions,
1872 lui $at,%highest(symbol)
1873 daddiu $at,$at,%higher(symbol)
1875 daddiu $at,$at,%hi(symbol)
1878 The final address is then $at + %lo(symbol). With 32-bit
1879 symbols we just need a preparatory LUI for normal mode and
1880 a preparatory LI and SLL for MIPS16. */
1881 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1883 case SYMBOL_GP_RELATIVE:
1884 /* Treat GP-relative accesses as taking a single instruction on
1885 MIPS16 too; the copy of $gp can often be shared. */
1888 case SYMBOL_PC_RELATIVE:
1889 /* PC-relative constants can be only be used with ADDIUPC,
1890 DADDIUPC, LWPC and LDPC. */
1891 if (mode == MAX_MACHINE_MODE
1892 || GET_MODE_SIZE (mode) == 4
1893 || GET_MODE_SIZE (mode) == 8)
1896 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
1899 case SYMBOL_FORCE_TO_MEM:
1900 /* LEAs will be converted into constant-pool references by
1902 if (mode == MAX_MACHINE_MODE)
1905 /* The constant must be loaded and then dereferenced. */
1908 case SYMBOL_GOT_DISP:
1909 /* The constant will have to be loaded from the GOT before it
1910 is used in an address. */
1911 if (mode != MAX_MACHINE_MODE)
1916 case SYMBOL_GOT_PAGE_OFST:
1917 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1918 local/global classification is accurate. The worst cases are:
1920 (1) For local symbols when generating o32 or o64 code. The assembler
1926 ...and the final address will be $at + %lo(symbol).
1928 (2) For global symbols when -mxgot. The assembler will use:
1930 lui $at,%got_hi(symbol)
1933 ...and the final address will be $at + %got_lo(symbol). */
1936 case SYMBOL_GOTOFF_PAGE:
1937 case SYMBOL_GOTOFF_DISP:
1938 case SYMBOL_GOTOFF_CALL:
1939 case SYMBOL_GOTOFF_LOADGP:
1940 case SYMBOL_32_HIGH:
1941 case SYMBOL_64_HIGH:
1947 case SYMBOL_GOTTPREL:
1950 /* A 16-bit constant formed by a single relocation, or a 32-bit
1951 constant formed from a high 16-bit relocation and a low 16-bit
1952 relocation. Use mips_split_p to determine which. 32-bit
1953 constants need an "lui; addiu" sequence for normal mode and
1954 an "li; sll; addiu" sequence for MIPS16 mode. */
1955 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1958 /* We don't treat a bare TLS symbol as a constant. */
1964 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1965 to load symbols of type TYPE into a register. Return 0 if the given
1966 type of symbol cannot be used as an immediate operand.
1968 Otherwise, return the number of instructions needed to load or store
1969 values of mode MODE to or from addresses of type TYPE. Return 0 if
1970 the given type of symbol is not valid in addresses.
1972 In both cases, treat extended MIPS16 instructions as two instructions. */
1975 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1977 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1980 /* A for_each_rtx callback. Stop the search if *X references a
1981 thread-local symbol. */
1984 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1986 return mips_tls_symbol_p (*x);
1989 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
1992 mips_cannot_force_const_mem (enum machine_mode mode, rtx x)
1994 enum mips_symbol_type type;
1997 /* There is no assembler syntax for expressing an address-sized
1999 if (GET_CODE (x) == HIGH)
2002 /* As an optimization, reject constants that mips_legitimize_move
2005 Suppose we have a multi-instruction sequence that loads constant C
2006 into register R. If R does not get allocated a hard register, and
2007 R is used in an operand that allows both registers and memory
2008 references, reload will consider forcing C into memory and using
2009 one of the instruction's memory alternatives. Returning false
2010 here will force it to use an input reload instead. */
2011 if (CONST_INT_P (x) && mips_legitimate_constant_p (mode, x))
2014 split_const (x, &base, &offset);
2015 if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)
2016 && type != SYMBOL_FORCE_TO_MEM)
2018 /* The same optimization as for CONST_INT. */
2019 if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
2022 /* If MIPS16 constant pools live in the text section, they should
2023 not refer to anything that might need run-time relocation. */
2024 if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
2028 /* TLS symbols must be computed by mips_legitimize_move. */
2029 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
2035 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
2036 constants when we're using a per-function constant pool. */
2039 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
2040 const_rtx x ATTRIBUTE_UNUSED)
2042 return !TARGET_MIPS16_PCREL_LOADS;
2045 /* Return true if register REGNO is a valid base register for mode MODE.
2046 STRICT_P is true if REG_OK_STRICT is in effect. */
2049 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
2052 if (!HARD_REGISTER_NUM_P (regno))
2056 regno = reg_renumber[regno];
2059 /* These fake registers will be eliminated to either the stack or
2060 hard frame pointer, both of which are usually valid base registers.
2061 Reload deals with the cases where the eliminated form isn't valid. */
2062 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
2065 /* In MIPS16 mode, the stack pointer can only address word and doubleword
2066 values, nothing smaller. There are two problems here:
2068 (a) Instantiating virtual registers can introduce new uses of the
2069 stack pointer. If these virtual registers are valid addresses,
2070 the stack pointer should be too.
2072 (b) Most uses of the stack pointer are not made explicit until
2073 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
2074 We don't know until that stage whether we'll be eliminating to the
2075 stack pointer (which needs the restriction) or the hard frame
2076 pointer (which doesn't).
2078 All in all, it seems more consistent to only enforce this restriction
2079 during and after reload. */
2080 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
2081 return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
2083 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
2086 /* Return true if X is a valid base register for mode MODE.
2087 STRICT_P is true if REG_OK_STRICT is in effect. */
2090 mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
2092 if (!strict_p && GET_CODE (x) == SUBREG)
2096 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
2099 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
2100 can address a value of mode MODE. */
2103 mips_valid_offset_p (rtx x, enum machine_mode mode)
2105 /* Check that X is a signed 16-bit number. */
2106 if (!const_arith_operand (x, Pmode))
2109 /* We may need to split multiword moves, so make sure that every word
2111 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2112 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
2118 /* Return true if a LO_SUM can address a value of mode MODE when the
2119 LO_SUM symbol has type SYMBOL_TYPE. */
2122 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
2124 /* Check that symbols of type SYMBOL_TYPE can be used to access values
2126 if (mips_symbol_insns (symbol_type, mode) == 0)
2129 /* Check that there is a known low-part relocation. */
2130 if (mips_lo_relocs[symbol_type] == NULL)
2133 /* We may need to split multiword moves, so make sure that each word
2134 can be accessed without inducing a carry. This is mainly needed
2135 for o64, which has historically only guaranteed 64-bit alignment
2136 for 128-bit types. */
2137 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2138 && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
2144 /* Return true if X is a valid address for machine mode MODE. If it is,
2145 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
2149 mips_classify_address (struct mips_address_info *info, rtx x,
2150 enum machine_mode mode, bool strict_p)
2152 switch (GET_CODE (x))
2156 info->type = ADDRESS_REG;
2158 info->offset = const0_rtx;
2159 return mips_valid_base_register_p (info->reg, mode, strict_p);
2162 info->type = ADDRESS_REG;
2163 info->reg = XEXP (x, 0);
2164 info->offset = XEXP (x, 1);
2165 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2166 && mips_valid_offset_p (info->offset, mode));
2169 info->type = ADDRESS_LO_SUM;
2170 info->reg = XEXP (x, 0);
2171 info->offset = XEXP (x, 1);
2172 /* We have to trust the creator of the LO_SUM to do something vaguely
2173 sane. Target-independent code that creates a LO_SUM should also
2174 create and verify the matching HIGH. Target-independent code that
2175 adds an offset to a LO_SUM must prove that the offset will not
2176 induce a carry. Failure to do either of these things would be
2177 a bug, and we are not required to check for it here. The MIPS
2178 backend itself should only create LO_SUMs for valid symbolic
2179 constants, with the high part being either a HIGH or a copy
2182 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
2183 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2184 && mips_valid_lo_sum_p (info->symbol_type, mode));
2187 /* Small-integer addresses don't occur very often, but they
2188 are legitimate if $0 is a valid base register. */
2189 info->type = ADDRESS_CONST_INT;
2190 return !TARGET_MIPS16 && SMALL_INT (x);
2195 info->type = ADDRESS_SYMBOLIC;
2196 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
2198 && mips_symbol_insns (info->symbol_type, mode) > 0
2199 && !mips_split_p[info->symbol_type]);
2206 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
2209 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2211 struct mips_address_info addr;
2213 return mips_classify_address (&addr, x, mode, strict_p);
2216 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
2219 mips_stack_address_p (rtx x, enum machine_mode mode)
2221 struct mips_address_info addr;
2223 return (mips_classify_address (&addr, x, mode, false)
2224 && addr.type == ADDRESS_REG
2225 && addr.reg == stack_pointer_rtx);
2228 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2229 address instruction. Note that such addresses are not considered
2230 legitimate in the TARGET_LEGITIMATE_ADDRESS_P sense, because their use
2231 is so restricted. */
2234 mips_lwxs_address_p (rtx addr)
2237 && GET_CODE (addr) == PLUS
2238 && REG_P (XEXP (addr, 1)))
2240 rtx offset = XEXP (addr, 0);
2241 if (GET_CODE (offset) == MULT
2242 && REG_P (XEXP (offset, 0))
2243 && CONST_INT_P (XEXP (offset, 1))
2244 && INTVAL (XEXP (offset, 1)) == 4)
2250 /* Return true if a value at OFFSET bytes from base register BASE can be
2251 accessed using an unextended MIPS16 instruction. MODE is the mode of
2254 Usually the offset in an unextended instruction is a 5-bit field.
2255 The offset is unsigned and shifted left once for LH and SH, twice
2256 for LW and SW, and so on. An exception is LWSP and SWSP, which have
2257 an 8-bit immediate field that's shifted left twice. */
2260 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
2261 unsigned HOST_WIDE_INT offset)
2263 if (offset % GET_MODE_SIZE (mode) == 0)
2265 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2266 return offset < 256U * GET_MODE_SIZE (mode);
2267 return offset < 32U * GET_MODE_SIZE (mode);
2272 /* Return the number of instructions needed to load or store a value
2273 of mode MODE at address X. Return 0 if X isn't valid for MODE.
2274 Assume that multiword moves may need to be split into word moves
2275 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2278 For MIPS16 code, count extended instructions as two instructions. */
2281 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2283 struct mips_address_info addr;
2286 /* BLKmode is used for single unaligned loads and stores and should
2287 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2288 meaningless, so we have to single it out as a special case one way
2290 if (mode != BLKmode && might_split_p)
2291 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2295 if (mips_classify_address (&addr, x, mode, false))
2300 && !mips16_unextended_reference_p (mode, addr.reg,
2301 UINTVAL (addr.offset)))
2305 case ADDRESS_LO_SUM:
2306 return TARGET_MIPS16 ? factor * 2 : factor;
2308 case ADDRESS_CONST_INT:
2311 case ADDRESS_SYMBOLIC:
2312 return factor * mips_symbol_insns (addr.symbol_type, mode);
2317 /* Return the number of instructions needed to load constant X.
2318 Return 0 if X isn't a valid constant. */
2321 mips_const_insns (rtx x)
2323 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2324 enum mips_symbol_type symbol_type;
2327 switch (GET_CODE (x))
2330 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2332 || !mips_split_p[symbol_type])
2335 /* This is simply an LUI for normal mode. It is an extended
2336 LI followed by an extended SLL for MIPS16. */
2337 return TARGET_MIPS16 ? 4 : 1;
2341 /* Unsigned 8-bit constants can be loaded using an unextended
2342 LI instruction. Unsigned 16-bit constants can be loaded
2343 using an extended LI. Negative constants must be loaded
2344 using LI and then negated. */
2345 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2346 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2347 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2348 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2351 return mips_build_integer (codes, INTVAL (x));
2355 /* Allow zeros for normal mode, where we can use $0. */
2356 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2362 /* See if we can refer to X directly. */
2363 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2364 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2366 /* Otherwise try splitting the constant into a base and offset.
2367 If the offset is a 16-bit value, we can load the base address
2368 into a register and then use (D)ADDIU to add in the offset.
2369 If the offset is larger, we can load the base and offset
2370 into separate registers and add them together with (D)ADDU.
2371 However, the latter is only possible before reload; during
2372 and after reload, we must have the option of forcing the
2373 constant into the pool instead. */
2374 split_const (x, &x, &offset);
2377 int n = mips_const_insns (x);
2380 if (SMALL_INT (offset))
2382 else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
2383 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2390 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2398 /* X is a doubleword constant that can be handled by splitting it into
2399 two words and loading each word separately. Return the number of
2400 instructions required to do this. */
2403 mips_split_const_insns (rtx x)
2405 unsigned int low, high;
2407 low = mips_const_insns (mips_subword (x, false));
2408 high = mips_const_insns (mips_subword (x, true));
2409 gcc_assert (low > 0 && high > 0);
2413 /* Return the number of instructions needed to implement INSN,
2414 given that it loads from or stores to MEM. Count extended
2415 MIPS16 instructions as two instructions. */
2418 mips_load_store_insns (rtx mem, rtx insn)
2420 enum machine_mode mode;
2424 gcc_assert (MEM_P (mem));
2425 mode = GET_MODE (mem);
2427 /* Try to prove that INSN does not need to be split. */
2428 might_split_p = true;
2429 if (GET_MODE_BITSIZE (mode) == 64)
2431 set = single_set (insn);
2432 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2433 might_split_p = false;
2436 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2439 /* Return the number of instructions needed for an integer division. */
2442 mips_idiv_insns (void)
2447 if (TARGET_CHECK_ZERO_DIV)
2449 if (GENERATE_DIVIDE_TRAPS)
2455 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2460 /* Emit a move from SRC to DEST. Assume that the move expanders can
2461 handle all moves if !can_create_pseudo_p (). The distinction is
2462 important because, unlike emit_move_insn, the move expanders know
2463 how to force Pmode objects into the constant pool even when the
2464 constant pool address is not itself legitimate. */
2467 mips_emit_move (rtx dest, rtx src)
2469 return (can_create_pseudo_p ()
2470 ? emit_move_insn (dest, src)
2471 : emit_move_insn_1 (dest, src));
2474 /* Emit an instruction of the form (set TARGET (CODE OP0)). */
2477 mips_emit_unary (enum rtx_code code, rtx target, rtx op0)
2479 emit_insn (gen_rtx_SET (VOIDmode, target,
2480 gen_rtx_fmt_e (code, GET_MODE (op0), op0)));
2483 /* Compute (CODE OP0) and store the result in a new register of mode MODE.
2484 Return that new register. */
2487 mips_force_unary (enum machine_mode mode, enum rtx_code code, rtx op0)
2491 reg = gen_reg_rtx (mode);
2492 mips_emit_unary (code, reg, op0);
2496 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2499 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2501 emit_insn (gen_rtx_SET (VOIDmode, target,
2502 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2505 /* Compute (CODE OP0 OP1) and store the result in a new register
2506 of mode MODE. Return that new register. */
2509 mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2513 reg = gen_reg_rtx (mode);
2514 mips_emit_binary (code, reg, op0, op1);
2518 /* Copy VALUE to a register and return that register. If new pseudos
2519 are allowed, copy it into a new register, otherwise use DEST. */
2522 mips_force_temporary (rtx dest, rtx value)
2524 if (can_create_pseudo_p ())
2525 return force_reg (Pmode, value);
2528 mips_emit_move (dest, value);
2533 /* Emit a call sequence with call pattern PATTERN and return the call
2534 instruction itself (which is not necessarily the last instruction
2535 emitted). ORIG_ADDR is the original, unlegitimized address,
2536 ADDR is the legitimized form, and LAZY_P is true if the call
2537 address is lazily-bound. */
2540 mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
2544 insn = emit_call_insn (pattern);
2546 if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
2548 /* MIPS16 JALRs only take MIPS16 registers. If the target
2549 function requires $25 to be valid on entry, we must copy it
2550 there separately. The move instruction can be put in the
2551 call's delay slot. */
2552 reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
2553 emit_insn_before (gen_move_insn (reg, addr), insn);
2554 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
2558 /* Lazy-binding stubs require $gp to be valid on entry. */
2559 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2563 /* See the comment above load_call<mode> for details. */
2564 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2565 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2566 emit_insn (gen_update_got_version ());
2571 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2572 then add CONST_INT OFFSET to the result. */
2575 mips_unspec_address_offset (rtx base, rtx offset,
2576 enum mips_symbol_type symbol_type)
2578 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2579 UNSPEC_ADDRESS_FIRST + symbol_type);
2580 if (offset != const0_rtx)
2581 base = gen_rtx_PLUS (Pmode, base, offset);
2582 return gen_rtx_CONST (Pmode, base);
2585 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2586 type SYMBOL_TYPE. */
2589 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2593 split_const (address, &base, &offset);
2594 return mips_unspec_address_offset (base, offset, symbol_type);
2597 /* If OP is an UNSPEC address, return the address to which it refers,
2598 otherwise return OP itself. */
2601 mips_strip_unspec_address (rtx op)
2605 split_const (op, &base, &offset);
2606 if (UNSPEC_ADDRESS_P (base))
2607 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
2611 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2612 high part to BASE and return the result. Just return BASE otherwise.
2613 TEMP is as for mips_force_temporary.
2615 The returned expression can be used as the first operand to a LO_SUM. */
2618 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2619 enum mips_symbol_type symbol_type)
2621 if (mips_split_p[symbol_type])
2623 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2624 addr = mips_force_temporary (temp, addr);
2625 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2630 /* Return an instruction that copies $gp into register REG. We want
2631 GCC to treat the register's value as constant, so that its value
2632 can be rematerialized on demand. */
2635 gen_load_const_gp (rtx reg)
2637 return (Pmode == SImode
2638 ? gen_load_const_gp_si (reg)
2639 : gen_load_const_gp_di (reg));
2642 /* Return a pseudo register that contains the value of $gp throughout
2643 the current function. Such registers are needed by MIPS16 functions,
2644 for which $gp itself is not a valid base register or addition operand. */
2647 mips16_gp_pseudo_reg (void)
2649 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2653 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2655 push_topmost_sequence ();
2657 scan = get_insns ();
2658 while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
2659 scan = NEXT_INSN (scan);
2661 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2662 emit_insn_after (insn, scan);
2664 pop_topmost_sequence ();
2667 return cfun->machine->mips16_gp_pseudo_rtx;
2670 /* Return a base register that holds pic_offset_table_rtx.
2671 TEMP, if nonnull, is a scratch Pmode base register. */
2674 mips_pic_base_register (rtx temp)
2677 return pic_offset_table_rtx;
2679 if (currently_expanding_to_rtl)
2680 return mips16_gp_pseudo_reg ();
2682 if (can_create_pseudo_p ())
2683 temp = gen_reg_rtx (Pmode);
2686 /* The first post-reload split exposes all references to $gp
2687 (both uses and definitions). All references must remain
2688 explicit after that point.
2690 It is safe to introduce uses of $gp at any time, so for
2691 simplicity, we do that before the split too. */
2692 mips_emit_move (temp, pic_offset_table_rtx);
2694 emit_insn (gen_load_const_gp (temp));
2698 /* Return the RHS of a load_call<mode> insn. */
2701 mips_unspec_call (rtx reg, rtx symbol)
2705 vec = gen_rtvec (3, reg, symbol, gen_rtx_REG (SImode, GOT_VERSION_REGNUM));
2706 return gen_rtx_UNSPEC (Pmode, vec, UNSPEC_LOAD_CALL);
2709 /* If SRC is the RHS of a load_call<mode> insn, return the underlying symbol
2710 reference. Return NULL_RTX otherwise. */
2713 mips_strip_unspec_call (rtx src)
2715 if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL)
2716 return mips_strip_unspec_address (XVECEXP (src, 0, 1));
2720 /* Create and return a GOT reference of type TYPE for address ADDR.
2721 TEMP, if nonnull, is a scratch Pmode base register. */
2724 mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
2726 rtx base, high, lo_sum_symbol;
2728 base = mips_pic_base_register (temp);
2730 /* If we used the temporary register to load $gp, we can't use
2731 it for the high part as well. */
2732 if (temp != NULL && reg_overlap_mentioned_p (base, temp))
2735 high = mips_unspec_offset_high (temp, base, addr, type);
2736 lo_sum_symbol = mips_unspec_address (addr, type);
2738 if (type == SYMBOL_GOTOFF_CALL)
2739 return mips_unspec_call (high, lo_sum_symbol);
2741 return (Pmode == SImode
2742 ? gen_unspec_gotsi (high, lo_sum_symbol)
2743 : gen_unspec_gotdi (high, lo_sum_symbol));
2746 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2747 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2748 constant in that context and can be split into high and low parts.
2749 If so, and if LOW_OUT is nonnull, emit the high part and store the
2750 low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
2752 TEMP is as for mips_force_temporary and is used to load the high
2753 part into a register.
2755 When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
2756 a legitimize SET_SRC for an .md pattern, otherwise the low part
2757 is guaranteed to be a legitimate address for mode MODE. */
2760 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
2762 enum mips_symbol_context context;
2763 enum mips_symbol_type symbol_type;
2766 context = (mode == MAX_MACHINE_MODE
2767 ? SYMBOL_CONTEXT_LEA
2768 : SYMBOL_CONTEXT_MEM);
2769 if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
2771 addr = XEXP (addr, 0);
2772 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2773 && mips_symbol_insns (symbol_type, mode) > 0
2774 && mips_split_hi_p[symbol_type])
2777 switch (symbol_type)
2779 case SYMBOL_GOT_PAGE_OFST:
2780 /* The high part of a page/ofst pair is loaded from the GOT. */
2781 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
2792 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2793 && mips_symbol_insns (symbol_type, mode) > 0
2794 && mips_split_p[symbol_type])
2797 switch (symbol_type)
2799 case SYMBOL_GOT_DISP:
2800 /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */
2801 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
2804 case SYMBOL_GP_RELATIVE:
2805 high = mips_pic_base_register (temp);
2806 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2810 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2811 high = mips_force_temporary (temp, high);
2812 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2821 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2822 mips_force_temporary; it is only needed when OFFSET is not a
2826 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2828 if (!SMALL_OPERAND (offset))
2834 /* Load the full offset into a register so that we can use
2835 an unextended instruction for the address itself. */
2836 high = GEN_INT (offset);
2841 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
2842 The addition inside the macro CONST_HIGH_PART may cause an
2843 overflow, so we need to force a sign-extension check. */
2844 high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
2845 offset = CONST_LOW_PART (offset);
2847 high = mips_force_temporary (temp, high);
2848 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2850 return plus_constant (reg, offset);
2853 /* The __tls_get_attr symbol. */
2854 static GTY(()) rtx mips_tls_symbol;
2856 /* Return an instruction sequence that calls __tls_get_addr. SYM is
2857 the TLS symbol we are referencing and TYPE is the symbol type to use
2858 (either global dynamic or local dynamic). V0 is an RTX for the
2859 return value location. */
2862 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2866 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2868 if (!mips_tls_symbol)
2869 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2871 loc = mips_unspec_address (sym, type);
2875 emit_insn (gen_rtx_SET (Pmode, a0,
2876 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2877 insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
2878 const0_rtx, NULL_RTX, false);
2879 RTL_CONST_CALL_P (insn) = 1;
2880 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2881 insn = get_insns ();
2888 /* Return a pseudo register that contains the current thread pointer. */
2895 tp = gen_reg_rtx (Pmode);
2896 if (Pmode == DImode)
2897 emit_insn (gen_tls_get_tp_di (tp));
2899 emit_insn (gen_tls_get_tp_si (tp));
2903 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
2904 its address. The return value will be both a valid address and a valid
2905 SET_SRC (either a REG or a LO_SUM). */
2908 mips_legitimize_tls_address (rtx loc)
2910 rtx dest, insn, v0, tp, tmp1, tmp2, eqv;
2911 enum tls_model model;
2915 sorry ("MIPS16 TLS");
2916 return gen_reg_rtx (Pmode);
2919 model = SYMBOL_REF_TLS_MODEL (loc);
2920 /* Only TARGET_ABICALLS code can have more than one module; other
2921 code must be be static and should not use a GOT. All TLS models
2922 reduce to local exec in this situation. */
2923 if (!TARGET_ABICALLS)
2924 model = TLS_MODEL_LOCAL_EXEC;
2928 case TLS_MODEL_GLOBAL_DYNAMIC:
2929 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2930 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2931 dest = gen_reg_rtx (Pmode);
2932 emit_libcall_block (insn, dest, v0, loc);
2935 case TLS_MODEL_LOCAL_DYNAMIC:
2936 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2937 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2938 tmp1 = gen_reg_rtx (Pmode);
2940 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2941 share the LDM result with other LD model accesses. */
2942 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2944 emit_libcall_block (insn, tmp1, v0, eqv);
2946 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2947 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2948 mips_unspec_address (loc, SYMBOL_DTPREL));
2951 case TLS_MODEL_INITIAL_EXEC:
2952 tp = mips_get_tp ();
2953 tmp1 = gen_reg_rtx (Pmode);
2954 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2955 if (Pmode == DImode)
2956 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2958 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2959 dest = gen_reg_rtx (Pmode);
2960 emit_insn (gen_add3_insn (dest, tmp1, tp));
2963 case TLS_MODEL_LOCAL_EXEC:
2964 tp = mips_get_tp ();
2965 tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL);
2966 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2967 mips_unspec_address (loc, SYMBOL_TPREL));
2976 /* If X is not a valid address for mode MODE, force it into a register. */
2979 mips_force_address (rtx x, enum machine_mode mode)
2981 if (!mips_legitimate_address_p (mode, x, false))
2982 x = force_reg (Pmode, x);
2986 /* This function is used to implement LEGITIMIZE_ADDRESS. If X can
2987 be legitimized in a way that the generic machinery might not expect,
2988 return a new address, otherwise return NULL. MODE is the mode of
2989 the memory being accessed. */
2992 mips_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
2993 enum machine_mode mode)
2996 HOST_WIDE_INT offset;
2998 if (mips_tls_symbol_p (x))
2999 return mips_legitimize_tls_address (x);
3001 /* See if the address can split into a high part and a LO_SUM. */
3002 if (mips_split_symbol (NULL, x, mode, &addr))
3003 return mips_force_address (addr, mode);
3005 /* Handle BASE + OFFSET using mips_add_offset. */
3006 mips_split_plus (x, &base, &offset);
3009 if (!mips_valid_base_register_p (base, mode, false))
3010 base = copy_to_mode_reg (Pmode, base);
3011 addr = mips_add_offset (NULL, base, offset);
3012 return mips_force_address (addr, mode);
3018 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
3021 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
3023 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
3024 enum machine_mode mode;
3025 unsigned int i, num_ops;
3028 mode = GET_MODE (dest);
3029 num_ops = mips_build_integer (codes, value);
3031 /* Apply each binary operation to X. Invariant: X is a legitimate
3032 source operand for a SET pattern. */
3033 x = GEN_INT (codes[0].value);
3034 for (i = 1; i < num_ops; i++)
3036 if (!can_create_pseudo_p ())
3038 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
3042 x = force_reg (mode, x);
3043 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
3046 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
3049 /* Subroutine of mips_legitimize_move. Move constant SRC into register
3050 DEST given that SRC satisfies immediate_operand but doesn't satisfy
3054 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
3058 /* Split moves of big integers into smaller pieces. */
3059 if (splittable_const_int_operand (src, mode))
3061 mips_move_integer (dest, dest, INTVAL (src));
3065 /* Split moves of symbolic constants into high/low pairs. */
3066 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
3068 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
3072 /* Generate the appropriate access sequences for TLS symbols. */
3073 if (mips_tls_symbol_p (src))
3075 mips_emit_move (dest, mips_legitimize_tls_address (src));
3079 /* If we have (const (plus symbol offset)), and that expression cannot
3080 be forced into memory, load the symbol first and add in the offset.
3081 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
3082 forced into memory, as it usually produces better code. */
3083 split_const (src, &base, &offset);
3084 if (offset != const0_rtx
3085 && (targetm.cannot_force_const_mem (mode, src)
3086 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
3088 base = mips_force_temporary (dest, base);
3089 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
3093 src = force_const_mem (mode, src);
3095 /* When using explicit relocs, constant pool references are sometimes
3096 not legitimate addresses. */
3097 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
3098 mips_emit_move (dest, src);
3101 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
3102 sequence that is valid. */
3105 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
3107 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
3109 mips_emit_move (dest, force_reg (mode, src));
3113 /* We need to deal with constants that would be legitimate
3114 immediate_operands but aren't legitimate move_operands. */
3115 if (CONSTANT_P (src) && !move_operand (src, mode))
3117 mips_legitimize_const_move (mode, dest, src);
3118 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
3124 /* Return true if value X in context CONTEXT is a small-data address
3125 that can be rewritten as a LO_SUM. */
3128 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
3130 enum mips_symbol_type symbol_type;
3132 return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
3133 && !mips_split_p[SYMBOL_GP_RELATIVE]
3134 && mips_symbolic_constant_p (x, context, &symbol_type)
3135 && symbol_type == SYMBOL_GP_RELATIVE);
3138 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
3139 containing MEM, or null if none. */
3142 mips_small_data_pattern_1 (rtx *loc, void *data)
3144 enum mips_symbol_context context;
3146 if (GET_CODE (*loc) == LO_SUM)
3151 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
3156 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
3157 return mips_rewrite_small_data_p (*loc, context);
3160 /* Return true if OP refers to small data symbols directly, not through
3164 mips_small_data_pattern_p (rtx op)
3166 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
3169 /* A for_each_rtx callback, used by mips_rewrite_small_data.
3170 DATA is the containing MEM, or null if none. */
3173 mips_rewrite_small_data_1 (rtx *loc, void *data)
3175 enum mips_symbol_context context;
3179 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
3183 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
3184 if (mips_rewrite_small_data_p (*loc, context))
3185 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
3187 if (GET_CODE (*loc) == LO_SUM)
3193 /* Rewrite instruction pattern PATTERN so that it refers to small data
3194 using explicit relocations. */
3197 mips_rewrite_small_data (rtx pattern)
3199 pattern = copy_insn (pattern);
3200 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
3204 /* We need a lot of little routines to check the range of MIPS16 immediate
3208 m16_check_op (rtx op, int low, int high, int mask)
3210 return (CONST_INT_P (op)
3211 && IN_RANGE (INTVAL (op), low, high)
3212 && (INTVAL (op) & mask) == 0);
3216 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3218 return m16_check_op (op, 0x1, 0x8, 0);
3222 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3224 return m16_check_op (op, -0x8, 0x7, 0);
3228 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3230 return m16_check_op (op, -0x7, 0x8, 0);
3234 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3236 return m16_check_op (op, -0x10, 0xf, 0);
3240 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3242 return m16_check_op (op, -0xf, 0x10, 0);
3246 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3248 return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
3252 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3254 return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
3258 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3260 return m16_check_op (op, -0x80, 0x7f, 0);
3264 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3266 return m16_check_op (op, -0x7f, 0x80, 0);
3270 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3272 return m16_check_op (op, 0x0, 0xff, 0);
3276 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3278 return m16_check_op (op, -0xff, 0x0, 0);
3282 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3284 return m16_check_op (op, -0x1, 0xfe, 0);
3288 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3290 return m16_check_op (op, 0x0, 0xff << 2, 3);
3294 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3296 return m16_check_op (op, -0xff << 2, 0x0, 3);
3300 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3302 return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
3306 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3308 return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
3311 /* The cost of loading values from the constant pool. It should be
3312 larger than the cost of any constant we want to synthesize inline. */
3313 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
3315 /* Return the cost of X when used as an operand to the MIPS16 instruction
3316 that implements CODE. Return -1 if there is no such instruction, or if
3317 X is not a valid immediate operand for it. */
3320 mips16_constant_cost (int code, HOST_WIDE_INT x)
3327 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
3328 other shifts are extended. The shift patterns truncate the shift
3329 count to the right size, so there are no out-of-range values. */
3330 if (IN_RANGE (x, 1, 8))
3332 return COSTS_N_INSNS (1);
3335 if (IN_RANGE (x, -128, 127))
3337 if (SMALL_OPERAND (x))
3338 return COSTS_N_INSNS (1);
3342 /* Like LE, but reject the always-true case. */
3346 /* We add 1 to the immediate and use SLT. */
3349 /* We can use CMPI for an xor with an unsigned 16-bit X. */
3352 if (IN_RANGE (x, 0, 255))
3354 if (SMALL_OPERAND_UNSIGNED (x))
3355 return COSTS_N_INSNS (1);
3360 /* Equality comparisons with 0 are cheap. */
3370 /* Return true if there is a non-MIPS16 instruction that implements CODE
3371 and if that instruction accepts X as an immediate operand. */
3374 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
3381 /* All shift counts are truncated to a valid constant. */
3386 /* Likewise rotates, if the target supports rotates at all. */
3392 /* These instructions take 16-bit unsigned immediates. */
3393 return SMALL_OPERAND_UNSIGNED (x);
3398 /* These instructions take 16-bit signed immediates. */
3399 return SMALL_OPERAND (x);
3405 /* The "immediate" forms of these instructions are really
3406 implemented as comparisons with register 0. */
3411 /* Likewise, meaning that the only valid immediate operand is 1. */
3415 /* We add 1 to the immediate and use SLT. */
3416 return SMALL_OPERAND (x + 1);
3419 /* Likewise SLTU, but reject the always-true case. */
3420 return SMALL_OPERAND (x + 1) && x + 1 != 0;
3424 /* The bit position and size are immediate operands. */
3425 return ISA_HAS_EXT_INS;
3428 /* By default assume that $0 can be used for 0. */
3433 /* Return the cost of binary operation X, given that the instruction
3434 sequence for a word-sized or smaller operation has cost SINGLE_COST
3435 and that the sequence of a double-word operation has cost DOUBLE_COST.
3436 If SPEED is true, optimize for speed otherwise optimize for size. */
3439 mips_binary_cost (rtx x, int single_cost, int double_cost, bool speed)
3443 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3448 + rtx_cost (XEXP (x, 0), SET, speed)
3449 + rtx_cost (XEXP (x, 1), GET_CODE (x), speed));
3452 /* Return the cost of floating-point multiplications of mode MODE. */
3455 mips_fp_mult_cost (enum machine_mode mode)
3457 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3460 /* Return the cost of floating-point divisions of mode MODE. */
3463 mips_fp_div_cost (enum machine_mode mode)
3465 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3468 /* Return the cost of sign-extending OP to mode MODE, not including the
3469 cost of OP itself. */
3472 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3475 /* Extended loads are as cheap as unextended ones. */
3478 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3479 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3482 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3483 /* We can use SEB or SEH. */
3484 return COSTS_N_INSNS (1);
3486 /* We need to use a shift left and a shift right. */
3487 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3490 /* Return the cost of zero-extending OP to mode MODE, not including the
3491 cost of OP itself. */
3494 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3497 /* Extended loads are as cheap as unextended ones. */
3500 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3501 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3502 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3504 if (GENERATE_MIPS16E)
3505 /* We can use ZEB or ZEH. */
3506 return COSTS_N_INSNS (1);
3509 /* We need to load 0xff or 0xffff into a register and use AND. */
3510 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3512 /* We can use ANDI. */
3513 return COSTS_N_INSNS (1);
3516 /* Implement TARGET_RTX_COSTS. */
3519 mips_rtx_costs (rtx x, int code, int outer_code, int *total, bool speed)
3521 enum machine_mode mode = GET_MODE (x);
3522 bool float_mode_p = FLOAT_MODE_P (mode);
3526 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3527 appear in the instruction stream, and the cost of a comparison is
3528 really the cost of the branch or scc condition. At the time of
3529 writing, GCC only uses an explicit outer COMPARE code when optabs
3530 is testing whether a constant is expensive enough to force into a
3531 register. We want optabs to pass such constants through the MIPS
3532 expanders instead, so make all constants very cheap here. */
3533 if (outer_code == COMPARE)
3535 gcc_assert (CONSTANT_P (x));
3543 /* Treat *clear_upper32-style ANDs as having zero cost in the
3544 second operand. The cost is entirely in the first operand.
3546 ??? This is needed because we would otherwise try to CSE
3547 the constant operand. Although that's the right thing for
3548 instructions that continue to be a register operation throughout
3549 compilation, it is disastrous for instructions that could
3550 later be converted into a memory operation. */
3552 && outer_code == AND
3553 && UINTVAL (x) == 0xffffffff)
3561 cost = mips16_constant_cost (outer_code, INTVAL (x));
3570 /* When not optimizing for size, we care more about the cost
3571 of hot code, and hot code is often in a loop. If a constant
3572 operand needs to be forced into a register, we will often be
3573 able to hoist the constant load out of the loop, so the load
3574 should not contribute to the cost. */
3575 if (speed || mips_immediate_operand_p (outer_code, INTVAL (x)))
3587 if (force_to_mem_operand (x, VOIDmode))
3589 *total = COSTS_N_INSNS (1);
3592 cost = mips_const_insns (x);
3595 /* If the constant is likely to be stored in a GPR, SETs of
3596 single-insn constants are as cheap as register sets; we
3597 never want to CSE them.
3599 Don't reduce the cost of storing a floating-point zero in
3600 FPRs. If we have a zero in an FPR for other reasons, we
3601 can get better cfg-cleanup and delayed-branch results by
3602 using it consistently, rather than using $0 sometimes and
3603 an FPR at other times. Also, moves between floating-point
3604 registers are sometimes cheaper than (D)MTC1 $0. */
3606 && outer_code == SET
3607 && !(float_mode_p && TARGET_HARD_FLOAT))
3609 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3610 want to CSE the constant itself. It is usually better to
3611 have N copies of the last operation in the sequence and one
3612 shared copy of the other operations. (Note that this is
3613 not true for MIPS16 code, where the final operation in the
3614 sequence is often an extended instruction.)
3616 Also, if we have a CONST_INT, we don't know whether it is
3617 for a word or doubleword operation, so we cannot rely on
3618 the result of mips_build_integer. */
3619 else if (!TARGET_MIPS16
3620 && (outer_code == SET || mode == VOIDmode))
3622 *total = COSTS_N_INSNS (cost);
3625 /* The value will need to be fetched from the constant pool. */
3626 *total = CONSTANT_POOL_COST;
3630 /* If the address is legitimate, return the number of
3631 instructions it needs. */
3633 cost = mips_address_insns (addr, mode, true);
3636 *total = COSTS_N_INSNS (cost + 1);
3639 /* Check for a scaled indexed address. */
3640 if (mips_lwxs_address_p (addr))
3642 *total = COSTS_N_INSNS (2);
3645 /* Otherwise use the default handling. */
3649 *total = COSTS_N_INSNS (6);
3653 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3657 /* Check for a *clear_upper32 pattern and treat it like a zero
3658 extension. See the pattern's comment for details. */
3661 && CONST_INT_P (XEXP (x, 1))
3662 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3664 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3665 + rtx_cost (XEXP (x, 0), SET, speed));
3672 /* Double-word operations use two single-word operations. */
3673 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
3682 if (CONSTANT_P (XEXP (x, 1)))
3683 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
3686 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
3692 *total = mips_cost->fp_add;
3694 *total = COSTS_N_INSNS (4);
3698 /* Low-part immediates need an extended MIPS16 instruction. */
3699 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3700 + rtx_cost (XEXP (x, 0), SET, speed));
3715 /* Branch comparisons have VOIDmode, so use the first operand's
3717 mode = GET_MODE (XEXP (x, 0));
3718 if (FLOAT_MODE_P (mode))
3720 *total = mips_cost->fp_add;
3723 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
3729 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3730 && TARGET_FUSED_MADD
3731 && !HONOR_NANS (mode)
3732 && !HONOR_SIGNED_ZEROS (mode))
3734 /* See if we can use NMADD or NMSUB. See mips.md for the
3735 associated patterns. */
3736 rtx op0 = XEXP (x, 0);
3737 rtx op1 = XEXP (x, 1);
3738 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3740 *total = (mips_fp_mult_cost (mode)
3741 + rtx_cost (XEXP (XEXP (op0, 0), 0), SET, speed)
3742 + rtx_cost (XEXP (op0, 1), SET, speed)
3743 + rtx_cost (op1, SET, speed));
3746 if (GET_CODE (op1) == MULT)
3748 *total = (mips_fp_mult_cost (mode)
3749 + rtx_cost (op0, SET, speed)
3750 + rtx_cost (XEXP (op1, 0), SET, speed)
3751 + rtx_cost (XEXP (op1, 1), SET, speed));
3760 /* If this is part of a MADD or MSUB, treat the PLUS as
3763 && TARGET_FUSED_MADD
3764 && GET_CODE (XEXP (x, 0)) == MULT)
3767 *total = mips_cost->fp_add;
3771 /* Double-word operations require three single-word operations and
3772 an SLTU. The MIPS16 version then needs to move the result of
3773 the SLTU from $24 to a MIPS16 register. */
3774 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3775 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4),
3781 && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3782 && TARGET_FUSED_MADD
3783 && !HONOR_NANS (mode)
3784 && HONOR_SIGNED_ZEROS (mode))
3786 /* See if we can use NMADD or NMSUB. See mips.md for the
3787 associated patterns. */
3788 rtx op = XEXP (x, 0);
3789 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3790 && GET_CODE (XEXP (op, 0)) == MULT)
3792 *total = (mips_fp_mult_cost (mode)
3793 + rtx_cost (XEXP (XEXP (op, 0), 0), SET, speed)
3794 + rtx_cost (XEXP (XEXP (op, 0), 1), SET, speed)
3795 + rtx_cost (XEXP (op, 1), SET, speed));
3801 *total = mips_cost->fp_add;
3803 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3808 *total = mips_fp_mult_cost (mode);
3809 else if (mode == DImode && !TARGET_64BIT)
3810 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3811 where the mulsidi3 always includes an MFHI and an MFLO. */
3813 ? mips_cost->int_mult_si * 3 + 6
3814 : COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9));
3816 *total = (ISA_HAS_MUL3 ? 1 : 2);
3817 else if (mode == DImode)
3818 *total = mips_cost->int_mult_di;
3820 *total = mips_cost->int_mult_si;
3824 /* Check for a reciprocal. */
3827 && flag_unsafe_math_optimizations
3828 && XEXP (x, 0) == CONST1_RTX (mode))
3830 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3831 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3832 division as being free. */
3833 *total = rtx_cost (XEXP (x, 1), SET, speed);
3835 *total = (mips_fp_div_cost (mode)
3836 + rtx_cost (XEXP (x, 1), SET, speed));
3845 *total = mips_fp_div_cost (mode);
3854 /* It is our responsibility to make division by a power of 2
3855 as cheap as 2 register additions if we want the division
3856 expanders to be used for such operations; see the setting
3857 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3858 should always produce shorter code than using
3859 expand_sdiv2_pow2. */
3861 && CONST_INT_P (XEXP (x, 1))
3862 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3864 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), SET, speed);
3867 *total = COSTS_N_INSNS (mips_idiv_insns ());
3869 else if (mode == DImode)
3870 *total = mips_cost->int_div_di;
3872 *total = mips_cost->int_div_si;
3876 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3880 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3884 case UNSIGNED_FLOAT:
3887 case FLOAT_TRUNCATE:
3888 *total = mips_cost->fp_add;
3896 /* Implement TARGET_ADDRESS_COST. */
3899 mips_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
3901 return mips_address_insns (addr, SImode, false);
3904 /* Information about a single instruction in a multi-instruction
3906 struct mips_multi_member {
3907 /* True if this is a label, false if it is code. */
3910 /* The output_asm_insn format of the instruction. */
3913 /* The operands to the instruction. */
3914 rtx operands[MAX_RECOG_OPERANDS];
3916 typedef struct mips_multi_member mips_multi_member;
3918 /* Vector definitions for the above. */
3919 DEF_VEC_O(mips_multi_member);
3920 DEF_VEC_ALLOC_O(mips_multi_member, heap);
3922 /* The instructions that make up the current multi-insn sequence. */
3923 static VEC (mips_multi_member, heap) *mips_multi_members;
3925 /* How many instructions (as opposed to labels) are in the current
3926 multi-insn sequence. */
3927 static unsigned int mips_multi_num_insns;
3929 /* Start a new multi-insn sequence. */
3932 mips_multi_start (void)
3934 VEC_truncate (mips_multi_member, mips_multi_members, 0);
3935 mips_multi_num_insns = 0;
3938 /* Add a new, uninitialized member to the current multi-insn sequence. */
3940 static struct mips_multi_member *
3941 mips_multi_add (void)
3943 return VEC_safe_push (mips_multi_member, heap, mips_multi_members, 0);
3946 /* Add a normal insn with the given asm format to the current multi-insn
3947 sequence. The other arguments are a null-terminated list of operands. */
3950 mips_multi_add_insn (const char *format, ...)
3952 struct mips_multi_member *member;
3957 member = mips_multi_add ();
3958 member->is_label_p = false;
3959 member->format = format;
3960 va_start (ap, format);
3962 while ((op = va_arg (ap, rtx)))
3963 member->operands[i++] = op;
3965 mips_multi_num_insns++;
3968 /* Add the given label definition to the current multi-insn sequence.
3969 The definition should include the colon. */
3972 mips_multi_add_label (const char *label)
3974 struct mips_multi_member *member;
3976 member = mips_multi_add ();
3977 member->is_label_p = true;
3978 member->format = label;
3981 /* Return the index of the last member of the current multi-insn sequence. */
3984 mips_multi_last_index (void)
3986 return VEC_length (mips_multi_member, mips_multi_members) - 1;
3989 /* Add a copy of an existing instruction to the current multi-insn
3990 sequence. I is the index of the instruction that should be copied. */
3993 mips_multi_copy_insn (unsigned int i)
3995 struct mips_multi_member *member;
3997 member = mips_multi_add ();
3998 memcpy (member, VEC_index (mips_multi_member, mips_multi_members, i),
4000 gcc_assert (!member->is_label_p);
4003 /* Change the operand of an existing instruction in the current
4004 multi-insn sequence. I is the index of the instruction,
4005 OP is the index of the operand, and X is the new value. */
4008 mips_multi_set_operand (unsigned int i, unsigned int op, rtx x)
4010 VEC_index (mips_multi_member, mips_multi_members, i)->operands[op] = x;
4013 /* Write out the asm code for the current multi-insn sequence. */
4016 mips_multi_write (void)
4018 struct mips_multi_member *member;
4021 FOR_EACH_VEC_ELT (mips_multi_member, mips_multi_members, i, member)
4022 if (member->is_label_p)
4023 fprintf (asm_out_file, "%s\n", member->format);
4025 output_asm_insn (member->format, member->operands);
4028 /* Return one word of double-word value OP, taking into account the fixed
4029 endianness of certain registers. HIGH_P is true to select the high part,
4030 false to select the low part. */
4033 mips_subword (rtx op, bool high_p)
4035 unsigned int byte, offset;
4036 enum machine_mode mode;
4038 mode = GET_MODE (op);
4039 if (mode == VOIDmode)
4040 mode = TARGET_64BIT ? TImode : DImode;
4042 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
4043 byte = UNITS_PER_WORD;
4047 if (FP_REG_RTX_P (op))
4049 /* Paired FPRs are always ordered little-endian. */
4050 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
4051 return gen_rtx_REG (word_mode, REGNO (op) + offset);
4055 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
4057 return simplify_gen_subreg (word_mode, op, mode, byte);
4060 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
4063 mips_split_64bit_move_p (rtx dest, rtx src)
4068 /* FPR-to-FPR moves can be done in a single instruction, if they're
4070 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
4073 /* Check for floating-point loads and stores. */
4074 if (ISA_HAS_LDC1_SDC1)
4076 if (FP_REG_RTX_P (dest) && MEM_P (src))
4078 if (FP_REG_RTX_P (src) && MEM_P (dest))
4084 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
4085 this function handles 64-bit moves for which mips_split_64bit_move_p
4086 holds. For 64-bit targets, this function handles 128-bit moves. */
4089 mips_split_doubleword_move (rtx dest, rtx src)
4093 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
4095 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
4096 emit_insn (gen_move_doubleword_fprdi (dest, src));
4097 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
4098 emit_insn (gen_move_doubleword_fprdf (dest, src));
4099 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
4100 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
4101 else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
4102 emit_insn (gen_move_doubleword_fprv2si (dest, src));
4103 else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
4104 emit_insn (gen_move_doubleword_fprv4hi (dest, src));
4105 else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
4106 emit_insn (gen_move_doubleword_fprv8qi (dest, src));
4107 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
4108 emit_insn (gen_move_doubleword_fprtf (dest, src));
4112 else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
4114 low_dest = mips_subword (dest, false);
4115 mips_emit_move (low_dest, mips_subword (src, false));
4117 emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
4119 emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
4121 else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
4123 mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
4125 emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
4127 emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
4131 /* The operation can be split into two normal moves. Decide in
4132 which order to do them. */
4133 low_dest = mips_subword (dest, false);
4134 if (REG_P (low_dest)
4135 && reg_overlap_mentioned_p (low_dest, src))
4137 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
4138 mips_emit_move (low_dest, mips_subword (src, false));
4142 mips_emit_move (low_dest, mips_subword (src, false));
4143 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
4148 /* Return the appropriate instructions to move SRC into DEST. Assume
4149 that SRC is operand 1 and DEST is operand 0. */
4152 mips_output_move (rtx dest, rtx src)
4154 enum rtx_code dest_code, src_code;
4155 enum machine_mode mode;
4156 enum mips_symbol_type symbol_type;
4159 dest_code = GET_CODE (dest);
4160 src_code = GET_CODE (src);
4161 mode = GET_MODE (dest);
4162 dbl_p = (GET_MODE_SIZE (mode) == 8);
4164 if (dbl_p && mips_split_64bit_move_p (dest, src))
4167 if ((src_code == REG && GP_REG_P (REGNO (src)))
4168 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
4170 if (dest_code == REG)
4172 if (GP_REG_P (REGNO (dest)))
4173 return "move\t%0,%z1";
4175 /* Moves to HI are handled by special .md insns. */
4176 if (REGNO (dest) == LO_REGNUM)
4179 if (DSP_ACC_REG_P (REGNO (dest)))
4181 static char retval[] = "mt__\t%z1,%q0";
4183 retval[2] = reg_names[REGNO (dest)][4];
4184 retval[3] = reg_names[REGNO (dest)][5];
4188 if (FP_REG_P (REGNO (dest)))
4189 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
4191 if (ALL_COP_REG_P (REGNO (dest)))
4193 static char retval[] = "dmtc_\t%z1,%0";
4195 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
4196 return dbl_p ? retval : retval + 1;
4199 if (dest_code == MEM)
4200 switch (GET_MODE_SIZE (mode))
4202 case 1: return "sb\t%z1,%0";
4203 case 2: return "sh\t%z1,%0";
4204 case 4: return "sw\t%z1,%0";
4205 case 8: return "sd\t%z1,%0";
4208 if (dest_code == REG && GP_REG_P (REGNO (dest)))
4210 if (src_code == REG)
4212 /* Moves from HI are handled by special .md insns. */
4213 if (REGNO (src) == LO_REGNUM)
4215 /* When generating VR4120 or VR4130 code, we use MACC and
4216 DMACC instead of MFLO. This avoids both the normal
4217 MIPS III HI/LO hazards and the errata related to
4220 return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
4224 if (DSP_ACC_REG_P (REGNO (src)))
4226 static char retval[] = "mf__\t%0,%q1";
4228 retval[2] = reg_names[REGNO (src)][4];
4229 retval[3] = reg_names[REGNO (src)][5];
4233 if (FP_REG_P (REGNO (src)))
4234 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
4236 if (ALL_COP_REG_P (REGNO (src)))
4238 static char retval[] = "dmfc_\t%0,%1";
4240 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
4241 return dbl_p ? retval : retval + 1;
4244 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
4245 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
4248 if (src_code == MEM)
4249 switch (GET_MODE_SIZE (mode))
4251 case 1: return "lbu\t%0,%1";
4252 case 2: return "lhu\t%0,%1";
4253 case 4: return "lw\t%0,%1";
4254 case 8: return "ld\t%0,%1";
4257 if (src_code == CONST_INT)
4259 /* Don't use the X format for the operand itself, because that
4260 will give out-of-range numbers for 64-bit hosts and 32-bit
4263 return "li\t%0,%1\t\t\t# %X1";
4265 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
4268 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
4272 if (src_code == HIGH)
4273 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
4275 if (CONST_GP_P (src))
4276 return "move\t%0,%1";
4278 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
4279 && mips_lo_relocs[symbol_type] != 0)
4281 /* A signed 16-bit constant formed by applying a relocation
4282 operator to a symbolic address. */
4283 gcc_assert (!mips_split_p[symbol_type]);
4284 return "li\t%0,%R1";
4287 if (symbolic_operand (src, VOIDmode))
4289 gcc_assert (TARGET_MIPS16
4290 ? TARGET_MIPS16_TEXT_LOADS
4291 : !TARGET_EXPLICIT_RELOCS);
4292 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
4295 if (src_code == REG && FP_REG_P (REGNO (src)))
4297 if (dest_code == REG && FP_REG_P (REGNO (dest)))
4299 if (GET_MODE (dest) == V2SFmode)
4300 return "mov.ps\t%0,%1";
4302 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
4305 if (dest_code == MEM)
4306 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
4308 if (dest_code == REG && FP_REG_P (REGNO (dest)))
4310 if (src_code == MEM)
4311 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
4313 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
4315 static char retval[] = "l_c_\t%0,%1";
4317 retval[1] = (dbl_p ? 'd' : 'w');
4318 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
4321 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
4323 static char retval[] = "s_c_\t%1,%0";
4325 retval[1] = (dbl_p ? 'd' : 'w');
4326 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
4332 /* Return true if CMP1 is a suitable second operand for integer ordering
4333 test CODE. See also the *sCC patterns in mips.md. */
4336 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
4342 return reg_or_0_operand (cmp1, VOIDmode);
4346 return !TARGET_MIPS16 && cmp1 == const1_rtx;
4350 return arith_operand (cmp1, VOIDmode);
4353 return sle_operand (cmp1, VOIDmode);
4356 return sleu_operand (cmp1, VOIDmode);
4363 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
4364 integer ordering test *CODE, or if an equivalent combination can
4365 be formed by adjusting *CODE and *CMP1. When returning true, update
4366 *CODE and *CMP1 with the chosen code and operand, otherwise leave
4370 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
4371 enum machine_mode mode)
4373 HOST_WIDE_INT plus_one;
4375 if (mips_int_order_operand_ok_p (*code, *cmp1))
4378 if (CONST_INT_P (*cmp1))
4382 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4383 if (INTVAL (*cmp1) < plus_one)
4386 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4392 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4396 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4407 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
4408 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
4409 is nonnull, it's OK to set TARGET to the inverse of the result and
4410 flip *INVERT_PTR instead. */
4413 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
4414 rtx target, rtx cmp0, rtx cmp1)
4416 enum machine_mode mode;
4418 /* First see if there is a MIPS instruction that can do this operation.
4419 If not, try doing the same for the inverse operation. If that also
4420 fails, force CMP1 into a register and try again. */
4421 mode = GET_MODE (cmp0);
4422 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
4423 mips_emit_binary (code, target, cmp0, cmp1);
4426 enum rtx_code inv_code = reverse_condition (code);
4427 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
4429 cmp1 = force_reg (mode, cmp1);
4430 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
4432 else if (invert_ptr == 0)
4436 inv_target = mips_force_binary (GET_MODE (target),
4437 inv_code, cmp0, cmp1);
4438 mips_emit_binary (XOR, target, inv_target, const1_rtx);
4442 *invert_ptr = !*invert_ptr;
4443 mips_emit_binary (inv_code, target, cmp0, cmp1);
4448 /* Return a register that is zero iff CMP0 and CMP1 are equal.
4449 The register will have the same mode as CMP0. */
4452 mips_zero_if_equal (rtx cmp0, rtx cmp1)
4454 if (cmp1 == const0_rtx)
4457 if (uns_arith_operand (cmp1, VOIDmode))
4458 return expand_binop (GET_MODE (cmp0), xor_optab,
4459 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4461 return expand_binop (GET_MODE (cmp0), sub_optab,
4462 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4465 /* Convert *CODE into a code that can be used in a floating-point
4466 scc instruction (C.cond.fmt). Return true if the values of
4467 the condition code registers will be inverted, with 0 indicating
4468 that the condition holds. */
4471 mips_reversed_fp_cond (enum rtx_code *code)
4478 *code = reverse_condition_maybe_unordered (*code);
4486 /* Convert a comparison into something that can be used in a branch or
4487 conditional move. On entry, *OP0 and *OP1 are the values being
4488 compared and *CODE is the code used to compare them.
4490 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
4491 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
4492 otherwise any standard branch condition can be used. The standard branch
4495 - EQ or NE between two registers.
4496 - any comparison between a register and zero. */
4499 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
4504 if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
4506 if (!need_eq_ne_p && *op1 == const0_rtx)
4508 else if (*code == EQ || *code == NE)
4512 *op0 = mips_zero_if_equal (cmp_op0, cmp_op1);
4516 *op1 = force_reg (GET_MODE (cmp_op0), cmp_op1);
4520 /* The comparison needs a separate scc instruction. Store the
4521 result of the scc in *OP0 and compare it against zero. */
4522 bool invert = false;
4523 *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
4524 mips_emit_int_order_test (*code, &invert, *op0, cmp_op0, cmp_op1);
4525 *code = (invert ? EQ : NE);
4529 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_op0)))
4531 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
4532 mips_emit_binary (*code, *op0, cmp_op0, cmp_op1);
4538 enum rtx_code cmp_code;
4540 /* Floating-point tests use a separate C.cond.fmt comparison to
4541 set a condition code register. The branch or conditional move
4542 will then compare that register against zero.
4544 Set CMP_CODE to the code of the comparison instruction and
4545 *CODE to the code that the branch or move should use. */
4547 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
4549 ? gen_reg_rtx (CCmode)
4550 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4552 mips_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
4556 /* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
4557 and OPERAND[3]. Store the result in OPERANDS[0].
4559 On 64-bit targets, the mode of the comparison and target will always be
4560 SImode, thus possibly narrower than that of the comparison's operands. */
4563 mips_expand_scc (rtx operands[])
4565 rtx target = operands[0];
4566 enum rtx_code code = GET_CODE (operands[1]);
4567 rtx op0 = operands[2];
4568 rtx op1 = operands[3];
4570 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
4572 if (code == EQ || code == NE)
4575 && reg_imm10_operand (op1, GET_MODE (op1)))
4576 mips_emit_binary (code, target, op0, op1);
4579 rtx zie = mips_zero_if_equal (op0, op1);
4580 mips_emit_binary (code, target, zie, const0_rtx);
4584 mips_emit_int_order_test (code, 0, target, op0, op1);
4587 /* Compare OPERANDS[1] with OPERANDS[2] using comparison code
4588 CODE and jump to OPERANDS[3] if the condition holds. */
4591 mips_expand_conditional_branch (rtx *operands)
4593 enum rtx_code code = GET_CODE (operands[0]);
4594 rtx op0 = operands[1];
4595 rtx op1 = operands[2];
4598 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4599 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4600 emit_jump_insn (gen_condjump (condition, operands[3]));
4605 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4606 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4609 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4610 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4615 reversed_p = mips_reversed_fp_cond (&cond);
4616 cmp_result = gen_reg_rtx (CCV2mode);
4617 emit_insn (gen_scc_ps (cmp_result,
4618 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4620 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4623 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4627 /* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0]
4628 if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
4631 mips_expand_conditional_move (rtx *operands)
4634 enum rtx_code code = GET_CODE (operands[1]);
4635 rtx op0 = XEXP (operands[1], 0);
4636 rtx op1 = XEXP (operands[1], 1);
4638 mips_emit_compare (&code, &op0, &op1, true);
4639 cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
4640 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4641 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4642 operands[2], operands[3])));
4645 /* Perform the comparison in COMPARISON, then trap if the condition holds. */
4648 mips_expand_conditional_trap (rtx comparison)
4651 enum machine_mode mode;
4654 /* MIPS conditional trap instructions don't have GT or LE flavors,
4655 so we must swap the operands and convert to LT and GE respectively. */
4656 code = GET_CODE (comparison);
4663 code = swap_condition (code);
4664 op0 = XEXP (comparison, 1);
4665 op1 = XEXP (comparison, 0);
4669 op0 = XEXP (comparison, 0);
4670 op1 = XEXP (comparison, 1);
4674 mode = GET_MODE (XEXP (comparison, 0));
4675 op0 = force_reg (mode, op0);
4676 if (!arith_operand (op1, mode))
4677 op1 = force_reg (mode, op1);
4679 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4680 gen_rtx_fmt_ee (code, mode, op0, op1),
4684 /* Initialize *CUM for a call to a function of type FNTYPE. */
4687 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4689 memset (cum, 0, sizeof (*cum));
4690 cum->prototype = (fntype && prototype_p (fntype));
4691 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4694 /* Fill INFO with information about a single argument. CUM is the
4695 cumulative state for earlier arguments. MODE is the mode of this
4696 argument and TYPE is its type (if known). NAMED is true if this
4697 is a named (fixed) argument rather than a variable one. */
4700 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4701 enum machine_mode mode, const_tree type, bool named)
4703 bool doubleword_aligned_p;
4704 unsigned int num_bytes, num_words, max_regs;
4706 /* Work out the size of the argument. */
4707 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4708 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4710 /* Decide whether it should go in a floating-point register, assuming
4711 one is free. Later code checks for availability.
4713 The checks against UNITS_PER_FPVALUE handle the soft-float and
4714 single-float cases. */
4718 /* The EABI conventions have traditionally been defined in terms
4719 of TYPE_MODE, regardless of the actual type. */
4720 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4721 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4722 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4727 /* Only leading floating-point scalars are passed in
4728 floating-point registers. We also handle vector floats the same
4729 say, which is OK because they are not covered by the standard ABI. */
4730 info->fpr_p = (!cum->gp_reg_found
4731 && cum->arg_number < 2
4733 || SCALAR_FLOAT_TYPE_P (type)
4734 || VECTOR_FLOAT_TYPE_P (type))
4735 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4736 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4737 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4742 /* Scalar, complex and vector floating-point types are passed in
4743 floating-point registers, as long as this is a named rather
4744 than a variable argument. */
4745 info->fpr_p = (named
4746 && (type == 0 || FLOAT_TYPE_P (type))
4747 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4748 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4749 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4750 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4752 /* ??? According to the ABI documentation, the real and imaginary
4753 parts of complex floats should be passed in individual registers.
4754 The real and imaginary parts of stack arguments are supposed
4755 to be contiguous and there should be an extra word of padding
4758 This has two problems. First, it makes it impossible to use a
4759 single "void *" va_list type, since register and stack arguments
4760 are passed differently. (At the time of writing, MIPSpro cannot
4761 handle complex float varargs correctly.) Second, it's unclear
4762 what should happen when there is only one register free.
4764 For now, we assume that named complex floats should go into FPRs
4765 if there are two FPRs free, otherwise they should be passed in the
4766 same way as a struct containing two floats. */
4768 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4769 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4771 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4772 info->fpr_p = false;
4782 /* See whether the argument has doubleword alignment. */
4783 doubleword_aligned_p = (mips_function_arg_boundary (mode, type)
4786 /* Set REG_OFFSET to the register count we're interested in.
4787 The EABI allocates the floating-point registers separately,
4788 but the other ABIs allocate them like integer registers. */
4789 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4793 /* Advance to an even register if the argument is doubleword-aligned. */
4794 if (doubleword_aligned_p)
4795 info->reg_offset += info->reg_offset & 1;
4797 /* Work out the offset of a stack argument. */
4798 info->stack_offset = cum->stack_words;
4799 if (doubleword_aligned_p)
4800 info->stack_offset += info->stack_offset & 1;
4802 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4804 /* Partition the argument between registers and stack. */
4805 info->reg_words = MIN (num_words, max_regs);
4806 info->stack_words = num_words - info->reg_words;
4809 /* INFO describes a register argument that has the normal format for the
4810 argument's mode. Return the register it uses, assuming that FPRs are
4811 available if HARD_FLOAT_P. */
4814 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4816 if (!info->fpr_p || !hard_float_p)
4817 return GP_ARG_FIRST + info->reg_offset;
4818 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4819 /* In o32, the second argument is always passed in $f14
4820 for TARGET_DOUBLE_FLOAT, regardless of whether the
4821 first argument was a word or doubleword. */
4822 return FP_ARG_FIRST + 2;
4824 return FP_ARG_FIRST + info->reg_offset;
4827 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
4830 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4832 return !TARGET_OLDABI;
4835 /* Implement TARGET_FUNCTION_ARG. */
4838 mips_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4839 const_tree type, bool named)
4841 struct mips_arg_info info;
4843 /* We will be called with a mode of VOIDmode after the last argument
4844 has been seen. Whatever we return will be passed to the call expander.
4845 If we need a MIPS16 fp_code, return a REG with the code stored as
4847 if (mode == VOIDmode)
4849 if (TARGET_MIPS16 && cum->fp_code != 0)
4850 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4855 mips_get_arg_info (&info, cum, mode, type, named);
4857 /* Return straight away if the whole argument is passed on the stack. */
4858 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4861 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
4862 contains a double in its entirety, then that 64-bit chunk is passed
4863 in a floating-point register. */
4865 && TARGET_HARD_FLOAT
4868 && TREE_CODE (type) == RECORD_TYPE
4869 && TYPE_SIZE_UNIT (type)
4870 && host_integerp (TYPE_SIZE_UNIT (type), 1))
4874 /* First check to see if there is any such field. */
4875 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
4876 if (TREE_CODE (field) == FIELD_DECL
4877 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4878 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4879 && host_integerp (bit_position (field), 0)
4880 && int_bit_position (field) % BITS_PER_WORD == 0)
4885 /* Now handle the special case by returning a PARALLEL
4886 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4887 chunks are passed in registers. */
4889 HOST_WIDE_INT bitpos;
4892 /* assign_parms checks the mode of ENTRY_PARM, so we must
4893 use the actual mode here. */
4894 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4897 field = TYPE_FIELDS (type);
4898 for (i = 0; i < info.reg_words; i++)
4902 for (; field; field = DECL_CHAIN (field))
4903 if (TREE_CODE (field) == FIELD_DECL
4904 && int_bit_position (field) >= bitpos)
4908 && int_bit_position (field) == bitpos
4909 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
4910 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4911 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4913 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4916 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4917 GEN_INT (bitpos / BITS_PER_UNIT));
4919 bitpos += BITS_PER_WORD;
4925 /* Handle the n32/n64 conventions for passing complex floating-point
4926 arguments in FPR pairs. The real part goes in the lower register
4927 and the imaginary part goes in the upper register. */
4930 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4933 enum machine_mode inner;
4936 inner = GET_MODE_INNER (mode);
4937 regno = FP_ARG_FIRST + info.reg_offset;
4938 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4940 /* Real part in registers, imaginary part on stack. */
4941 gcc_assert (info.stack_words == info.reg_words);
4942 return gen_rtx_REG (inner, regno);
4946 gcc_assert (info.stack_words == 0);
4947 real = gen_rtx_EXPR_LIST (VOIDmode,
4948 gen_rtx_REG (inner, regno),
4950 imag = gen_rtx_EXPR_LIST (VOIDmode,
4952 regno + info.reg_words / 2),
4953 GEN_INT (GET_MODE_SIZE (inner)));
4954 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4958 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4961 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
4964 mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4965 const_tree type, bool named)
4967 struct mips_arg_info info;
4969 mips_get_arg_info (&info, cum, mode, type, named);
4972 cum->gp_reg_found = true;
4974 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
4975 an explanation of what this code does. It assumes that we're using
4976 either the o32 or the o64 ABI, both of which pass at most 2 arguments
4978 if (cum->arg_number < 2 && info.fpr_p)
4979 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4981 /* Advance the register count. This has the effect of setting
4982 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
4983 argument required us to skip the final GPR and pass the whole
4984 argument on the stack. */
4985 if (mips_abi != ABI_EABI || !info.fpr_p)
4986 cum->num_gprs = info.reg_offset + info.reg_words;
4987 else if (info.reg_words > 0)
4988 cum->num_fprs += MAX_FPRS_PER_FMT;
4990 /* Advance the stack word count. */
4991 if (info.stack_words > 0)
4992 cum->stack_words = info.stack_offset + info.stack_words;
4997 /* Implement TARGET_ARG_PARTIAL_BYTES. */
5000 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
5001 enum machine_mode mode, tree type, bool named)
5003 struct mips_arg_info info;
5005 mips_get_arg_info (&info, cum, mode, type, named);
5006 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
5009 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
5010 least PARM_BOUNDARY bits of alignment, but will be given anything up
5011 to STACK_BOUNDARY bits if the type requires it. */
5014 mips_function_arg_boundary (enum machine_mode mode, const_tree type)
5016 unsigned int alignment;
5018 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
5019 if (alignment < PARM_BOUNDARY)
5020 alignment = PARM_BOUNDARY;
5021 if (alignment > STACK_BOUNDARY)
5022 alignment = STACK_BOUNDARY;
5026 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
5027 upward rather than downward. In other words, return true if the
5028 first byte of the stack slot has useful data, false if the last
5032 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
5034 /* On little-endian targets, the first byte of every stack argument
5035 is passed in the first byte of the stack slot. */
5036 if (!BYTES_BIG_ENDIAN)
5039 /* Otherwise, integral types are padded downward: the last byte of a
5040 stack argument is passed in the last byte of the stack slot. */
5042 ? (INTEGRAL_TYPE_P (type)
5043 || POINTER_TYPE_P (type)
5044 || FIXED_POINT_TYPE_P (type))
5045 : (SCALAR_INT_MODE_P (mode)
5046 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
5049 /* Big-endian o64 pads floating-point arguments downward. */
5050 if (mips_abi == ABI_O64)
5051 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
5054 /* Other types are padded upward for o32, o64, n32 and n64. */
5055 if (mips_abi != ABI_EABI)
5058 /* Arguments smaller than a stack slot are padded downward. */
5059 if (mode != BLKmode)
5060 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
5062 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
5065 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
5066 if the least significant byte of the register has useful data. Return
5067 the opposite if the most significant byte does. */
5070 mips_pad_reg_upward (enum machine_mode mode, tree type)
5072 /* No shifting is required for floating-point arguments. */
5073 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
5074 return !BYTES_BIG_ENDIAN;
5076 /* Otherwise, apply the same padding to register arguments as we do
5077 to stack arguments. */
5078 return mips_pad_arg_upward (mode, type);
5081 /* Return nonzero when an argument must be passed by reference. */
5084 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5085 enum machine_mode mode, const_tree type,
5086 bool named ATTRIBUTE_UNUSED)
5088 if (mips_abi == ABI_EABI)
5092 /* ??? How should SCmode be handled? */
5093 if (mode == DImode || mode == DFmode
5094 || mode == DQmode || mode == UDQmode
5095 || mode == DAmode || mode == UDAmode)
5098 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
5099 return size == -1 || size > UNITS_PER_WORD;
5103 /* If we have a variable-sized parameter, we have no choice. */
5104 return targetm.calls.must_pass_in_stack (mode, type);
5108 /* Implement TARGET_CALLEE_COPIES. */
5111 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5112 enum machine_mode mode ATTRIBUTE_UNUSED,
5113 const_tree type ATTRIBUTE_UNUSED, bool named)
5115 return mips_abi == ABI_EABI && named;
5118 /* See whether VALTYPE is a record whose fields should be returned in
5119 floating-point registers. If so, return the number of fields and
5120 list them in FIELDS (which should have two elements). Return 0
5123 For n32 & n64, a structure with one or two fields is returned in
5124 floating-point registers as long as every field has a floating-point
5128 mips_fpr_return_fields (const_tree valtype, tree *fields)
5136 if (TREE_CODE (valtype) != RECORD_TYPE)
5140 for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
5142 if (TREE_CODE (field) != FIELD_DECL)
5145 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
5151 fields[i++] = field;
5156 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
5157 a value in the most significant part of $2/$3 if:
5159 - the target is big-endian;
5161 - the value has a structure or union type (we generalize this to
5162 cover aggregates from other languages too); and
5164 - the structure is not returned in floating-point registers. */
5167 mips_return_in_msb (const_tree valtype)
5171 return (TARGET_NEWABI
5172 && TARGET_BIG_ENDIAN
5173 && AGGREGATE_TYPE_P (valtype)
5174 && mips_fpr_return_fields (valtype, fields) == 0);
5177 /* Return true if the function return value MODE will get returned in a
5178 floating-point register. */
5181 mips_return_mode_in_fpr_p (enum machine_mode mode)
5183 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
5184 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
5185 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5186 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
5189 /* Return the representation of an FPR return register when the
5190 value being returned in FP_RETURN has mode VALUE_MODE and the
5191 return type itself has mode TYPE_MODE. On NewABI targets,
5192 the two modes may be different for structures like:
5194 struct __attribute__((packed)) foo { float f; }
5196 where we return the SFmode value of "f" in FP_RETURN, but where
5197 the structure itself has mode BLKmode. */
5200 mips_return_fpr_single (enum machine_mode type_mode,
5201 enum machine_mode value_mode)
5205 x = gen_rtx_REG (value_mode, FP_RETURN);
5206 if (type_mode != value_mode)
5208 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
5209 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
5214 /* Return a composite value in a pair of floating-point registers.
5215 MODE1 and OFFSET1 are the mode and byte offset for the first value,
5216 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
5219 For n32 & n64, $f0 always holds the first value and $f2 the second.
5220 Otherwise the values are packed together as closely as possible. */
5223 mips_return_fpr_pair (enum machine_mode mode,
5224 enum machine_mode mode1, HOST_WIDE_INT offset1,
5225 enum machine_mode mode2, HOST_WIDE_INT offset2)
5229 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
5230 return gen_rtx_PARALLEL
5233 gen_rtx_EXPR_LIST (VOIDmode,
5234 gen_rtx_REG (mode1, FP_RETURN),
5236 gen_rtx_EXPR_LIST (VOIDmode,
5237 gen_rtx_REG (mode2, FP_RETURN + inc),
5238 GEN_INT (offset2))));
5242 /* Implement TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE.
5243 For normal calls, VALTYPE is the return type and MODE is VOIDmode.
5244 For libcalls, VALTYPE is null and MODE is the mode of the return value. */
5247 mips_function_value_1 (const_tree valtype, const_tree fn_decl_or_type,
5248 enum machine_mode mode)
5256 if (fn_decl_or_type && DECL_P (fn_decl_or_type))
5257 func = fn_decl_or_type;
5261 mode = TYPE_MODE (valtype);
5262 unsigned_p = TYPE_UNSIGNED (valtype);
5264 /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes,
5265 return values, promote the mode here too. */
5266 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
5268 /* Handle structures whose fields are returned in $f0/$f2. */
5269 switch (mips_fpr_return_fields (valtype, fields))
5272 return mips_return_fpr_single (mode,
5273 TYPE_MODE (TREE_TYPE (fields[0])));
5276 return mips_return_fpr_pair (mode,
5277 TYPE_MODE (TREE_TYPE (fields[0])),
5278 int_byte_position (fields[0]),
5279 TYPE_MODE (TREE_TYPE (fields[1])),
5280 int_byte_position (fields[1]));
5283 /* If a value is passed in the most significant part of a register, see
5284 whether we have to round the mode up to a whole number of words. */
5285 if (mips_return_in_msb (valtype))
5287 HOST_WIDE_INT size = int_size_in_bytes (valtype);
5288 if (size % UNITS_PER_WORD != 0)
5290 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
5291 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5295 /* For EABI, the class of return register depends entirely on MODE.
5296 For example, "struct { some_type x; }" and "union { some_type x; }"
5297 are returned in the same way as a bare "some_type" would be.
5298 Other ABIs only use FPRs for scalar, complex or vector types. */
5299 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
5300 return gen_rtx_REG (mode, GP_RETURN);
5305 /* Handle long doubles for n32 & n64. */
5307 return mips_return_fpr_pair (mode,
5309 DImode, GET_MODE_SIZE (mode) / 2);
5311 if (mips_return_mode_in_fpr_p (mode))
5313 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5314 return mips_return_fpr_pair (mode,
5315 GET_MODE_INNER (mode), 0,
5316 GET_MODE_INNER (mode),
5317 GET_MODE_SIZE (mode) / 2);
5319 return gen_rtx_REG (mode, FP_RETURN);
5323 return gen_rtx_REG (mode, GP_RETURN);
5326 /* Implement TARGET_FUNCTION_VALUE. */
5329 mips_function_value (const_tree valtype, const_tree fn_decl_or_type,
5330 bool outgoing ATTRIBUTE_UNUSED)
5332 return mips_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
5335 /* Implement TARGET_LIBCALL_VALUE. */
5338 mips_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
5340 return mips_function_value_1 (NULL_TREE, NULL_TREE, mode);
5343 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
5345 On the MIPS, R2 R3 and F0 F2 are the only register thus used.
5346 Currently, R2 and F0 are only implemented here (C has no complex type). */
5349 mips_function_value_regno_p (const unsigned int regno)
5351 if (regno == GP_RETURN
5352 || regno == FP_RETURN
5353 || (LONG_DOUBLE_TYPE_SIZE == 128
5354 && FP_RETURN != GP_RETURN
5355 && regno == FP_RETURN + 2))
5361 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
5362 all BLKmode objects are returned in memory. Under the n32, n64
5363 and embedded ABIs, small structures are returned in a register.
5364 Objects with varying size must still be returned in memory, of
5368 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5370 return (TARGET_OLDABI
5371 ? TYPE_MODE (type) == BLKmode
5372 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
5375 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
5378 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5379 tree type, int *pretend_size ATTRIBUTE_UNUSED,
5382 CUMULATIVE_ARGS local_cum;
5383 int gp_saved, fp_saved;
5385 /* The caller has advanced CUM up to, but not beyond, the last named
5386 argument. Advance a local copy of CUM past the last "real" named
5387 argument, to find out how many registers are left over. */
5389 mips_function_arg_advance (&local_cum, mode, type, true);
5391 /* Found out how many registers we need to save. */
5392 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
5393 fp_saved = (EABI_FLOAT_VARARGS_P
5394 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
5403 ptr = plus_constant (virtual_incoming_args_rtx,
5404 REG_PARM_STACK_SPACE (cfun->decl)
5405 - gp_saved * UNITS_PER_WORD);
5406 mem = gen_frame_mem (BLKmode, ptr);
5407 set_mem_alias_set (mem, get_varargs_alias_set ());
5409 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
5414 /* We can't use move_block_from_reg, because it will use
5416 enum machine_mode mode;
5419 /* Set OFF to the offset from virtual_incoming_args_rtx of
5420 the first float register. The FP save area lies below
5421 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
5422 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
5423 off -= fp_saved * UNITS_PER_FPREG;
5425 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
5427 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
5428 i += MAX_FPRS_PER_FMT)
5432 ptr = plus_constant (virtual_incoming_args_rtx, off);
5433 mem = gen_frame_mem (mode, ptr);
5434 set_mem_alias_set (mem, get_varargs_alias_set ());
5435 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
5436 off += UNITS_PER_HWFPVALUE;
5440 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
5441 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
5442 + fp_saved * UNITS_PER_FPREG);
5445 /* Implement TARGET_BUILTIN_VA_LIST. */
5448 mips_build_builtin_va_list (void)
5450 if (EABI_FLOAT_VARARGS_P)
5452 /* We keep 3 pointers, and two offsets.
5454 Two pointers are to the overflow area, which starts at the CFA.
5455 One of these is constant, for addressing into the GPR save area
5456 below it. The other is advanced up the stack through the
5459 The third pointer is to the bottom of the GPR save area.
5460 Since the FPR save area is just below it, we can address
5461 FPR slots off this pointer.
5463 We also keep two one-byte offsets, which are to be subtracted
5464 from the constant pointers to yield addresses in the GPR and
5465 FPR save areas. These are downcounted as float or non-float
5466 arguments are used, and when they get to zero, the argument
5467 must be obtained from the overflow region. */
5468 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
5471 record = lang_hooks.types.make_type (RECORD_TYPE);
5473 f_ovfl = build_decl (BUILTINS_LOCATION,
5474 FIELD_DECL, get_identifier ("__overflow_argptr"),
5476 f_gtop = build_decl (BUILTINS_LOCATION,
5477 FIELD_DECL, get_identifier ("__gpr_top"),
5479 f_ftop = build_decl (BUILTINS_LOCATION,
5480 FIELD_DECL, get_identifier ("__fpr_top"),
5482 f_goff = build_decl (BUILTINS_LOCATION,
5483 FIELD_DECL, get_identifier ("__gpr_offset"),
5484 unsigned_char_type_node);
5485 f_foff = build_decl (BUILTINS_LOCATION,
5486 FIELD_DECL, get_identifier ("__fpr_offset"),
5487 unsigned_char_type_node);
5488 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5489 warn on every user file. */
5490 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
5491 array = build_array_type (unsigned_char_type_node,
5492 build_index_type (index));
5493 f_res = build_decl (BUILTINS_LOCATION,
5494 FIELD_DECL, get_identifier ("__reserved"), array);
5496 DECL_FIELD_CONTEXT (f_ovfl) = record;
5497 DECL_FIELD_CONTEXT (f_gtop) = record;
5498 DECL_FIELD_CONTEXT (f_ftop) = record;
5499 DECL_FIELD_CONTEXT (f_goff) = record;
5500 DECL_FIELD_CONTEXT (f_foff) = record;
5501 DECL_FIELD_CONTEXT (f_res) = record;
5503 TYPE_FIELDS (record) = f_ovfl;
5504 DECL_CHAIN (f_ovfl) = f_gtop;
5505 DECL_CHAIN (f_gtop) = f_ftop;
5506 DECL_CHAIN (f_ftop) = f_goff;
5507 DECL_CHAIN (f_goff) = f_foff;
5508 DECL_CHAIN (f_foff) = f_res;
5510 layout_type (record);
5513 else if (TARGET_IRIX6)
5514 /* On IRIX 6, this type is 'char *'. */
5515 return build_pointer_type (char_type_node);
5517 /* Otherwise, we use 'void *'. */
5518 return ptr_type_node;
5521 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
5524 mips_va_start (tree valist, rtx nextarg)
5526 if (EABI_FLOAT_VARARGS_P)
5528 const CUMULATIVE_ARGS *cum;
5529 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5530 tree ovfl, gtop, ftop, goff, foff;
5532 int gpr_save_area_size;
5533 int fpr_save_area_size;
5536 cum = &crtl->args.info;
5538 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5540 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5542 f_ovfl = TYPE_FIELDS (va_list_type_node);
5543 f_gtop = DECL_CHAIN (f_ovfl);
5544 f_ftop = DECL_CHAIN (f_gtop);
5545 f_goff = DECL_CHAIN (f_ftop);
5546 f_foff = DECL_CHAIN (f_goff);
5548 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5550 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5552 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5554 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5556 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5559 /* Emit code to initialize OVFL, which points to the next varargs
5560 stack argument. CUM->STACK_WORDS gives the number of stack
5561 words used by named arguments. */
5562 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5563 if (cum->stack_words > 0)
5564 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
5565 size_int (cum->stack_words * UNITS_PER_WORD));
5566 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5567 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5569 /* Emit code to initialize GTOP, the top of the GPR save area. */
5570 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5571 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
5572 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5574 /* Emit code to initialize FTOP, the top of the FPR save area.
5575 This address is gpr_save_area_bytes below GTOP, rounded
5576 down to the next fp-aligned boundary. */
5577 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5578 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5579 fpr_offset &= -UNITS_PER_FPVALUE;
5581 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
5582 size_int (-fpr_offset));
5583 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
5584 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5586 /* Emit code to initialize GOFF, the offset from GTOP of the
5587 next GPR argument. */
5588 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
5589 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
5590 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5592 /* Likewise emit code to initialize FOFF, the offset from FTOP
5593 of the next FPR argument. */
5594 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
5595 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
5596 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5600 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
5601 std_expand_builtin_va_start (valist, nextarg);
5605 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5608 mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5614 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5616 type = build_pointer_type (type);
5618 if (!EABI_FLOAT_VARARGS_P)
5619 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5622 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5623 tree ovfl, top, off, align;
5624 HOST_WIDE_INT size, rsize, osize;
5627 f_ovfl = TYPE_FIELDS (va_list_type_node);
5628 f_gtop = DECL_CHAIN (f_ovfl);
5629 f_ftop = DECL_CHAIN (f_gtop);
5630 f_goff = DECL_CHAIN (f_ftop);
5631 f_foff = DECL_CHAIN (f_goff);
5635 TOP be the top of the GPR or FPR save area;
5636 OFF be the offset from TOP of the next register;
5637 ADDR_RTX be the address of the argument;
5638 SIZE be the number of bytes in the argument type;
5639 RSIZE be the number of bytes used to store the argument
5640 when it's in the register save area; and
5641 OSIZE be the number of bytes used to store it when it's
5642 in the stack overflow area.
5644 The code we want is:
5646 1: off &= -rsize; // round down
5649 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
5654 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
5655 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
5659 [1] and [9] can sometimes be optimized away. */
5661 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5663 size = int_size_in_bytes (type);
5665 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
5666 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
5668 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop),
5669 unshare_expr (valist), f_ftop, NULL_TREE);
5670 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff),
5671 unshare_expr (valist), f_foff, NULL_TREE);
5673 /* When va_start saves FPR arguments to the stack, each slot
5674 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
5675 argument's precision. */
5676 rsize = UNITS_PER_HWFPVALUE;
5678 /* Overflow arguments are padded to UNITS_PER_WORD bytes
5679 (= PARM_BOUNDARY bits). This can be different from RSIZE
5682 (1) On 32-bit targets when TYPE is a structure such as:
5684 struct s { float f; };
5686 Such structures are passed in paired FPRs, so RSIZE
5687 will be 8 bytes. However, the structure only takes
5688 up 4 bytes of memory, so OSIZE will only be 4.
5690 (2) In combinations such as -mgp64 -msingle-float
5691 -fshort-double. Doubles passed in registers will then take
5692 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
5693 stack take up UNITS_PER_WORD bytes. */
5694 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
5698 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop),
5699 unshare_expr (valist), f_gtop, NULL_TREE);
5700 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff),
5701 unshare_expr (valist), f_goff, NULL_TREE);
5702 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5703 if (rsize > UNITS_PER_WORD)
5705 /* [1] Emit code for: off &= -rsize. */
5706 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), unshare_expr (off),
5707 build_int_cst (TREE_TYPE (off), -rsize));
5708 gimplify_assign (unshare_expr (off), t, pre_p);
5713 /* [2] Emit code to branch if off == 0. */
5714 t = build2 (NE_EXPR, boolean_type_node, off,
5715 build_int_cst (TREE_TYPE (off), 0));
5716 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5718 /* [5] Emit code for: off -= rsize. We do this as a form of
5719 post-decrement not available to C. */
5720 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5721 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5723 /* [4] Emit code for:
5724 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
5725 t = fold_convert (sizetype, t);
5726 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5727 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5728 if (BYTES_BIG_ENDIAN && rsize > size)
5730 u = size_int (rsize - size);
5731 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5733 COND_EXPR_THEN (addr) = t;
5735 if (osize > UNITS_PER_WORD)
5737 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
5738 u = size_int (osize - 1);
5739 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl),
5740 unshare_expr (ovfl), u);
5741 t = fold_convert (sizetype, t);
5742 u = size_int (-osize);
5743 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5744 t = fold_convert (TREE_TYPE (ovfl), t);
5745 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl),
5746 unshare_expr (ovfl), t);
5751 /* [10, 11] Emit code for:
5752 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
5754 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
5755 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5756 if (BYTES_BIG_ENDIAN && osize > size)
5758 u = size_int (osize - size);
5759 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5762 /* String [9] and [10, 11] together. */
5764 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5765 COND_EXPR_ELSE (addr) = t;
5767 addr = fold_convert (build_pointer_type (type), addr);
5768 addr = build_va_arg_indirect_ref (addr);
5772 addr = build_va_arg_indirect_ref (addr);
5777 /* Start a definition of function NAME. MIPS16_P indicates whether the
5778 function contains MIPS16 code. */
5781 mips_start_function_definition (const char *name, bool mips16_p)
5784 fprintf (asm_out_file, "\t.set\tmips16\n");
5786 fprintf (asm_out_file, "\t.set\tnomips16\n");
5788 if (!flag_inhibit_size_directive)
5790 fputs ("\t.ent\t", asm_out_file);
5791 assemble_name (asm_out_file, name);
5792 fputs ("\n", asm_out_file);
5795 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
5797 /* Start the definition proper. */
5798 assemble_name (asm_out_file, name);
5799 fputs (":\n", asm_out_file);
5802 /* End a function definition started by mips_start_function_definition. */
5805 mips_end_function_definition (const char *name)
5807 if (!flag_inhibit_size_directive)
5809 fputs ("\t.end\t", asm_out_file);
5810 assemble_name (asm_out_file, name);
5811 fputs ("\n", asm_out_file);
5815 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5818 mips_ok_for_lazy_binding_p (rtx x)
5820 return (TARGET_USE_GOT
5821 && GET_CODE (x) == SYMBOL_REF
5822 && !SYMBOL_REF_BIND_NOW_P (x)
5823 && !mips_symbol_binds_local_p (x));
5826 /* Load function address ADDR into register DEST. TYPE is as for
5827 mips_expand_call. Return true if we used an explicit lazy-binding
5831 mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
5833 /* If we're generating PIC, and this call is to a global function,
5834 try to allow its address to be resolved lazily. This isn't
5835 possible for sibcalls when $gp is call-saved because the value
5836 of $gp on entry to the stub would be our caller's gp, not ours. */
5837 if (TARGET_EXPLICIT_RELOCS
5838 && !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
5839 && mips_ok_for_lazy_binding_p (addr))
5841 addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
5842 emit_insn (gen_rtx_SET (VOIDmode, dest, addr));
5847 mips_emit_move (dest, addr);
5852 /* Each locally-defined hard-float MIPS16 function has a local symbol
5853 associated with it. This hash table maps the function symbol (FUNC)
5854 to the local symbol (LOCAL). */
5855 struct GTY(()) mips16_local_alias {
5859 static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
5861 /* Hash table callbacks for mips16_local_aliases. */
5864 mips16_local_aliases_hash (const void *entry)
5866 const struct mips16_local_alias *alias;
5868 alias = (const struct mips16_local_alias *) entry;
5869 return htab_hash_string (XSTR (alias->func, 0));
5873 mips16_local_aliases_eq (const void *entry1, const void *entry2)
5875 const struct mips16_local_alias *alias1, *alias2;
5877 alias1 = (const struct mips16_local_alias *) entry1;
5878 alias2 = (const struct mips16_local_alias *) entry2;
5879 return rtx_equal_p (alias1->func, alias2->func);
5882 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
5883 Return a local alias for it, creating a new one if necessary. */
5886 mips16_local_alias (rtx func)
5888 struct mips16_local_alias *alias, tmp_alias;
5891 /* Create the hash table if this is the first call. */
5892 if (mips16_local_aliases == NULL)
5893 mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
5894 mips16_local_aliases_eq, NULL);
5896 /* Look up the function symbol, creating a new entry if need be. */
5897 tmp_alias.func = func;
5898 slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
5899 gcc_assert (slot != NULL);
5901 alias = (struct mips16_local_alias *) *slot;
5904 const char *func_name, *local_name;
5907 /* Create a new SYMBOL_REF for the local symbol. The choice of
5908 __fn_local_* is based on the __fn_stub_* names that we've
5909 traditionally used for the non-MIPS16 stub. */
5910 func_name = targetm.strip_name_encoding (XSTR (func, 0));
5911 local_name = ACONCAT (("__fn_local_", func_name, NULL));
5912 local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
5913 SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
5915 /* Create a new structure to represent the mapping. */
5916 alias = ggc_alloc_mips16_local_alias ();
5918 alias->local = local;
5921 return alias->local;
5924 /* A chained list of functions for which mips16_build_call_stub has already
5925 generated a stub. NAME is the name of the function and FP_RET_P is true
5926 if the function returns a value in floating-point registers. */
5927 struct mips16_stub {
5928 struct mips16_stub *next;
5932 static struct mips16_stub *mips16_stubs;
5934 /* Return a SYMBOL_REF for a MIPS16 function called NAME. */
5937 mips16_stub_function (const char *name)
5941 x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5942 SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
5946 /* Return the two-character string that identifies floating-point
5947 return mode MODE in the name of a MIPS16 function stub. */
5950 mips16_call_stub_mode_suffix (enum machine_mode mode)
5954 else if (mode == DFmode)
5956 else if (mode == SCmode)
5958 else if (mode == DCmode)
5960 else if (mode == V2SFmode)
5966 /* Write instructions to move a 32-bit value between general register
5967 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5968 from GPREG to FPREG and 'f' to move in the opposite direction. */
5971 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5973 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5974 reg_names[gpreg], reg_names[fpreg]);
5977 /* Likewise for 64-bit values. */
5980 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5983 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5984 reg_names[gpreg], reg_names[fpreg]);
5985 else if (TARGET_FLOAT64)
5987 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5988 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5989 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5990 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5994 /* Move the least-significant word. */
5995 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5996 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5997 /* ...then the most significant word. */
5998 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5999 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
6003 /* Write out code to move floating-point arguments into or out of
6004 general registers. FP_CODE is the code describing which arguments
6005 are present (see the comment above the definition of CUMULATIVE_ARGS
6006 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
6009 mips_output_args_xfer (int fp_code, char direction)
6011 unsigned int gparg, fparg, f;
6012 CUMULATIVE_ARGS cum;
6014 /* This code only works for o32 and o64. */
6015 gcc_assert (TARGET_OLDABI);
6017 mips_init_cumulative_args (&cum, NULL);
6019 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
6021 enum machine_mode mode;
6022 struct mips_arg_info info;
6026 else if ((f & 3) == 2)
6031 mips_get_arg_info (&info, &cum, mode, NULL, true);
6032 gparg = mips_arg_regno (&info, false);
6033 fparg = mips_arg_regno (&info, true);
6036 mips_output_32bit_xfer (direction, gparg, fparg);
6038 mips_output_64bit_xfer (direction, gparg, fparg);
6040 mips_function_arg_advance (&cum, mode, NULL, true);
6044 /* Write a MIPS16 stub for the current function. This stub is used
6045 for functions which take arguments in the floating-point registers.
6046 It is normal-mode code that moves the floating-point arguments
6047 into the general registers and then jumps to the MIPS16 code. */
6050 mips16_build_function_stub (void)
6052 const char *fnname, *alias_name, *separator;
6053 char *secname, *stubname;
6058 /* Create the name of the stub, and its unique section. */
6059 symbol = XEXP (DECL_RTL (current_function_decl), 0);
6060 alias = mips16_local_alias (symbol);
6062 fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
6063 alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
6064 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
6065 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
6067 /* Build a decl for the stub. */
6068 stubdecl = build_decl (BUILTINS_LOCATION,
6069 FUNCTION_DECL, get_identifier (stubname),
6070 build_function_type_list (void_type_node, NULL_TREE));
6071 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
6072 DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
6073 RESULT_DECL, NULL_TREE, void_type_node);
6075 /* Output a comment. */
6076 fprintf (asm_out_file, "\t# Stub function for %s (",
6077 current_function_name ());
6079 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
6081 fprintf (asm_out_file, "%s%s", separator,
6082 (f & 3) == 1 ? "float" : "double");
6085 fprintf (asm_out_file, ")\n");
6087 /* Start the function definition. */
6088 assemble_start_function (stubdecl, stubname);
6089 mips_start_function_definition (stubname, false);
6091 /* If generating pic2 code, either set up the global pointer or
6093 if (TARGET_ABICALLS_PIC2)
6095 if (TARGET_ABSOLUTE_ABICALLS)
6096 fprintf (asm_out_file, "\t.option\tpic0\n");
6099 output_asm_insn ("%(.cpload\t%^%)", NULL);
6100 /* Emit an R_MIPS_NONE relocation to tell the linker what the
6101 target function is. Use a local GOT access when loading the
6102 symbol, to cut down on the number of unnecessary GOT entries
6103 for stubs that aren't needed. */
6104 output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
6109 /* Load the address of the MIPS16 function into $25. Do this first so
6110 that targets with coprocessor interlocks can use an MFC1 to fill the
6112 output_asm_insn ("la\t%^,%0", &symbol);
6114 /* Move the arguments from floating-point registers to general registers. */
6115 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
6117 /* Jump to the MIPS16 function. */
6118 output_asm_insn ("jr\t%^", NULL);
6120 if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
6121 fprintf (asm_out_file, "\t.option\tpic2\n");
6123 mips_end_function_definition (stubname);
6125 /* If the linker needs to create a dynamic symbol for the target
6126 function, it will associate the symbol with the stub (which,
6127 unlike the target function, follows the proper calling conventions).
6128 It is therefore useful to have a local alias for the target function,
6129 so that it can still be identified as MIPS16 code. As an optimization,
6130 this symbol can also be used for indirect MIPS16 references from
6131 within this file. */
6132 ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);
6134 switch_to_section (function_section (current_function_decl));
6137 /* The current function is a MIPS16 function that returns a value in an FPR.
6138 Copy the return value from its soft-float to its hard-float location.
6139 libgcc2 has special non-MIPS16 helper functions for each case. */
6142 mips16_copy_fpr_return_value (void)
6144 rtx fn, insn, retval;
6146 enum machine_mode return_mode;
6149 return_type = DECL_RESULT (current_function_decl);
6150 return_mode = DECL_MODE (return_type);
6152 name = ACONCAT (("__mips16_ret_",
6153 mips16_call_stub_mode_suffix (return_mode),
6155 fn = mips16_stub_function (name);
6157 /* The function takes arguments in $2 (and possibly $3), so calls
6158 to it cannot be lazily bound. */
6159 SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;
6161 /* Model the call as something that takes the GPR return value as
6162 argument and returns an "updated" value. */
6163 retval = gen_rtx_REG (return_mode, GP_RETURN);
6164 insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
6165 const0_rtx, NULL_RTX, false);
6166 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
6169 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
6170 RETVAL is the location of the return value, or null if this is
6171 a "call" rather than a "call_value". ARGS_SIZE is the size of the
6172 arguments and FP_CODE is the code built by mips_function_arg;
6173 see the comment before the fp_code field in CUMULATIVE_ARGS for details.
6175 There are three alternatives:
6177 - If a stub was needed, emit the call and return the call insn itself.
6179 - If we can avoid using a stub by redirecting the call, set *FN_PTR
6180 to the new target and return null.
6182 - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
6185 A stub is needed for calls to functions that, in normal mode,
6186 receive arguments in FPRs or return values in FPRs. The stub
6187 copies the arguments from their soft-float positions to their
6188 hard-float positions, calls the real function, then copies the
6189 return value from its hard-float position to its soft-float
6192 We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
6193 If *FN_PTR turns out to be to a non-MIPS16 function, the linker
6194 automatically redirects the JAL to the stub, otherwise the JAL
6195 continues to call FN directly. */
6198 mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
6202 struct mips16_stub *l;
6205 /* We don't need to do anything if we aren't in MIPS16 mode, or if
6206 we were invoked with the -msoft-float option. */
6207 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
6210 /* Figure out whether the value might come back in a floating-point
6212 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
6214 /* We don't need to do anything if there were no floating-point
6215 arguments and the value will not be returned in a floating-point
6217 if (fp_code == 0 && !fp_ret_p)
6220 /* We don't need to do anything if this is a call to a special
6221 MIPS16 support function. */
6223 if (mips16_stub_function_p (fn))
6226 /* This code will only work for o32 and o64 abis. The other ABI's
6227 require more sophisticated support. */
6228 gcc_assert (TARGET_OLDABI);
6230 /* If we're calling via a function pointer, use one of the magic
6231 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
6232 Each stub expects the function address to arrive in register $2. */
6233 if (GET_CODE (fn) != SYMBOL_REF
6234 || !call_insn_operand (fn, VOIDmode))
6237 rtx stub_fn, insn, addr;
6240 /* If this is a locally-defined and locally-binding function,
6241 avoid the stub by calling the local alias directly. */
6242 if (mips16_local_function_p (fn))
6244 *fn_ptr = mips16_local_alias (fn);
6248 /* Create a SYMBOL_REF for the libgcc.a function. */
6250 sprintf (buf, "__mips16_call_stub_%s_%d",
6251 mips16_call_stub_mode_suffix (GET_MODE (retval)),
6254 sprintf (buf, "__mips16_call_stub_%d", fp_code);
6255 stub_fn = mips16_stub_function (buf);
6257 /* The function uses $2 as an argument, so calls to it
6258 cannot be lazily bound. */
6259 SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;
6261 /* Load the target function into $2. */
6262 addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
6263 lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);
6265 /* Emit the call. */
6266 insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
6267 args_size, NULL_RTX, lazy_p);
6269 /* Tell GCC that this call does indeed use the value of $2. */
6270 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);
6272 /* If we are handling a floating-point return value, we need to
6273 save $18 in the function prologue. Putting a note on the
6274 call will mean that df_regs_ever_live_p ($18) will be true if the
6275 call is not eliminated, and we can check that in the prologue
6278 CALL_INSN_FUNCTION_USAGE (insn) =
6279 gen_rtx_EXPR_LIST (VOIDmode,
6280 gen_rtx_CLOBBER (VOIDmode,
6281 gen_rtx_REG (word_mode, 18)),
6282 CALL_INSN_FUNCTION_USAGE (insn));
6287 /* We know the function we are going to call. If we have already
6288 built a stub, we don't need to do anything further. */
6289 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
6290 for (l = mips16_stubs; l != NULL; l = l->next)
6291 if (strcmp (l->name, fnname) == 0)
6296 const char *separator;
6297 char *secname, *stubname;
6298 tree stubid, stubdecl;
6301 /* If the function does not return in FPRs, the special stub
6305 If the function does return in FPRs, the stub section is named
6306 .mips16.call.fp.FNNAME
6308 Build a decl for the stub. */
6309 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
6311 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
6313 stubid = get_identifier (stubname);
6314 stubdecl = build_decl (BUILTINS_LOCATION,
6315 FUNCTION_DECL, stubid,
6316 build_function_type_list (void_type_node,
6318 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
6319 DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
6320 RESULT_DECL, NULL_TREE,
6323 /* Output a comment. */
6324 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
6326 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
6330 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
6332 fprintf (asm_out_file, "%s%s", separator,
6333 (f & 3) == 1 ? "float" : "double");
6336 fprintf (asm_out_file, ")\n");
6338 /* Start the function definition. */
6339 assemble_start_function (stubdecl, stubname);
6340 mips_start_function_definition (stubname, false);
6344 /* Load the address of the MIPS16 function into $25. Do this
6345 first so that targets with coprocessor interlocks can use
6346 an MFC1 to fill the delay slot. */
6347 if (TARGET_EXPLICIT_RELOCS)
6349 output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
6350 output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
6353 output_asm_insn ("la\t%^,%0", &fn);
6356 /* Move the arguments from general registers to floating-point
6358 mips_output_args_xfer (fp_code, 't');
6362 /* Jump to the previously-loaded address. */
6363 output_asm_insn ("jr\t%^", NULL);
6367 /* Save the return address in $18 and call the non-MIPS16 function.
6368 The stub's caller knows that $18 might be clobbered, even though
6369 $18 is usually a call-saved register. */
6370 fprintf (asm_out_file, "\tmove\t%s,%s\n",
6371 reg_names[GP_REG_FIRST + 18], reg_names[RETURN_ADDR_REGNUM]);
6372 output_asm_insn (MIPS_CALL ("jal", &fn, 0, -1), &fn);
6374 /* Move the result from floating-point registers to
6375 general registers. */
6376 switch (GET_MODE (retval))
6379 mips_output_32bit_xfer ('f', GP_RETURN + TARGET_BIG_ENDIAN,
6381 ? FP_REG_FIRST + MAX_FPRS_PER_FMT
6383 mips_output_32bit_xfer ('f', GP_RETURN + TARGET_LITTLE_ENDIAN,
6384 TARGET_LITTLE_ENDIAN
6385 ? FP_REG_FIRST + MAX_FPRS_PER_FMT
6387 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
6389 /* On 64-bit targets, complex floats are returned in
6390 a single GPR, such that "sd" on a suitably-aligned
6391 target would store the value correctly. */
6392 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
6393 reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
6394 reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
6395 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
6396 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
6397 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
6398 fprintf (asm_out_file, "\tdsrl\t%s,%s,32\n",
6399 reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
6400 reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
6401 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
6402 reg_names[GP_RETURN],
6403 reg_names[GP_RETURN],
6404 reg_names[GP_RETURN + 1]);
6409 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6413 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
6414 FP_REG_FIRST + MAX_FPRS_PER_FMT);
6418 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6424 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
6427 #ifdef ASM_DECLARE_FUNCTION_SIZE
6428 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
6431 mips_end_function_definition (stubname);
6433 /* Record this stub. */
6434 l = XNEW (struct mips16_stub);
6435 l->name = xstrdup (fnname);
6436 l->fp_ret_p = fp_ret_p;
6437 l->next = mips16_stubs;
6441 /* If we expect a floating-point return value, but we've built a
6442 stub which does not expect one, then we're in trouble. We can't
6443 use the existing stub, because it won't handle the floating-point
6444 value. We can't build a new stub, because the linker won't know
6445 which stub to use for the various calls in this object file.
6446 Fortunately, this case is illegal, since it means that a function
6447 was declared in two different ways in a single compilation. */
6448 if (fp_ret_p && !l->fp_ret_p)
6449 error ("cannot handle inconsistent calls to %qs", fnname);
6451 if (retval == NULL_RTX)
6452 insn = gen_call_internal_direct (fn, args_size);
6454 insn = gen_call_value_internal_direct (retval, fn, args_size);
6455 insn = mips_emit_call_insn (insn, fn, fn, false);
6457 /* If we are calling a stub which handles a floating-point return
6458 value, we need to arrange to save $18 in the prologue. We do this
6459 by marking the function call as using the register. The prologue
6460 will later see that it is used, and emit code to save it. */
6462 CALL_INSN_FUNCTION_USAGE (insn) =
6463 gen_rtx_EXPR_LIST (VOIDmode,
6464 gen_rtx_CLOBBER (VOIDmode,
6465 gen_rtx_REG (word_mode, 18)),
6466 CALL_INSN_FUNCTION_USAGE (insn));
6471 /* Expand a call of type TYPE. RESULT is where the result will go (null
6472 for "call"s and "sibcall"s), ADDR is the address of the function,
6473 ARGS_SIZE is the size of the arguments and AUX is the value passed
6474 to us by mips_function_arg. LAZY_P is true if this call already
6475 involves a lazily-bound function address (such as when calling
6476 functions through a MIPS16 hard-float stub).
6478 Return the call itself. */
6481 mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
6482 rtx args_size, rtx aux, bool lazy_p)
6484 rtx orig_addr, pattern, insn;
6487 fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
6488 insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
6491 gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
6496 if (!call_insn_operand (addr, VOIDmode))
6498 if (type == MIPS_CALL_EPILOGUE)
6499 addr = MIPS_EPILOGUE_TEMP (Pmode);
6501 addr = gen_reg_rtx (Pmode);
6502 lazy_p |= mips_load_call_address (type, addr, orig_addr);
6507 rtx (*fn) (rtx, rtx);
6509 if (type == MIPS_CALL_SIBCALL)
6510 fn = gen_sibcall_internal;
6512 fn = gen_call_internal;
6514 pattern = fn (addr, args_size);
6516 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
6518 /* Handle return values created by mips_return_fpr_pair. */
6519 rtx (*fn) (rtx, rtx, rtx, rtx);
6522 if (type == MIPS_CALL_SIBCALL)
6523 fn = gen_sibcall_value_multiple_internal;
6525 fn = gen_call_value_multiple_internal;
6527 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
6528 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
6529 pattern = fn (reg1, addr, args_size, reg2);
6533 rtx (*fn) (rtx, rtx, rtx);
6535 if (type == MIPS_CALL_SIBCALL)
6536 fn = gen_sibcall_value_internal;
6538 fn = gen_call_value_internal;
6540 /* Handle return values created by mips_return_fpr_single. */
6541 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
6542 result = XEXP (XVECEXP (result, 0, 0), 0);
6543 pattern = fn (result, addr, args_size);
6546 return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
6549 /* Split call instruction INSN into a $gp-clobbering call and
6550 (where necessary) an instruction to restore $gp from its save slot.
6551 CALL_PATTERN is the pattern of the new call. */
6554 mips_split_call (rtx insn, rtx call_pattern)
6556 emit_call_insn (call_pattern);
6557 if (!find_reg_note (insn, REG_NORETURN, 0))
6558 /* Pick a temporary register that is suitable for both MIPS16 and
6559 non-MIPS16 code. $4 and $5 are used for returning complex double
6560 values in soft-float code, so $6 is the first suitable candidate. */
6561 mips_restore_gp_from_cprestore_slot (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
6564 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
6567 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
6569 if (!TARGET_SIBCALLS)
6572 /* Interrupt handlers need special epilogue code and therefore can't
6574 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
6577 /* We can't do a sibcall if the called function is a MIPS16 function
6578 because there is no direct "jx" instruction equivalent to "jalx" to
6579 switch the ISA mode. We only care about cases where the sibling
6580 and normal calls would both be direct. */
6582 && mips_use_mips16_mode_p (decl)
6583 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6586 /* When -minterlink-mips16 is in effect, assume that non-locally-binding
6587 functions could be MIPS16 ones unless an attribute explicitly tells
6589 if (TARGET_INTERLINK_MIPS16
6591 && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
6592 && !mips_nomips16_decl_p (decl)
6593 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6600 /* Emit code to move general operand SRC into condition-code
6601 register DEST given that SCRATCH is a scratch TFmode FPR.
6608 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
6611 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
6615 /* Change the source to SFmode. */
6617 src = adjust_address (src, SFmode, 0);
6618 else if (REG_P (src) || GET_CODE (src) == SUBREG)
6619 src = gen_rtx_REG (SFmode, true_regnum (src));
6621 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
6622 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
6624 mips_emit_move (copy_rtx (fp1), src);
6625 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
6626 emit_insn (gen_slt_sf (dest, fp2, fp1));
6629 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
6630 Assume that the areas do not overlap. */
6633 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
6635 HOST_WIDE_INT offset, delta;
6636 unsigned HOST_WIDE_INT bits;
6638 enum machine_mode mode;
6641 /* Work out how many bits to move at a time. If both operands have
6642 half-word alignment, it is usually better to move in half words.
6643 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
6644 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
6645 Otherwise move word-sized chunks. */
6646 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
6647 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
6648 bits = BITS_PER_WORD / 2;
6650 bits = BITS_PER_WORD;
6652 mode = mode_for_size (bits, MODE_INT, 0);
6653 delta = bits / BITS_PER_UNIT;
6655 /* Allocate a buffer for the temporary registers. */
6656 regs = XALLOCAVEC (rtx, length / delta);
6658 /* Load as many BITS-sized chunks as possible. Use a normal load if
6659 the source has enough alignment, otherwise use left/right pairs. */
6660 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6662 regs[i] = gen_reg_rtx (mode);
6663 if (MEM_ALIGN (src) >= bits)
6664 mips_emit_move (regs[i], adjust_address (src, mode, offset));
6667 rtx part = adjust_address (src, BLKmode, offset);
6668 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
6673 /* Copy the chunks to the destination. */
6674 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
6675 if (MEM_ALIGN (dest) >= bits)
6676 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
6679 rtx part = adjust_address (dest, BLKmode, offset);
6680 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
6684 /* Mop up any left-over bytes. */
6685 if (offset < length)
6687 src = adjust_address (src, BLKmode, offset);
6688 dest = adjust_address (dest, BLKmode, offset);
6689 move_by_pieces (dest, src, length - offset,
6690 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
6694 /* Helper function for doing a loop-based block operation on memory
6695 reference MEM. Each iteration of the loop will operate on LENGTH
6698 Create a new base register for use within the loop and point it to
6699 the start of MEM. Create a new memory reference that uses this
6700 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
6703 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
6704 rtx *loop_reg, rtx *loop_mem)
6706 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
6708 /* Although the new mem does not refer to a known location,
6709 it does keep up to LENGTH bytes of alignment. */
6710 *loop_mem = change_address (mem, BLKmode, *loop_reg);
6711 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
6714 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
6715 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
6716 the memory regions do not overlap. */
6719 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
6720 HOST_WIDE_INT bytes_per_iter)
6722 rtx label, src_reg, dest_reg, final_src, test;
6723 HOST_WIDE_INT leftover;
6725 leftover = length % bytes_per_iter;
6728 /* Create registers and memory references for use within the loop. */
6729 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
6730 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
6732 /* Calculate the value that SRC_REG should have after the last iteration
6734 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
6737 /* Emit the start of the loop. */
6738 label = gen_label_rtx ();
6741 /* Emit the loop body. */
6742 mips_block_move_straight (dest, src, bytes_per_iter);
6744 /* Move on to the next block. */
6745 mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
6746 mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
6748 /* Emit the loop condition. */
6749 test = gen_rtx_NE (VOIDmode, src_reg, final_src);
6750 if (Pmode == DImode)
6751 emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
6753 emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
6755 /* Mop up any left-over bytes. */
6757 mips_block_move_straight (dest, src, leftover);
6760 /* Expand a movmemsi instruction, which copies LENGTH bytes from
6761 memory reference SRC to memory reference DEST. */
6764 mips_expand_block_move (rtx dest, rtx src, rtx length)
6766 if (CONST_INT_P (length))
6768 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
6770 mips_block_move_straight (dest, src, INTVAL (length));
6775 mips_block_move_loop (dest, src, INTVAL (length),
6776 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
6783 /* Expand a loop of synci insns for the address range [BEGIN, END). */
6786 mips_expand_synci_loop (rtx begin, rtx end)
6788 rtx inc, label, end_label, cmp_result, mask, length;
6790 /* Create end_label. */
6791 end_label = gen_label_rtx ();
6793 /* Check if begin equals end. */
6794 cmp_result = gen_rtx_EQ (VOIDmode, begin, end);
6795 emit_jump_insn (gen_condjump (cmp_result, end_label));
6797 /* Load INC with the cache line size (rdhwr INC,$1). */
6798 inc = gen_reg_rtx (Pmode);
6799 emit_insn (Pmode == SImode
6800 ? gen_rdhwr_synci_step_si (inc)
6801 : gen_rdhwr_synci_step_di (inc));
6803 /* Check if inc is 0. */
6804 cmp_result = gen_rtx_EQ (VOIDmode, inc, const0_rtx);
6805 emit_jump_insn (gen_condjump (cmp_result, end_label));
6807 /* Calculate mask. */
6808 mask = mips_force_unary (Pmode, NEG, inc);
6810 /* Mask out begin by mask. */
6811 begin = mips_force_binary (Pmode, AND, begin, mask);
6813 /* Calculate length. */
6814 length = mips_force_binary (Pmode, MINUS, end, begin);
6816 /* Loop back to here. */
6817 label = gen_label_rtx ();
6820 emit_insn (gen_synci (begin));
6822 /* Update length. */
6823 mips_emit_binary (MINUS, length, length, inc);
6826 mips_emit_binary (PLUS, begin, begin, inc);
6828 /* Check if length is greater than 0. */
6829 cmp_result = gen_rtx_GT (VOIDmode, length, const0_rtx);
6830 emit_jump_insn (gen_condjump (cmp_result, label));
6832 emit_label (end_label);
6835 /* Expand a QI or HI mode atomic memory operation.
6837 GENERATOR contains a pointer to the gen_* function that generates
6838 the SI mode underlying atomic operation using masks that we
6841 RESULT is the return register for the operation. Its value is NULL
6844 MEM is the location of the atomic access.
6846 OLDVAL is the first operand for the operation.
6848 NEWVAL is the optional second operand for the operation. Its value
6849 is NULL if unused. */
6852 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
6853 rtx result, rtx mem, rtx oldval, rtx newval)
6855 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
6856 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
6858 enum machine_mode mode;
6860 mode = GET_MODE (mem);
6862 /* Compute the address of the containing SImode value. */
6863 orig_addr = force_reg (Pmode, XEXP (mem, 0));
6864 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
6865 force_reg (Pmode, GEN_INT (-4)));
6867 /* Create a memory reference for it. */
6868 memsi = gen_rtx_MEM (SImode, memsi_addr);
6869 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
6870 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
6872 /* Work out the byte offset of the QImode or HImode value,
6873 counting from the least significant byte. */
6874 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
6875 if (TARGET_BIG_ENDIAN)
6876 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
6878 /* Multiply by eight to convert the shift value from bytes to bits. */
6879 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
6881 /* Make the final shift an SImode value, so that it can be used in
6882 SImode operations. */
6883 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
6885 /* Set MASK to an inclusive mask of the QImode or HImode value. */
6886 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
6887 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
6888 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
6890 /* Compute the equivalent exclusive mask. */
6891 inverted_mask = gen_reg_rtx (SImode);
6892 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
6893 gen_rtx_NOT (SImode, mask)));
6895 /* Shift the old value into place. */
6896 if (oldval != const0_rtx)
6898 oldval = convert_modes (SImode, mode, oldval, true);
6899 oldval = force_reg (SImode, oldval);
6900 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
6903 /* Do the same for the new value. */
6904 if (newval && newval != const0_rtx)
6906 newval = convert_modes (SImode, mode, newval, true);
6907 newval = force_reg (SImode, newval);
6908 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
6911 /* Do the SImode atomic access. */
6913 res = gen_reg_rtx (SImode);
6915 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
6917 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
6919 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
6925 /* Shift and convert the result. */
6926 mips_emit_binary (AND, res, res, mask);
6927 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
6928 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
6932 /* Return true if it is possible to use left/right accesses for a
6933 bitfield of WIDTH bits starting BITPOS bits into *OP. When
6934 returning true, update *OP, *LEFT and *RIGHT as follows:
6936 *OP is a BLKmode reference to the whole field.
6938 *LEFT is a QImode reference to the first byte if big endian or
6939 the last byte if little endian. This address can be used in the
6940 left-side instructions (LWL, SWL, LDL, SDL).
6942 *RIGHT is a QImode reference to the opposite end of the field and
6943 can be used in the patterning right-side instruction. */
6946 mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
6947 rtx *left, rtx *right)
6951 /* Check that the operand really is a MEM. Not all the extv and
6952 extzv predicates are checked. */
6956 /* Check that the size is valid. */
6957 if (width != 32 && (!TARGET_64BIT || width != 64))
6960 /* We can only access byte-aligned values. Since we are always passed
6961 a reference to the first byte of the field, it is not necessary to
6962 do anything with BITPOS after this check. */
6963 if (bitpos % BITS_PER_UNIT != 0)
6966 /* Reject aligned bitfields: we want to use a normal load or store
6967 instead of a left/right pair. */
6968 if (MEM_ALIGN (*op) >= width)
6971 /* Adjust *OP to refer to the whole field. This also has the effect
6972 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
6973 *op = adjust_address (*op, BLKmode, 0);
6974 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
6976 /* Get references to both ends of the field. We deliberately don't
6977 use the original QImode *OP for FIRST since the new BLKmode one
6978 might have a simpler address. */
6979 first = adjust_address (*op, QImode, 0);
6980 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
6982 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
6983 correspond to the MSB and RIGHT to the LSB. */
6984 if (TARGET_BIG_ENDIAN)
6985 *left = first, *right = last;
6987 *left = last, *right = first;
6992 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
6993 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
6994 the operation is the equivalent of:
6996 (set DEST (*_extract SRC WIDTH BITPOS))
6998 Return true on success. */
7001 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
7002 HOST_WIDE_INT bitpos)
7004 rtx left, right, temp;
7006 /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
7007 be a paradoxical word_mode subreg. This is the only case in which
7008 we allow the destination to be larger than the source. */
7009 if (GET_CODE (dest) == SUBREG
7010 && GET_MODE (dest) == DImode
7011 && GET_MODE (SUBREG_REG (dest)) == SImode)
7012 dest = SUBREG_REG (dest);
7014 /* After the above adjustment, the destination must be the same
7015 width as the source. */
7016 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
7019 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
7022 temp = gen_reg_rtx (GET_MODE (dest));
7023 if (GET_MODE (dest) == DImode)
7025 emit_insn (gen_mov_ldl (temp, src, left));
7026 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
7030 emit_insn (gen_mov_lwl (temp, src, left));
7031 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
7036 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
7037 BITPOS and SRC are the operands passed to the expander; the operation
7038 is the equivalent of:
7040 (set (zero_extract DEST WIDTH BITPOS) SRC)
7042 Return true on success. */
7045 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
7046 HOST_WIDE_INT bitpos)
7049 enum machine_mode mode;
7051 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
7054 mode = mode_for_size (width, MODE_INT, 0);
7055 src = gen_lowpart (mode, src);
7058 emit_insn (gen_mov_sdl (dest, src, left));
7059 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
7063 emit_insn (gen_mov_swl (dest, src, left));
7064 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
7069 /* Return true if X is a MEM with the same size as MODE. */
7072 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
7079 size = MEM_SIZE (x);
7080 return size && INTVAL (size) == GET_MODE_SIZE (mode);
7083 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
7084 source of an "ext" instruction or the destination of an "ins"
7085 instruction. OP must be a register operand and the following
7086 conditions must hold:
7088 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
7089 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
7090 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
7092 Also reject lengths equal to a word as they are better handled
7093 by the move patterns. */
7096 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
7098 if (!ISA_HAS_EXT_INS
7099 || !register_operand (op, VOIDmode)
7100 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
7103 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
7106 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
7112 /* Check if MASK and SHIFT are valid in mask-low-and-shift-left
7113 operation if MAXLEN is the maxium length of consecutive bits that
7114 can make up MASK. MODE is the mode of the operation. See
7115 mask_low_and_shift_len for the actual definition. */
7118 mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
7120 return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
7123 /* Return true iff OP1 and OP2 are valid operands together for the
7124 *and<MODE>3 and *and<MODE>3_mips16 patterns. For the cases to consider,
7125 see the table in the comment before the pattern. */
7128 and_operands_ok (enum machine_mode mode, rtx op1, rtx op2)
7130 return (memory_operand (op1, mode)
7131 ? and_load_operand (op2, mode)
7132 : and_reg_operand (op2, mode));
7135 /* The canonical form of a mask-low-and-shift-left operation is
7136 (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
7137 cleared. Thus we need to shift MASK to the right before checking if it
7138 is a valid mask value. MODE is the mode of the operation. If true
7139 return the length of the mask, otherwise return -1. */
7142 mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
7144 HOST_WIDE_INT shval;
7146 shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
7147 return exact_log2 ((UINTVAL (mask) >> shval) + 1);
7150 /* Return true if -msplit-addresses is selected and should be honored.
7152 -msplit-addresses is a half-way house between explicit relocations
7153 and the traditional assembler macros. It can split absolute 32-bit
7154 symbolic constants into a high/lo_sum pair but uses macros for other
7157 Like explicit relocation support for REL targets, it relies
7158 on GNU extensions in the assembler and the linker.
7160 Although this code should work for -O0, it has traditionally
7161 been treated as an optimization. */
7164 mips_split_addresses_p (void)
7166 return (TARGET_SPLIT_ADDRESSES
7170 && !ABI_HAS_64BIT_SYMBOLS);
7173 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
7176 mips_init_relocs (void)
7178 memset (mips_split_p, '\0', sizeof (mips_split_p));
7179 memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
7180 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
7181 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
7183 if (ABI_HAS_64BIT_SYMBOLS)
7185 if (TARGET_EXPLICIT_RELOCS)
7187 mips_split_p[SYMBOL_64_HIGH] = true;
7188 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
7189 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
7191 mips_split_p[SYMBOL_64_MID] = true;
7192 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
7193 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
7195 mips_split_p[SYMBOL_64_LOW] = true;
7196 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
7197 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
7199 mips_split_p[SYMBOL_ABSOLUTE] = true;
7200 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
7205 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
7207 mips_split_p[SYMBOL_ABSOLUTE] = true;
7208 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
7209 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
7211 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
7217 /* The high part is provided by a pseudo copy of $gp. */
7218 mips_split_p[SYMBOL_GP_RELATIVE] = true;
7219 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
7221 else if (TARGET_EXPLICIT_RELOCS)
7222 /* Small data constants are kept whole until after reload,
7223 then lowered by mips_rewrite_small_data. */
7224 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
7226 if (TARGET_EXPLICIT_RELOCS)
7228 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
7231 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
7232 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
7236 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
7237 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
7240 /* Expose the use of $28 as soon as possible. */
7241 mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;
7245 /* The HIGH and LO_SUM are matched by special .md patterns. */
7246 mips_split_p[SYMBOL_GOT_DISP] = true;
7248 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
7249 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
7250 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
7252 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
7253 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
7254 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
7259 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
7261 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
7262 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
7264 /* Expose the use of $28 as soon as possible. */
7265 mips_split_p[SYMBOL_GOT_DISP] = true;
7271 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
7272 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
7273 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
7276 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
7277 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
7279 mips_split_p[SYMBOL_DTPREL] = true;
7280 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
7281 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
7283 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
7285 mips_split_p[SYMBOL_TPREL] = true;
7286 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
7287 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
7289 mips_lo_relocs[SYMBOL_HALF] = "%half(";
7292 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
7293 in context CONTEXT. RELOCS is the array of relocations to use. */
7296 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
7297 const char **relocs)
7299 enum mips_symbol_type symbol_type;
7302 symbol_type = mips_classify_symbolic_expression (op, context);
7303 gcc_assert (relocs[symbol_type]);
7305 fputs (relocs[symbol_type], file);
7306 output_addr_const (file, mips_strip_unspec_address (op));
7307 for (p = relocs[symbol_type]; *p != 0; p++)
7312 /* Start a new block with the given asm switch enabled. If we need
7313 to print a directive, emit PREFIX before it and SUFFIX after it. */
7316 mips_push_asm_switch_1 (struct mips_asm_switch *asm_switch,
7317 const char *prefix, const char *suffix)
7319 if (asm_switch->nesting_level == 0)
7320 fprintf (asm_out_file, "%s.set\tno%s%s", prefix, asm_switch->name, suffix);
7321 asm_switch->nesting_level++;
7324 /* Likewise, but end a block. */
7327 mips_pop_asm_switch_1 (struct mips_asm_switch *asm_switch,
7328 const char *prefix, const char *suffix)
7330 gcc_assert (asm_switch->nesting_level);
7331 asm_switch->nesting_level--;
7332 if (asm_switch->nesting_level == 0)
7333 fprintf (asm_out_file, "%s.set\t%s%s", prefix, asm_switch->name, suffix);
7336 /* Wrappers around mips_push_asm_switch_1 and mips_pop_asm_switch_1
7337 that either print a complete line or print nothing. */
7340 mips_push_asm_switch (struct mips_asm_switch *asm_switch)
7342 mips_push_asm_switch_1 (asm_switch, "\t", "\n");
7346 mips_pop_asm_switch (struct mips_asm_switch *asm_switch)
7348 mips_pop_asm_switch_1 (asm_switch, "\t", "\n");
7351 /* Print the text for PRINT_OPERAND punctation character CH to FILE.
7352 The punctuation characters are:
7354 '(' Start a nested ".set noreorder" block.
7355 ')' End a nested ".set noreorder" block.
7356 '[' Start a nested ".set noat" block.
7357 ']' End a nested ".set noat" block.
7358 '<' Start a nested ".set nomacro" block.
7359 '>' End a nested ".set nomacro" block.
7360 '*' Behave like %(%< if generating a delayed-branch sequence.
7361 '#' Print a nop if in a ".set noreorder" block.
7362 '/' Like '#', but do nothing within a delayed-branch sequence.
7363 '?' Print "l" if mips_branch_likely is true
7364 '~' Print a nop if mips_branch_likely is true
7365 '.' Print the name of the register with a hard-wired zero (zero or $0).
7366 '@' Print the name of the assembler temporary register (at or $1).
7367 '^' Print the name of the pic call-through register (t9 or $25).
7368 '+' Print the name of the gp register (usually gp or $28).
7369 '$' Print the name of the stack pointer register (sp or $29).
7371 See also mips_init_print_operand_pucnt. */
7374 mips_print_operand_punctuation (FILE *file, int ch)
7379 mips_push_asm_switch_1 (&mips_noreorder, "", "\n\t");
7383 mips_pop_asm_switch_1 (&mips_noreorder, "\n\t", "");
7387 mips_push_asm_switch_1 (&mips_noat, "", "\n\t");
7391 mips_pop_asm_switch_1 (&mips_noat, "\n\t", "");
7395 mips_push_asm_switch_1 (&mips_nomacro, "", "\n\t");
7399 mips_pop_asm_switch_1 (&mips_nomacro, "\n\t", "");
7403 if (final_sequence != 0)
7405 mips_print_operand_punctuation (file, '(');
7406 mips_print_operand_punctuation (file, '<');
7411 if (mips_noreorder.nesting_level > 0)
7412 fputs ("\n\tnop", file);
7416 /* Print an extra newline so that the delayed insn is separated
7417 from the following ones. This looks neater and is consistent
7418 with non-nop delayed sequences. */
7419 if (mips_noreorder.nesting_level > 0 && final_sequence == 0)
7420 fputs ("\n\tnop\n", file);
7424 if (mips_branch_likely)
7429 if (mips_branch_likely)
7430 fputs ("\n\tnop", file);
7434 fputs (reg_names[GP_REG_FIRST + 0], file);
7438 fputs (reg_names[AT_REGNUM], file);
7442 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
7446 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
7450 fputs (reg_names[STACK_POINTER_REGNUM], file);
7459 /* Initialize mips_print_operand_punct. */
7462 mips_init_print_operand_punct (void)
7466 for (p = "()[]<>*#/?~.@^+$"; *p; p++)
7467 mips_print_operand_punct[(unsigned char) *p] = true;
7470 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
7471 associated with condition CODE. Print the condition part of the
7475 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
7489 /* Conveniently, the MIPS names for these conditions are the same
7490 as their RTL equivalents. */
7491 fputs (GET_RTX_NAME (code), file);
7495 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7500 /* Likewise floating-point branches. */
7503 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
7508 fputs ("c1f", file);
7512 fputs ("c1t", file);
7516 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7521 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7524 mips_print_operand_punct_valid_p (unsigned char code)
7526 return mips_print_operand_punct[code];
7529 /* Implement TARGET_PRINT_OPERAND. The MIPS-specific operand codes are:
7531 'X' Print CONST_INT OP in hexadecimal format.
7532 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
7533 'd' Print CONST_INT OP in decimal.
7534 'm' Print one less than CONST_INT OP in decimal.
7535 'h' Print the high-part relocation associated with OP, after stripping
7537 'R' Print the low-part relocation associated with OP.
7538 'C' Print the integer branch condition for comparison OP.
7539 'N' Print the inverse of the integer branch condition for comparison OP.
7540 'F' Print the FPU branch condition for comparison OP.
7541 'W' Print the inverse of the FPU branch condition for comparison OP.
7542 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
7543 'z' for (eq:?I ...), 'n' for (ne:?I ...).
7544 't' Like 'T', but with the EQ/NE cases reversed
7545 'Y' Print mips_fp_conditions[INTVAL (OP)]
7546 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
7547 'q' Print a DSP accumulator register.
7548 'D' Print the second part of a double-word register or memory operand.
7549 'L' Print the low-order register in a double-word register operand.
7550 'M' Print high-order register in a double-word register operand.
7551 'z' Print $0 if OP is zero, otherwise print OP normally. */
7554 mips_print_operand (FILE *file, rtx op, int letter)
7558 if (mips_print_operand_punct_valid_p (letter))
7560 mips_print_operand_punctuation (file, letter);
7565 code = GET_CODE (op);
7570 if (CONST_INT_P (op))
7571 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
7573 output_operand_lossage ("invalid use of '%%%c'", letter);
7577 if (CONST_INT_P (op))
7578 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
7580 output_operand_lossage ("invalid use of '%%%c'", letter);
7584 if (CONST_INT_P (op))
7585 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
7587 output_operand_lossage ("invalid use of '%%%c'", letter);
7591 if (CONST_INT_P (op))
7592 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
7594 output_operand_lossage ("invalid use of '%%%c'", letter);
7600 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
7604 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
7608 mips_print_int_branch_condition (file, code, letter);
7612 mips_print_int_branch_condition (file, reverse_condition (code), letter);
7616 mips_print_float_branch_condition (file, code, letter);
7620 mips_print_float_branch_condition (file, reverse_condition (code),
7627 int truth = (code == NE) == (letter == 'T');
7628 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
7633 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
7634 fputs (mips_fp_conditions[UINTVAL (op)], file);
7636 output_operand_lossage ("'%%%c' is not a valid operand prefix",
7643 mips_print_operand (file, op, 0);
7649 if (code == REG && MD_REG_P (REGNO (op)))
7650 fprintf (file, "$ac0");
7651 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
7652 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
7654 output_operand_lossage ("invalid use of '%%%c'", letter);
7662 unsigned int regno = REGNO (op);
7663 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
7664 || (letter == 'L' && TARGET_BIG_ENDIAN)
7667 else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
7668 output_operand_lossage ("invalid use of '%%%c'", letter);
7669 /* We need to print $0 .. $31 for COP0 registers. */
7670 if (COP0_REG_P (regno))
7671 fprintf (file, "$%s", ®_names[regno][4]);
7673 fprintf (file, "%s", reg_names[regno]);
7679 output_address (plus_constant (XEXP (op, 0), 4));
7680 else if (letter && letter != 'z')
7681 output_operand_lossage ("invalid use of '%%%c'", letter);
7683 output_address (XEXP (op, 0));
7687 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
7688 fputs (reg_names[GP_REG_FIRST], file);
7689 else if (letter && letter != 'z')
7690 output_operand_lossage ("invalid use of '%%%c'", letter);
7691 else if (CONST_GP_P (op))
7692 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
7694 output_addr_const (file, mips_strip_unspec_address (op));
7700 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
7703 mips_print_operand_address (FILE *file, rtx x)
7705 struct mips_address_info addr;
7707 if (mips_classify_address (&addr, x, word_mode, true))
7711 mips_print_operand (file, addr.offset, 0);
7712 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7715 case ADDRESS_LO_SUM:
7716 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
7718 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
7721 case ADDRESS_CONST_INT:
7722 output_addr_const (file, x);
7723 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
7726 case ADDRESS_SYMBOLIC:
7727 output_addr_const (file, mips_strip_unspec_address (x));
7733 /* Implement TARGET_ENCODE_SECTION_INFO. */
7736 mips_encode_section_info (tree decl, rtx rtl, int first)
7738 default_encode_section_info (decl, rtl, first);
7740 if (TREE_CODE (decl) == FUNCTION_DECL)
7742 rtx symbol = XEXP (rtl, 0);
7743 tree type = TREE_TYPE (decl);
7745 /* Encode whether the symbol is short or long. */
7746 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
7747 || mips_far_type_p (type))
7748 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
7752 /* Implement TARGET_SELECT_RTX_SECTION. */
7755 mips_select_rtx_section (enum machine_mode mode, rtx x,
7756 unsigned HOST_WIDE_INT align)
7758 /* ??? Consider using mergeable small data sections. */
7759 if (mips_rtx_constant_in_small_data_p (mode))
7760 return get_named_section (NULL, ".sdata", 0);
7762 return default_elf_select_rtx_section (mode, x, align);
7765 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7767 The complication here is that, with the combination TARGET_ABICALLS
7768 && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
7769 absolute addresses, and should therefore not be included in the
7770 read-only part of a DSO. Handle such cases by selecting a normal
7771 data section instead of a read-only one. The logic apes that in
7772 default_function_rodata_section. */
7775 mips_function_rodata_section (tree decl)
7777 if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
7778 return default_function_rodata_section (decl);
7780 if (decl && DECL_SECTION_NAME (decl))
7782 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7783 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7785 char *rname = ASTRDUP (name);
7787 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7789 else if (flag_function_sections
7790 && flag_data_sections
7791 && strncmp (name, ".text.", 6) == 0)
7793 char *rname = ASTRDUP (name);
7794 memcpy (rname + 1, "data", 4);
7795 return get_section (rname, SECTION_WRITE, decl);
7798 return data_section;
7801 /* Implement TARGET_IN_SMALL_DATA_P. */
7804 mips_in_small_data_p (const_tree decl)
7806 unsigned HOST_WIDE_INT size;
7808 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7811 /* We don't yet generate small-data references for -mabicalls
7812 or VxWorks RTP code. See the related -G handling in
7813 mips_option_override. */
7814 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
7817 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7821 /* Reject anything that isn't in a known small-data section. */
7822 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7823 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7826 /* If a symbol is defined externally, the assembler will use the
7827 usual -G rules when deciding how to implement macros. */
7828 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
7831 else if (TARGET_EMBEDDED_DATA)
7833 /* Don't put constants into the small data section: we want them
7834 to be in ROM rather than RAM. */
7835 if (TREE_CODE (decl) != VAR_DECL)
7838 if (TREE_READONLY (decl)
7839 && !TREE_SIDE_EFFECTS (decl)
7840 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7844 /* Enforce -mlocal-sdata. */
7845 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
7848 /* Enforce -mextern-sdata. */
7849 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
7851 if (DECL_EXTERNAL (decl))
7853 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
7857 /* We have traditionally not treated zero-sized objects as small data,
7858 so this is now effectively part of the ABI. */
7859 size = int_size_in_bytes (TREE_TYPE (decl));
7860 return size > 0 && size <= mips_small_data_threshold;
7863 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7864 anchors for small data: the GP register acts as an anchor in that
7865 case. We also don't want to use them for PC-relative accesses,
7866 where the PC acts as an anchor. */
7869 mips_use_anchors_for_symbol_p (const_rtx symbol)
7871 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
7873 case SYMBOL_PC_RELATIVE:
7874 case SYMBOL_GP_RELATIVE:
7878 return default_use_anchors_for_symbol_p (symbol);
7882 /* The MIPS debug format wants all automatic variables and arguments
7883 to be in terms of the virtual frame pointer (stack pointer before
7884 any adjustment in the function), while the MIPS 3.0 linker wants
7885 the frame pointer to be the stack pointer after the initial
7886 adjustment. So, we do the adjustment here. The arg pointer (which
7887 is eliminated) points to the virtual frame pointer, while the frame
7888 pointer (which may be eliminated) points to the stack pointer after
7889 the initial adjustments. */
7892 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
7894 rtx offset2 = const0_rtx;
7895 rtx reg = eliminate_constant_term (addr, &offset2);
7898 offset = INTVAL (offset2);
7900 if (reg == stack_pointer_rtx
7901 || reg == frame_pointer_rtx
7902 || reg == hard_frame_pointer_rtx)
7904 offset -= cfun->machine->frame.total_size;
7905 if (reg == hard_frame_pointer_rtx)
7906 offset += cfun->machine->frame.hard_frame_pointer_offset;
7909 /* sdbout_parms does not want this to crash for unrecognized cases. */
7911 else if (reg != arg_pointer_rtx)
7912 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
7919 /* Implement ASM_OUTPUT_EXTERNAL. */
7922 mips_output_external (FILE *file, tree decl, const char *name)
7924 default_elf_asm_output_external (file, decl, name);
7926 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
7927 set in order to avoid putting out names that are never really
7929 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
7931 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
7933 /* When using assembler macros, emit .extern directives for
7934 all small-data externs so that the assembler knows how
7937 In most cases it would be safe (though pointless) to emit
7938 .externs for other symbols too. One exception is when an
7939 object is within the -G limit but declared by the user to
7940 be in a section other than .sbss or .sdata. */
7941 fputs ("\t.extern\t", file);
7942 assemble_name (file, name);
7943 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
7944 int_size_in_bytes (TREE_TYPE (decl)));
7949 /* Implement TARGET_ASM_OUTPUT_SOURCE_FILENAME. */
7952 mips_output_filename (FILE *stream, const char *name)
7954 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
7956 if (write_symbols == DWARF2_DEBUG)
7958 else if (mips_output_filename_first_time)
7960 mips_output_filename_first_time = 0;
7961 num_source_filenames += 1;
7962 current_function_file = name;
7963 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7964 output_quoted_string (stream, name);
7965 putc ('\n', stream);
7967 /* If we are emitting stabs, let dbxout.c handle this (except for
7968 the mips_output_filename_first_time case). */
7969 else if (write_symbols == DBX_DEBUG)
7971 else if (name != current_function_file
7972 && strcmp (name, current_function_file) != 0)
7974 num_source_filenames += 1;
7975 current_function_file = name;
7976 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7977 output_quoted_string (stream, name);
7978 putc ('\n', stream);
7982 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
7984 static void ATTRIBUTE_UNUSED
7985 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
7990 fputs ("\t.dtprelword\t", file);
7994 fputs ("\t.dtpreldword\t", file);
8000 output_addr_const (file, x);
8001 fputs ("+0x8000", file);
8004 /* Implement TARGET_DWARF_REGISTER_SPAN. */
8007 mips_dwarf_register_span (rtx reg)
8010 enum machine_mode mode;
8012 /* By default, GCC maps increasing register numbers to increasing
8013 memory locations, but paired FPRs are always little-endian,
8014 regardless of the prevailing endianness. */
8015 mode = GET_MODE (reg);
8016 if (FP_REG_P (REGNO (reg))
8017 && TARGET_BIG_ENDIAN
8018 && MAX_FPRS_PER_FMT > 1
8019 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
8021 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
8022 high = mips_subword (reg, true);
8023 low = mips_subword (reg, false);
8024 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
8030 /* Implement ASM_OUTPUT_ASCII. */
8033 mips_output_ascii (FILE *stream, const char *string, size_t len)
8039 fprintf (stream, "\t.ascii\t\"");
8040 for (i = 0; i < len; i++)
8044 c = (unsigned char) string[i];
8047 if (c == '\\' || c == '\"')
8049 putc ('\\', stream);
8057 fprintf (stream, "\\%03o", c);
8061 if (cur_pos > 72 && i+1 < len)
8064 fprintf (stream, "\"\n\t.ascii\t\"");
8067 fprintf (stream, "\"\n");
8070 /* Emit either a label, .comm, or .lcomm directive. When using assembler
8071 macros, mark the symbol as written so that mips_asm_output_external
8072 won't emit an .extern for it. STREAM is the output file, NAME is the
8073 name of the symbol, INIT_STRING is the string that should be written
8074 before the symbol and FINAL_STRING is the string that should be
8075 written after it. FINAL_STRING is a printf format that consumes the
8076 remaining arguments. */
8079 mips_declare_object (FILE *stream, const char *name, const char *init_string,
8080 const char *final_string, ...)
8084 fputs (init_string, stream);
8085 assemble_name (stream, name);
8086 va_start (ap, final_string);
8087 vfprintf (stream, final_string, ap);
8090 if (!TARGET_EXPLICIT_RELOCS)
8092 tree name_tree = get_identifier (name);
8093 TREE_ASM_WRITTEN (name_tree) = 1;
8097 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
8098 NAME is the name of the object and ALIGN is the required alignment
8099 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
8100 alignment argument. */
8103 mips_declare_common_object (FILE *stream, const char *name,
8104 const char *init_string,
8105 unsigned HOST_WIDE_INT size,
8106 unsigned int align, bool takes_alignment_p)
8108 if (!takes_alignment_p)
8110 size += (align / BITS_PER_UNIT) - 1;
8111 size -= size % (align / BITS_PER_UNIT);
8112 mips_declare_object (stream, name, init_string,
8113 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
8116 mips_declare_object (stream, name, init_string,
8117 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
8118 size, align / BITS_PER_UNIT);
8121 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
8122 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
8125 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
8126 unsigned HOST_WIDE_INT size,
8129 /* If the target wants uninitialized const declarations in
8130 .rdata then don't put them in .comm. */
8131 if (TARGET_EMBEDDED_DATA
8132 && TARGET_UNINIT_CONST_IN_RODATA
8133 && TREE_CODE (decl) == VAR_DECL
8134 && TREE_READONLY (decl)
8135 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
8137 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
8138 targetm.asm_out.globalize_label (stream, name);
8140 switch_to_section (readonly_data_section);
8141 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
8142 mips_declare_object (stream, name, "",
8143 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
8147 mips_declare_common_object (stream, name, "\n\t.comm\t",
8151 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8152 extern int size_directive_output;
8154 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
8155 definitions except that it uses mips_declare_object to emit the label. */
8158 mips_declare_object_name (FILE *stream, const char *name,
8159 tree decl ATTRIBUTE_UNUSED)
8161 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8162 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8165 size_directive_output = 0;
8166 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
8170 size_directive_output = 1;
8171 size = int_size_in_bytes (TREE_TYPE (decl));
8172 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8175 mips_declare_object (stream, name, "", ":\n");
8178 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
8181 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
8185 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8186 if (!flag_inhibit_size_directive
8187 && DECL_SIZE (decl) != 0
8190 && DECL_INITIAL (decl) == error_mark_node
8191 && !size_directive_output)
8195 size_directive_output = 1;
8196 size = int_size_in_bytes (TREE_TYPE (decl));
8197 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8202 /* Return the FOO in the name of the ".mdebug.FOO" section associated
8203 with the current ABI. */
8206 mips_mdebug_abi_name (void)
8219 return TARGET_64BIT ? "eabi64" : "eabi32";
8225 /* Implement TARGET_ASM_FILE_START. */
8228 mips_file_start (void)
8230 default_file_start ();
8232 /* Generate a special section to describe the ABI switches used to
8233 produce the resultant binary. This is unnecessary on IRIX and
8234 causes unwanted warnings from the native linker. */
8237 /* Record the ABI itself. Modern versions of binutils encode
8238 this information in the ELF header flags, but GDB needs the
8239 information in order to correctly debug binaries produced by
8240 older binutils. See the function mips_gdbarch_init in
8242 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
8243 mips_mdebug_abi_name ());
8245 /* There is no ELF header flag to distinguish long32 forms of the
8246 EABI from long64 forms. Emit a special section to help tools
8247 such as GDB. Do the same for o64, which is sometimes used with
8249 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
8250 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
8251 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
8253 #ifdef HAVE_AS_GNU_ATTRIBUTE
8257 /* No floating-point operations, -mno-float. */
8258 if (TARGET_NO_FLOAT)
8260 /* Soft-float code, -msoft-float. */
8261 else if (!TARGET_HARD_FLOAT_ABI)
8263 /* Single-float code, -msingle-float. */
8264 else if (!TARGET_DOUBLE_FLOAT)
8266 /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64. */
8267 else if (!TARGET_64BIT && TARGET_FLOAT64)
8269 /* Regular FP code, FP regs same size as GP regs, -mdouble-float. */
8273 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", attr);
8278 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
8279 if (TARGET_ABICALLS)
8281 fprintf (asm_out_file, "\t.abicalls\n");
8282 if (TARGET_ABICALLS_PIC0)
8283 fprintf (asm_out_file, "\t.option\tpic0\n");
8286 if (flag_verbose_asm)
8287 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
8289 mips_small_data_threshold, mips_arch_info->name, mips_isa);
8292 /* Make the last instruction frame-related and note that it performs
8293 the operation described by FRAME_PATTERN. */
8296 mips_set_frame_expr (rtx frame_pattern)
8300 insn = get_last_insn ();
8301 RTX_FRAME_RELATED_P (insn) = 1;
8302 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8307 /* Return a frame-related rtx that stores REG at MEM.
8308 REG must be a single register. */
8311 mips_frame_set (rtx mem, rtx reg)
8315 /* If we're saving the return address register and the DWARF return
8316 address column differs from the hard register number, adjust the
8317 note reg to refer to the former. */
8318 if (REGNO (reg) == RETURN_ADDR_REGNUM
8319 && DWARF_FRAME_RETURN_COLUMN != RETURN_ADDR_REGNUM)
8320 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
8322 set = gen_rtx_SET (VOIDmode, mem, reg);
8323 RTX_FRAME_RELATED_P (set) = 1;
8328 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
8329 mips16e_s2_s8_regs[X], it must also save the registers in indexes
8330 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
8331 static const unsigned char mips16e_s2_s8_regs[] = {
8332 30, 23, 22, 21, 20, 19, 18
8334 static const unsigned char mips16e_a0_a3_regs[] = {
8338 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
8339 ordered from the uppermost in memory to the lowest in memory. */
8340 static const unsigned char mips16e_save_restore_regs[] = {
8341 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
8344 /* Return the index of the lowest X in the range [0, SIZE) for which
8345 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
8348 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
8353 for (i = 0; i < size; i++)
8354 if (BITSET_P (mask, regs[i]))
8360 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
8361 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
8362 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
8363 is true for all indexes (X, SIZE). */
8366 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
8367 unsigned int size, unsigned int *num_regs_ptr)
8371 i = mips16e_find_first_register (*mask_ptr, regs, size);
8372 for (i++; i < size; i++)
8373 if (!BITSET_P (*mask_ptr, regs[i]))
8376 *mask_ptr |= 1 << regs[i];
8380 /* Return a simplified form of X using the register values in REG_VALUES.
8381 REG_VALUES[R] is the last value assigned to hard register R, or null
8382 if R has not been modified.
8384 This function is rather limited, but is good enough for our purposes. */
8387 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
8389 x = avoid_constant_pool_reference (x);
8393 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8394 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
8395 x0, GET_MODE (XEXP (x, 0)));
8398 if (ARITHMETIC_P (x))
8400 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
8401 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
8402 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
8406 && reg_values[REGNO (x)]
8407 && !rtx_unstable_p (reg_values[REGNO (x)]))
8408 return reg_values[REGNO (x)];
8413 /* Return true if (set DEST SRC) stores an argument register into its
8414 caller-allocated save slot, storing the number of that argument
8415 register in *REGNO_PTR if so. REG_VALUES is as for
8416 mips16e_collect_propagate_value. */
8419 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
8420 unsigned int *regno_ptr)
8422 unsigned int argno, regno;
8423 HOST_WIDE_INT offset, required_offset;
8426 /* Check that this is a word-mode store. */
8427 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
8430 /* Check that the register being saved is an unmodified argument
8432 regno = REGNO (src);
8433 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
8435 argno = regno - GP_ARG_FIRST;
8437 /* Check whether the address is an appropriate stack-pointer or
8438 frame-pointer access. */
8439 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
8440 mips_split_plus (addr, &base, &offset);
8441 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
8442 if (base == hard_frame_pointer_rtx)
8443 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
8444 else if (base != stack_pointer_rtx)
8446 if (offset != required_offset)
8453 /* A subroutine of mips_expand_prologue, called only when generating
8454 MIPS16e SAVE instructions. Search the start of the function for any
8455 instructions that save argument registers into their caller-allocated
8456 save slots. Delete such instructions and return a value N such that
8457 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8458 instructions redundant. */
8461 mips16e_collect_argument_saves (void)
8463 rtx reg_values[FIRST_PSEUDO_REGISTER];
8464 rtx insn, next, set, dest, src;
8465 unsigned int nargs, regno;
8467 push_topmost_sequence ();
8469 memset (reg_values, 0, sizeof (reg_values));
8470 for (insn = get_insns (); insn; insn = next)
8472 next = NEXT_INSN (insn);
8473 if (NOTE_P (insn) || DEBUG_INSN_P (insn))
8479 set = PATTERN (insn);
8480 if (GET_CODE (set) != SET)
8483 dest = SET_DEST (set);
8484 src = SET_SRC (set);
8485 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
8487 if (!BITSET_P (cfun->machine->frame.mask, regno))
8490 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
8493 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
8494 reg_values[REGNO (dest)]
8495 = mips16e_collect_propagate_value (src, reg_values);
8499 pop_topmost_sequence ();
8504 /* Return a move between register REGNO and memory location SP + OFFSET.
8505 Make the move a load if RESTORE_P, otherwise make it a frame-related
8509 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
8514 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
8515 reg = gen_rtx_REG (SImode, regno);
8517 ? gen_rtx_SET (VOIDmode, reg, mem)
8518 : mips_frame_set (mem, reg));
8521 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8522 The instruction must:
8524 - Allocate or deallocate SIZE bytes in total; SIZE is known
8527 - Save or restore as many registers in *MASK_PTR as possible.
8528 The instruction saves the first registers at the top of the
8529 allocated area, with the other registers below it.
8531 - Save NARGS argument registers above the allocated area.
8533 (NARGS is always zero if RESTORE_P.)
8535 The SAVE and RESTORE instructions cannot save and restore all general
8536 registers, so there may be some registers left over for the caller to
8537 handle. Destructively modify *MASK_PTR so that it contains the registers
8538 that still need to be saved or restored. The caller can save these
8539 registers in the memory immediately below *OFFSET_PTR, which is a
8540 byte offset from the bottom of the allocated stack area. */
8543 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
8544 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
8548 HOST_WIDE_INT offset, top_offset;
8549 unsigned int i, regno;
8552 gcc_assert (cfun->machine->frame.num_fp == 0);
8554 /* Calculate the number of elements in the PARALLEL. We need one element
8555 for the stack adjustment, one for each argument register save, and one
8556 for each additional register move. */
8558 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8559 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
8562 /* Create the final PARALLEL. */
8563 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
8566 /* Add the stack pointer adjustment. */
8567 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8568 plus_constant (stack_pointer_rtx,
8569 restore_p ? size : -size));
8570 RTX_FRAME_RELATED_P (set) = 1;
8571 XVECEXP (pattern, 0, n++) = set;
8573 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8574 top_offset = restore_p ? size : 0;
8576 /* Save the arguments. */
8577 for (i = 0; i < nargs; i++)
8579 offset = top_offset + i * UNITS_PER_WORD;
8580 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
8581 XVECEXP (pattern, 0, n++) = set;
8584 /* Then fill in the other register moves. */
8585 offset = top_offset;
8586 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
8588 regno = mips16e_save_restore_regs[i];
8589 if (BITSET_P (*mask_ptr, regno))
8591 offset -= UNITS_PER_WORD;
8592 set = mips16e_save_restore_reg (restore_p, offset, regno);
8593 XVECEXP (pattern, 0, n++) = set;
8594 *mask_ptr &= ~(1 << regno);
8598 /* Tell the caller what offset it should use for the remaining registers. */
8599 *offset_ptr = size + (offset - top_offset);
8601 gcc_assert (n == XVECLEN (pattern, 0));
8606 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8607 pointer. Return true if PATTERN matches the kind of instruction
8608 generated by mips16e_build_save_restore. If INFO is nonnull,
8609 initialize it when returning true. */
8612 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
8613 struct mips16e_save_restore_info *info)
8615 unsigned int i, nargs, mask, extra;
8616 HOST_WIDE_INT top_offset, save_offset, offset;
8617 rtx set, reg, mem, base;
8620 if (!GENERATE_MIPS16E_SAVE_RESTORE)
8623 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
8624 top_offset = adjust > 0 ? adjust : 0;
8626 /* Interpret all other members of the PARALLEL. */
8627 save_offset = top_offset - UNITS_PER_WORD;
8631 for (n = 1; n < XVECLEN (pattern, 0); n++)
8633 /* Check that we have a SET. */
8634 set = XVECEXP (pattern, 0, n);
8635 if (GET_CODE (set) != SET)
8638 /* Check that the SET is a load (if restoring) or a store
8640 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
8644 /* Check that the address is the sum of the stack pointer and a
8645 possibly-zero constant offset. */
8646 mips_split_plus (XEXP (mem, 0), &base, &offset);
8647 if (base != stack_pointer_rtx)
8650 /* Check that SET's other operand is a register. */
8651 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
8655 /* Check for argument saves. */
8656 if (offset == top_offset + nargs * UNITS_PER_WORD
8657 && REGNO (reg) == GP_ARG_FIRST + nargs)
8659 else if (offset == save_offset)
8661 while (mips16e_save_restore_regs[i++] != REGNO (reg))
8662 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
8665 mask |= 1 << REGNO (reg);
8666 save_offset -= UNITS_PER_WORD;
8672 /* Check that the restrictions on register ranges are met. */
8674 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
8675 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
8676 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
8677 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
8681 /* Make sure that the topmost argument register is not saved twice.
8682 The checks above ensure that the same is then true for the other
8683 argument registers. */
8684 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
8687 /* Pass back information, if requested. */
8690 info->nargs = nargs;
8692 info->size = (adjust > 0 ? adjust : -adjust);
8698 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8699 for the register range [MIN_REG, MAX_REG]. Return a pointer to
8700 the null terminator. */
8703 mips16e_add_register_range (char *s, unsigned int min_reg,
8704 unsigned int max_reg)
8706 if (min_reg != max_reg)
8707 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8709 s += sprintf (s, ",%s", reg_names[min_reg]);
8713 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8714 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
8717 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
8719 static char buffer[300];
8721 struct mips16e_save_restore_info info;
8722 unsigned int i, end;
8725 /* Parse the pattern. */
8726 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
8729 /* Add the mnemonic. */
8730 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
8733 /* Save the arguments. */
8735 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
8736 reg_names[GP_ARG_FIRST + info.nargs - 1]);
8737 else if (info.nargs == 1)
8738 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
8740 /* Emit the amount of stack space to allocate or deallocate. */
8741 s += sprintf (s, "%d", (int) info.size);
8743 /* Save or restore $16. */
8744 if (BITSET_P (info.mask, 16))
8745 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
8747 /* Save or restore $17. */
8748 if (BITSET_P (info.mask, 17))
8749 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
8751 /* Save or restore registers in the range $s2...$s8, which
8752 mips16e_s2_s8_regs lists in decreasing order. Note that this
8753 is a software register range; the hardware registers are not
8754 numbered consecutively. */
8755 end = ARRAY_SIZE (mips16e_s2_s8_regs);
8756 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
8758 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
8759 mips16e_s2_s8_regs[i]);
8761 /* Save or restore registers in the range $a0...$a3. */
8762 end = ARRAY_SIZE (mips16e_a0_a3_regs);
8763 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
8765 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
8766 mips16e_a0_a3_regs[end - 1]);
8768 /* Save or restore $31. */
8769 if (BITSET_P (info.mask, RETURN_ADDR_REGNUM))
8770 s += sprintf (s, ",%s", reg_names[RETURN_ADDR_REGNUM]);
8775 /* Return true if the current function returns its value in a floating-point
8776 register in MIPS16 mode. */
8779 mips16_cfun_returns_in_fpr_p (void)
8781 tree return_type = DECL_RESULT (current_function_decl);
8782 return (TARGET_MIPS16
8783 && TARGET_HARD_FLOAT_ABI
8784 && !aggregate_value_p (return_type, current_function_decl)
8785 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
8788 /* Return true if predicate PRED is true for at least one instruction.
8789 Cache the result in *CACHE, and assume that the result is true
8790 if *CACHE is already true. */
8793 mips_find_gp_ref (bool *cache, bool (*pred) (rtx))
8799 push_topmost_sequence ();
8800 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8801 if (USEFUL_INSN_P (insn) && pred (insn))
8806 pop_topmost_sequence ();
8811 /* Return true if INSN refers to the global pointer in an "inflexible" way.
8812 See mips_cfun_has_inflexible_gp_ref_p for details. */
8815 mips_insn_has_inflexible_gp_ref_p (rtx insn)
8817 /* Uses of pic_offset_table_rtx in CALL_INSN_FUNCTION_USAGE
8818 indicate that the target could be a traditional MIPS
8819 lazily-binding stub. */
8820 return find_reg_fusage (insn, USE, pic_offset_table_rtx);
8823 /* Return true if the current function refers to the global pointer
8824 in a way that forces $28 to be valid. This means that we can't
8825 change the choice of global pointer, even for NewABI code.
8827 One example of this (and one which needs several checks) is that
8828 $28 must be valid when calling traditional MIPS lazy-binding stubs.
8829 (This restriction does not apply to PLTs.) */
8832 mips_cfun_has_inflexible_gp_ref_p (void)
8834 /* If the function has a nonlocal goto, $28 must hold the correct
8835 global pointer for the target function. That is, the target
8836 of the goto implicitly uses $28. */
8837 if (crtl->has_nonlocal_goto)
8840 if (TARGET_ABICALLS_PIC2)
8842 /* Symbolic accesses implicitly use the global pointer unless
8843 -mexplicit-relocs is in effect. JAL macros to symbolic addresses
8844 might go to traditional MIPS lazy-binding stubs. */
8845 if (!TARGET_EXPLICIT_RELOCS)
8848 /* FUNCTION_PROFILER includes a JAL to _mcount, which again
8849 can be lazily-bound. */
8853 /* MIPS16 functions that return in FPRs need to call an
8854 external libgcc routine. This call is only made explict
8855 during mips_expand_epilogue, and it too might be lazily bound. */
8856 if (mips16_cfun_returns_in_fpr_p ())
8860 return mips_find_gp_ref (&cfun->machine->has_inflexible_gp_insn_p,
8861 mips_insn_has_inflexible_gp_ref_p);
8864 /* Return true if INSN refers to the global pointer in a "flexible" way.
8865 See mips_cfun_has_flexible_gp_ref_p for details. */
8868 mips_insn_has_flexible_gp_ref_p (rtx insn)
8870 return (get_attr_got (insn) != GOT_UNSET
8871 || mips_small_data_pattern_p (PATTERN (insn))
8872 || reg_overlap_mentioned_p (pic_offset_table_rtx, PATTERN (insn)));
8875 /* Return true if the current function references the global pointer,
8876 but if those references do not inherently require the global pointer
8877 to be $28. Assume !mips_cfun_has_inflexible_gp_ref_p (). */
8880 mips_cfun_has_flexible_gp_ref_p (void)
8882 /* Reload can sometimes introduce constant pool references
8883 into a function that otherwise didn't need them. For example,
8884 suppose we have an instruction like:
8886 (set (reg:DF R1) (float:DF (reg:SI R2)))
8888 If R2 turns out to be a constant such as 1, the instruction may
8889 have a REG_EQUAL note saying that R1 == 1.0. Reload then has
8890 the option of using this constant if R2 doesn't get allocated
8893 In cases like these, reload will have added the constant to the
8894 pool but no instruction will yet refer to it. */
8895 if (TARGET_ABICALLS_PIC2 && !reload_completed && crtl->uses_const_pool)
8898 return mips_find_gp_ref (&cfun->machine->has_flexible_gp_insn_p,
8899 mips_insn_has_flexible_gp_ref_p);
8902 /* Return the register that should be used as the global pointer
8903 within this function. Return INVALID_REGNUM if the function
8904 doesn't need a global pointer. */
8907 mips_global_pointer (void)
8911 /* $gp is always available unless we're using a GOT. */
8912 if (!TARGET_USE_GOT)
8913 return GLOBAL_POINTER_REGNUM;
8915 /* If there are inflexible references to $gp, we must use the
8916 standard register. */
8917 if (mips_cfun_has_inflexible_gp_ref_p ())
8918 return GLOBAL_POINTER_REGNUM;
8920 /* If there are no current references to $gp, then the only uses
8921 we can introduce later are those involved in long branches. */
8922 if (TARGET_ABSOLUTE_JUMPS && !mips_cfun_has_flexible_gp_ref_p ())
8923 return INVALID_REGNUM;
8925 /* If the global pointer is call-saved, try to use a call-clobbered
8927 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
8928 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
8929 if (!df_regs_ever_live_p (regno)
8930 && call_really_used_regs[regno]
8931 && !fixed_regs[regno]
8932 && regno != PIC_FUNCTION_ADDR_REGNUM)
8935 return GLOBAL_POINTER_REGNUM;
8938 /* Return true if the current function's prologue must load the global
8939 pointer value into pic_offset_table_rtx and store the same value in
8940 the function's cprestore slot (if any).
8942 One problem we have to deal with is that, when emitting GOT-based
8943 position independent code, long-branch sequences will need to load
8944 the address of the branch target from the GOT. We don't know until
8945 the very end of compilation whether (and where) the function needs
8946 long branches, so we must ensure that _any_ branch can access the
8947 global pointer in some form. However, we do not want to pessimize
8948 the usual case in which all branches are short.
8950 We handle this as follows:
8952 (1) During reload, we set cfun->machine->global_pointer to
8953 INVALID_REGNUM if we _know_ that the current function
8954 doesn't need a global pointer. This is only valid if
8955 long branches don't need the GOT.
8957 Otherwise, we assume that we might need a global pointer
8958 and pick an appropriate register.
8960 (2) If cfun->machine->global_pointer != INVALID_REGNUM,
8961 we ensure that the global pointer is available at every
8962 block boundary bar entry and exit. We do this in one of two ways:
8964 - If the function has a cprestore slot, we ensure that this
8965 slot is valid at every branch. However, as explained in
8966 point (6) below, there is no guarantee that pic_offset_table_rtx
8967 itself is valid if new uses of the global pointer are introduced
8968 after the first post-epilogue split.
8970 We guarantee that the cprestore slot is valid by loading it
8971 into a fake register, CPRESTORE_SLOT_REGNUM. We then make
8972 this register live at every block boundary bar function entry
8973 and exit. It is then invalid to move the load (and thus the
8974 preceding store) across a block boundary.
8976 - If the function has no cprestore slot, we guarantee that
8977 pic_offset_table_rtx itself is valid at every branch.
8979 See mips_eh_uses for the handling of the register liveness.
8981 (3) During prologue and epilogue generation, we emit "ghost"
8982 placeholder instructions to manipulate the global pointer.
8984 (4) During prologue generation, we set cfun->machine->must_initialize_gp_p
8985 and cfun->machine->must_restore_gp_when_clobbered_p if we already know
8986 that the function needs a global pointer. (There is no need to set
8987 them earlier than this, and doing it as late as possible leads to
8988 fewer false positives.)
8990 (5) If cfun->machine->must_initialize_gp_p is true during a
8991 split_insns pass, we split the ghost instructions into real
8992 instructions. These split instructions can then be optimized in
8993 the usual way. Otherwise, we keep the ghost instructions intact,
8994 and optimize for the case where they aren't needed. We still
8995 have the option of splitting them later, if we need to introduce
8996 new uses of the global pointer.
8998 For example, the scheduler ignores a ghost instruction that
8999 stores $28 to the stack, but it handles the split form of
9000 the ghost instruction as an ordinary store.
9002 (6) [OldABI only.] If cfun->machine->must_restore_gp_when_clobbered_p
9003 is true during the first post-epilogue split_insns pass, we split
9004 calls and restore_gp patterns into instructions that explicitly
9005 load pic_offset_table_rtx from the cprestore slot. Otherwise,
9006 we split these patterns into instructions that _don't_ load from
9009 If cfun->machine->must_restore_gp_when_clobbered_p is true at the
9010 time of the split, then any instructions that exist at that time
9011 can make free use of pic_offset_table_rtx. However, if we want
9012 to introduce new uses of the global pointer after the split,
9013 we must explicitly load the value from the cprestore slot, since
9014 pic_offset_table_rtx itself might not be valid at a given point
9017 The idea is that we want to be able to delete redundant
9018 loads from the cprestore slot in the usual case where no
9019 long branches are needed.
9021 (7) If cfun->machine->must_initialize_gp_p is still false at the end
9022 of md_reorg, we decide whether the global pointer is needed for
9023 long branches. If so, we set cfun->machine->must_initialize_gp_p
9024 to true and split the ghost instructions into real instructions
9027 Note that the ghost instructions must have a zero length for three reasons:
9029 - Giving the length of the underlying $gp sequence might cause
9030 us to use long branches in cases where they aren't really needed.
9032 - They would perturb things like alignment calculations.
9034 - More importantly, the hazard detection in md_reorg relies on
9035 empty instructions having a zero length.
9037 If we find a long branch and split the ghost instructions at the
9038 end of md_reorg, the split could introduce more long branches.
9039 That isn't a problem though, because we still do the split before
9040 the final shorten_branches pass.
9042 This is extremely ugly, but it seems like the best compromise between
9043 correctness and efficiency. */
9046 mips_must_initialize_gp_p (void)
9048 return cfun->machine->must_initialize_gp_p;
9051 /* Return true if REGNO is a register that is ordinarily call-clobbered
9052 but must nevertheless be preserved by an interrupt handler. */
9055 mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
9057 if (MD_REG_P (regno))
9060 if (TARGET_DSP && DSP_ACC_REG_P (regno))
9063 if (GP_REG_P (regno) && !cfun->machine->use_shadow_register_set_p)
9065 /* $0 is hard-wired. */
9066 if (regno == GP_REG_FIRST)
9069 /* The interrupt handler can treat kernel registers as
9070 scratch registers. */
9071 if (KERNEL_REG_P (regno))
9074 /* The function will return the stack pointer to its original value
9076 if (regno == STACK_POINTER_REGNUM)
9079 /* Otherwise, return true for registers that aren't ordinarily
9081 return call_really_used_regs[regno];
9087 /* Return true if the current function should treat register REGNO
9091 mips_cfun_call_saved_reg_p (unsigned int regno)
9093 /* If the user makes an ordinarily-call-saved register global,
9094 that register is no longer call-saved. */
9095 if (global_regs[regno])
9098 /* Interrupt handlers need to save extra registers. */
9099 if (cfun->machine->interrupt_handler_p
9100 && mips_interrupt_extra_call_saved_reg_p (regno))
9103 /* call_insns preserve $28 unless they explicitly say otherwise,
9104 so call_really_used_regs[] treats $28 as call-saved. However,
9105 we want the ABI property rather than the default call_insn
9107 return (regno == GLOBAL_POINTER_REGNUM
9108 ? TARGET_CALL_SAVED_GP
9109 : !call_really_used_regs[regno]);
9112 /* Return true if the function body might clobber register REGNO.
9113 We know that REGNO is call-saved. */
9116 mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno)
9118 /* Some functions should be treated as clobbering all call-saved
9120 if (crtl->saves_all_registers)
9123 /* DF handles cases where a register is explicitly referenced in
9124 the rtl. Incoming values are passed in call-clobbered registers,
9125 so we can assume that any live call-saved register is set within
9127 if (df_regs_ever_live_p (regno))
9130 /* Check for registers that are clobbered by FUNCTION_PROFILER.
9131 These clobbers are not explicit in the rtl. */
9132 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
9135 /* If we're using a call-saved global pointer, the function's
9136 prologue will need to set it up. */
9137 if (cfun->machine->global_pointer == regno)
9140 /* The function's prologue will need to set the frame pointer if
9141 frame_pointer_needed. */
9142 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
9145 /* If a MIPS16 function returns a value in FPRs, its epilogue
9146 will need to call an external libgcc routine. This yet-to-be
9147 generated call_insn will clobber $31. */
9148 if (regno == RETURN_ADDR_REGNUM && mips16_cfun_returns_in_fpr_p ())
9151 /* If REGNO is ordinarily call-clobbered, we must assume that any
9152 called function could modify it. */
9153 if (cfun->machine->interrupt_handler_p
9154 && !current_function_is_leaf
9155 && mips_interrupt_extra_call_saved_reg_p (regno))
9161 /* Return true if the current function must save register REGNO. */
9164 mips_save_reg_p (unsigned int regno)
9166 if (mips_cfun_call_saved_reg_p (regno))
9168 if (mips_cfun_might_clobber_call_saved_reg_p (regno))
9171 /* Save both registers in an FPR pair if either one is used. This is
9172 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
9173 register to be used without the even register. */
9174 if (FP_REG_P (regno)
9175 && MAX_FPRS_PER_FMT == 2
9176 && mips_cfun_might_clobber_call_saved_reg_p (regno + 1))
9180 /* We need to save the incoming return address if __builtin_eh_return
9181 is being used to set a different return address. */
9182 if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
9188 /* Populate the current function's mips_frame_info structure.
9190 MIPS stack frames look like:
9192 +-------------------------------+
9194 | incoming stack arguments |
9196 +-------------------------------+
9198 | caller-allocated save area |
9199 A | for register arguments |
9201 +-------------------------------+ <-- incoming stack pointer
9203 | callee-allocated save area |
9204 B | for arguments that are |
9205 | split between registers and |
9208 +-------------------------------+ <-- arg_pointer_rtx
9210 C | callee-allocated save area |
9211 | for register varargs |
9213 +-------------------------------+ <-- frame_pointer_rtx
9214 | | + cop0_sp_offset
9215 | COP0 reg save area | + UNITS_PER_WORD
9217 +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
9218 | | + UNITS_PER_WORD
9219 | accumulator save area |
9221 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
9222 | | + UNITS_PER_HWFPVALUE
9225 +-------------------------------+ <-- stack_pointer_rtx + gp_sp_offset
9226 | | + UNITS_PER_WORD
9229 +-------------------------------+ <-- frame_pointer_rtx with
9230 | | \ -fstack-protector
9231 | local variables | | var_size
9233 +-------------------------------+
9235 | $gp save area | | cprestore_size
9237 P +-------------------------------+ <-- hard_frame_pointer_rtx for
9239 | outgoing stack arguments | |
9241 +-------------------------------+ | args_size
9243 | caller-allocated save area | |
9244 | for register arguments | |
9246 +-------------------------------+ <-- stack_pointer_rtx
9247 frame_pointer_rtx without
9249 hard_frame_pointer_rtx for
9252 At least two of A, B and C will be empty.
9254 Dynamic stack allocations such as alloca insert data at point P.
9255 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
9256 hard_frame_pointer_rtx unchanged. */
9259 mips_compute_frame_info (void)
9261 struct mips_frame_info *frame;
9262 HOST_WIDE_INT offset, size;
9263 unsigned int regno, i;
9265 /* Set this function's interrupt properties. */
9266 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
9269 error ("the %<interrupt%> attribute requires a MIPS32r2 processor");
9270 else if (TARGET_HARD_FLOAT)
9271 error ("the %<interrupt%> attribute requires %<-msoft-float%>");
9272 else if (TARGET_MIPS16)
9273 error ("interrupt handlers cannot be MIPS16 functions");
9276 cfun->machine->interrupt_handler_p = true;
9277 cfun->machine->use_shadow_register_set_p =
9278 mips_use_shadow_register_set_p (TREE_TYPE (current_function_decl));
9279 cfun->machine->keep_interrupts_masked_p =
9280 mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl));
9281 cfun->machine->use_debug_exception_return_p =
9282 mips_use_debug_exception_return_p (TREE_TYPE
9283 (current_function_decl));
9287 frame = &cfun->machine->frame;
9288 memset (frame, 0, sizeof (*frame));
9289 size = get_frame_size ();
9291 cfun->machine->global_pointer = mips_global_pointer ();
9293 /* The first two blocks contain the outgoing argument area and the $gp save
9294 slot. This area isn't needed in leaf functions, but if the
9295 target-independent frame size is nonzero, we have already committed to
9296 allocating these in STARTING_FRAME_OFFSET for !FRAME_GROWS_DOWNWARD. */
9297 if ((size == 0 || FRAME_GROWS_DOWNWARD) && current_function_is_leaf)
9299 /* The MIPS 3.0 linker does not like functions that dynamically
9300 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
9301 looks like we are trying to create a second frame pointer to the
9302 function, so allocate some stack space to make it happy. */
9303 if (cfun->calls_alloca)
9304 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
9306 frame->args_size = 0;
9307 frame->cprestore_size = 0;
9311 frame->args_size = crtl->outgoing_args_size;
9312 frame->cprestore_size = MIPS_GP_SAVE_AREA_SIZE;
9314 offset = frame->args_size + frame->cprestore_size;
9316 /* Move above the local variables. */
9317 frame->var_size = MIPS_STACK_ALIGN (size);
9318 offset += frame->var_size;
9320 /* Find out which GPRs we need to save. */
9321 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
9322 if (mips_save_reg_p (regno))
9325 frame->mask |= 1 << (regno - GP_REG_FIRST);
9328 /* If this function calls eh_return, we must also save and restore the
9329 EH data registers. */
9330 if (crtl->calls_eh_return)
9331 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
9334 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
9337 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
9338 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
9339 save all later registers too. */
9340 if (GENERATE_MIPS16E_SAVE_RESTORE)
9342 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
9343 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
9344 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
9345 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
9348 /* Move above the GPR save area. */
9349 if (frame->num_gp > 0)
9351 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
9352 frame->gp_sp_offset = offset - UNITS_PER_WORD;
9355 /* Find out which FPRs we need to save. This loop must iterate over
9356 the same space as its companion in mips_for_each_saved_gpr_and_fpr. */
9357 if (TARGET_HARD_FLOAT)
9358 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
9359 if (mips_save_reg_p (regno))
9361 frame->num_fp += MAX_FPRS_PER_FMT;
9362 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
9365 /* Move above the FPR save area. */
9366 if (frame->num_fp > 0)
9368 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
9369 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
9372 /* Add in space for the interrupt context information. */
9373 if (cfun->machine->interrupt_handler_p)
9376 if (mips_save_reg_p (LO_REGNUM) || mips_save_reg_p (HI_REGNUM))
9379 frame->acc_mask |= (1 << 0);
9382 /* Check accumulators 1, 2, 3. */
9383 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
9384 if (mips_save_reg_p (i) || mips_save_reg_p (i + 1))
9387 frame->acc_mask |= 1 << (((i - DSP_ACC_REG_FIRST) / 2) + 1);
9390 /* All interrupt context functions need space to preserve STATUS. */
9391 frame->num_cop0_regs++;
9393 /* If we don't keep interrupts masked, we need to save EPC. */
9394 if (!cfun->machine->keep_interrupts_masked_p)
9395 frame->num_cop0_regs++;
9398 /* Move above the accumulator save area. */
9399 if (frame->num_acc > 0)
9401 /* Each accumulator needs 2 words. */
9402 offset += frame->num_acc * 2 * UNITS_PER_WORD;
9403 frame->acc_sp_offset = offset - UNITS_PER_WORD;
9406 /* Move above the COP0 register save area. */
9407 if (frame->num_cop0_regs > 0)
9409 offset += frame->num_cop0_regs * UNITS_PER_WORD;
9410 frame->cop0_sp_offset = offset - UNITS_PER_WORD;
9413 /* Move above the callee-allocated varargs save area. */
9414 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
9415 frame->arg_pointer_offset = offset;
9417 /* Move above the callee-allocated area for pretend stack arguments. */
9418 offset += crtl->args.pretend_args_size;
9419 frame->total_size = offset;
9421 /* Work out the offsets of the save areas from the top of the frame. */
9422 if (frame->gp_sp_offset > 0)
9423 frame->gp_save_offset = frame->gp_sp_offset - offset;
9424 if (frame->fp_sp_offset > 0)
9425 frame->fp_save_offset = frame->fp_sp_offset - offset;
9426 if (frame->acc_sp_offset > 0)
9427 frame->acc_save_offset = frame->acc_sp_offset - offset;
9428 if (frame->num_cop0_regs > 0)
9429 frame->cop0_save_offset = frame->cop0_sp_offset - offset;
9431 /* MIPS16 code offsets the frame pointer by the size of the outgoing
9432 arguments. This tends to increase the chances of using unextended
9433 instructions for local variables and incoming arguments. */
9435 frame->hard_frame_pointer_offset = frame->args_size;
9438 /* Return the style of GP load sequence that is being used for the
9439 current function. */
9441 enum mips_loadgp_style
9442 mips_current_loadgp_style (void)
9444 if (!TARGET_USE_GOT || cfun->machine->global_pointer == INVALID_REGNUM)
9450 if (TARGET_ABSOLUTE_ABICALLS)
9451 return LOADGP_ABSOLUTE;
9453 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
9456 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
9459 mips_frame_pointer_required (void)
9461 /* If the function contains dynamic stack allocations, we need to
9462 use the frame pointer to access the static parts of the frame. */
9463 if (cfun->calls_alloca)
9466 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
9467 reload may be unable to compute the address of a local variable,
9468 since there is no way to add a large constant to the stack pointer
9469 without using a second temporary register. */
9472 mips_compute_frame_info ();
9473 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
9480 /* Make sure that we're not trying to eliminate to the wrong hard frame
9484 mips_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
9486 return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
9489 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
9490 or argument pointer. TO is either the stack pointer or hard frame
9494 mips_initial_elimination_offset (int from, int to)
9496 HOST_WIDE_INT offset;
9498 mips_compute_frame_info ();
9500 /* Set OFFSET to the offset from the end-of-prologue stack pointer. */
9503 case FRAME_POINTER_REGNUM:
9504 if (FRAME_GROWS_DOWNWARD)
9505 offset = (cfun->machine->frame.args_size
9506 + cfun->machine->frame.cprestore_size
9507 + cfun->machine->frame.var_size);
9512 case ARG_POINTER_REGNUM:
9513 offset = cfun->machine->frame.arg_pointer_offset;
9520 if (to == HARD_FRAME_POINTER_REGNUM)
9521 offset -= cfun->machine->frame.hard_frame_pointer_offset;
9526 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
9529 mips_extra_live_on_entry (bitmap regs)
9533 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
9534 the global pointer. */
9535 if (!TARGET_ABSOLUTE_ABICALLS)
9536 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
9538 /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
9539 the global pointer. */
9541 bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);
9543 /* See the comment above load_call<mode> for details. */
9544 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
9548 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
9552 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
9557 return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
9560 /* Emit code to change the current function's return address to
9561 ADDRESS. SCRATCH is available as a scratch register, if needed.
9562 ADDRESS and SCRATCH are both word-mode GPRs. */
9565 mips_set_return_address (rtx address, rtx scratch)
9569 gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
9570 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
9571 cfun->machine->frame.gp_sp_offset);
9572 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
9575 /* Return true if the current function has a cprestore slot. */
9578 mips_cfun_has_cprestore_slot_p (void)
9580 return (cfun->machine->global_pointer != INVALID_REGNUM
9581 && cfun->machine->frame.cprestore_size > 0);
9584 /* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the
9585 cprestore slot. LOAD_P is true if the caller wants to load from
9586 the cprestore slot; it is false if the caller wants to store to
9590 mips_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset,
9593 const struct mips_frame_info *frame;
9595 frame = &cfun->machine->frame;
9596 /* .cprestore always uses the stack pointer instead of the frame pointer.
9597 We have a free choice for direct stores for non-MIPS16 functions,
9598 and for MIPS16 functions whose cprestore slot is in range of the
9599 stack pointer. Using the stack pointer would sometimes give more
9600 (early) scheduling freedom, but using the frame pointer would
9601 sometimes give more (late) scheduling freedom. It's hard to
9602 predict which applies to a given function, so let's keep things
9605 Loads must always use the frame pointer in functions that call
9606 alloca, and there's little benefit to using the stack pointer
9608 if (frame_pointer_needed && !(TARGET_CPRESTORE_DIRECTIVE && !load_p))
9610 *base = hard_frame_pointer_rtx;
9611 *offset = frame->args_size - frame->hard_frame_pointer_offset;
9615 *base = stack_pointer_rtx;
9616 *offset = frame->args_size;
9620 /* Return true if X is the load or store address of the cprestore slot;
9621 LOAD_P says which. */
9624 mips_cprestore_address_p (rtx x, bool load_p)
9626 rtx given_base, required_base;
9627 HOST_WIDE_INT given_offset, required_offset;
9629 mips_split_plus (x, &given_base, &given_offset);
9630 mips_get_cprestore_base_and_offset (&required_base, &required_offset, load_p);
9631 return given_base == required_base && given_offset == required_offset;
9634 /* Return a MEM rtx for the cprestore slot. LOAD_P is true if we are
9635 going to load from it, false if we are going to store to it.
9636 Use TEMP as a temporary register if need be. */
9639 mips_cprestore_slot (rtx temp, bool load_p)
9642 HOST_WIDE_INT offset;
9644 mips_get_cprestore_base_and_offset (&base, &offset, load_p);
9645 return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
9648 /* Emit instructions to save global pointer value GP into cprestore
9649 slot MEM. OFFSET is the offset that MEM applies to the base register.
9651 MEM may not be a legitimate address. If it isn't, TEMP is a
9652 temporary register that can be used, otherwise it is a SCRATCH. */
9655 mips_save_gp_to_cprestore_slot (rtx mem, rtx offset, rtx gp, rtx temp)
9657 if (TARGET_CPRESTORE_DIRECTIVE)
9659 gcc_assert (gp == pic_offset_table_rtx);
9660 emit_insn (gen_cprestore (mem, offset));
9663 mips_emit_move (mips_cprestore_slot (temp, false), gp);
9666 /* Restore $gp from its save slot, using TEMP as a temporary base register
9667 if need be. This function is for o32 and o64 abicalls only.
9669 See mips_must_initialize_gp_p for details about how we manage the
9673 mips_restore_gp_from_cprestore_slot (rtx temp)
9675 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI && epilogue_completed);
9677 if (!cfun->machine->must_restore_gp_when_clobbered_p)
9679 emit_note (NOTE_INSN_DELETED);
9685 mips_emit_move (temp, mips_cprestore_slot (temp, true));
9686 mips_emit_move (pic_offset_table_rtx, temp);
9689 mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp, true));
9690 if (!TARGET_EXPLICIT_RELOCS)
9691 emit_insn (gen_blockage ());
9694 /* A function to save or store a register. The first argument is the
9695 register and the second is the stack slot. */
9696 typedef void (*mips_save_restore_fn) (rtx, rtx);
9698 /* Use FN to save or restore register REGNO. MODE is the register's
9699 mode and OFFSET is the offset of its save slot from the current
9703 mips_save_restore_reg (enum machine_mode mode, int regno,
9704 HOST_WIDE_INT offset, mips_save_restore_fn fn)
9708 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
9709 fn (gen_rtx_REG (mode, regno), mem);
9712 /* Call FN for each accumlator that is saved by the current function.
9713 SP_OFFSET is the offset of the current stack pointer from the start
9717 mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
9719 HOST_WIDE_INT offset;
9722 offset = cfun->machine->frame.acc_sp_offset - sp_offset;
9723 if (BITSET_P (cfun->machine->frame.acc_mask, 0))
9725 mips_save_restore_reg (word_mode, LO_REGNUM, offset, fn);
9726 offset -= UNITS_PER_WORD;
9727 mips_save_restore_reg (word_mode, HI_REGNUM, offset, fn);
9728 offset -= UNITS_PER_WORD;
9731 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
9732 if (BITSET_P (cfun->machine->frame.acc_mask,
9733 ((regno - DSP_ACC_REG_FIRST) / 2) + 1))
9735 mips_save_restore_reg (word_mode, regno, offset, fn);
9736 offset -= UNITS_PER_WORD;
9740 /* Call FN for each register that is saved by the current function.
9741 SP_OFFSET is the offset of the current stack pointer from the start
9745 mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
9746 mips_save_restore_fn fn)
9748 enum machine_mode fpr_mode;
9749 HOST_WIDE_INT offset;
9752 /* Save registers starting from high to low. The debuggers prefer at least
9753 the return register be stored at func+4, and also it allows us not to
9754 need a nop in the epilogue if at least one register is reloaded in
9755 addition to return address. */
9756 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
9757 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
9758 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
9760 /* Record the ra offset for use by mips_function_profiler. */
9761 if (regno == RETURN_ADDR_REGNUM)
9762 cfun->machine->frame.ra_fp_offset = offset + sp_offset;
9763 mips_save_restore_reg (word_mode, regno, offset, fn);
9764 offset -= UNITS_PER_WORD;
9767 /* This loop must iterate over the same space as its companion in
9768 mips_compute_frame_info. */
9769 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
9770 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
9771 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
9772 regno >= FP_REG_FIRST;
9773 regno -= MAX_FPRS_PER_FMT)
9774 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
9776 mips_save_restore_reg (fpr_mode, regno, offset, fn);
9777 offset -= GET_MODE_SIZE (fpr_mode);
9781 /* Return true if a move between register REGNO and its save slot (MEM)
9782 can be done in a single move. LOAD_P is true if we are loading
9783 from the slot, false if we are storing to it. */
9786 mips_direct_save_slot_move_p (unsigned int regno, rtx mem, bool load_p)
9788 /* There is a specific MIPS16 instruction for saving $31 to the stack. */
9789 if (TARGET_MIPS16 && !load_p && regno == RETURN_ADDR_REGNUM)
9792 return mips_secondary_reload_class (REGNO_REG_CLASS (regno),
9793 GET_MODE (mem), mem, load_p) == NO_REGS;
9796 /* Emit a move from SRC to DEST, given that one of them is a register
9797 save slot and that the other is a register. TEMP is a temporary
9798 GPR of the same mode that is available if need be. */
9801 mips_emit_save_slot_move (rtx dest, rtx src, rtx temp)
9808 regno = REGNO (src);
9813 regno = REGNO (dest);
9817 if (regno == cfun->machine->global_pointer && !mips_must_initialize_gp_p ())
9819 /* We don't yet know whether we'll need this instruction or not.
9820 Postpone the decision by emitting a ghost move. This move
9821 is specifically not frame-related; only the split version is. */
9823 emit_insn (gen_move_gpdi (dest, src));
9825 emit_insn (gen_move_gpsi (dest, src));
9829 if (regno == HI_REGNUM)
9833 mips_emit_move (temp, src);
9835 emit_insn (gen_mthisi_di (gen_rtx_REG (TImode, MD_REG_FIRST),
9836 temp, gen_rtx_REG (DImode, LO_REGNUM)));
9838 emit_insn (gen_mthisi_di (gen_rtx_REG (DImode, MD_REG_FIRST),
9839 temp, gen_rtx_REG (SImode, LO_REGNUM)));
9844 emit_insn (gen_mfhidi_ti (temp,
9845 gen_rtx_REG (TImode, MD_REG_FIRST)));
9847 emit_insn (gen_mfhisi_di (temp,
9848 gen_rtx_REG (DImode, MD_REG_FIRST)));
9849 mips_emit_move (dest, temp);
9852 else if (mips_direct_save_slot_move_p (regno, mem, mem == src))
9853 mips_emit_move (dest, src);
9856 gcc_assert (!reg_overlap_mentioned_p (dest, temp));
9857 mips_emit_move (temp, src);
9858 mips_emit_move (dest, temp);
9861 mips_set_frame_expr (mips_frame_set (dest, src));
9864 /* If we're generating n32 or n64 abicalls, and the current function
9865 does not use $28 as its global pointer, emit a cplocal directive.
9866 Use pic_offset_table_rtx as the argument to the directive. */
9869 mips_output_cplocal (void)
9871 if (!TARGET_EXPLICIT_RELOCS
9872 && mips_must_initialize_gp_p ()
9873 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
9874 output_asm_insn (".cplocal %+", 0);
9877 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
9880 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9884 #ifdef SDB_DEBUGGING_INFO
9885 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
9886 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
9889 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
9890 floating-point arguments. */
9892 && TARGET_HARD_FLOAT_ABI
9893 && crtl->args.info.fp_code != 0)
9894 mips16_build_function_stub ();
9896 /* Get the function name the same way that toplev.c does before calling
9897 assemble_start_function. This is needed so that the name used here
9898 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
9899 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9900 mips_start_function_definition (fnname, TARGET_MIPS16);
9902 /* Output MIPS-specific frame information. */
9903 if (!flag_inhibit_size_directive)
9905 const struct mips_frame_info *frame;
9907 frame = &cfun->machine->frame;
9909 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
9911 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
9912 "# vars= " HOST_WIDE_INT_PRINT_DEC
9914 ", args= " HOST_WIDE_INT_PRINT_DEC
9915 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
9916 reg_names[frame_pointer_needed
9917 ? HARD_FRAME_POINTER_REGNUM
9918 : STACK_POINTER_REGNUM],
9919 (frame_pointer_needed
9920 ? frame->total_size - frame->hard_frame_pointer_offset
9921 : frame->total_size),
9922 reg_names[RETURN_ADDR_REGNUM],
9924 frame->num_gp, frame->num_fp,
9926 frame->cprestore_size);
9928 /* .mask MASK, OFFSET. */
9929 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
9930 frame->mask, frame->gp_save_offset);
9932 /* .fmask MASK, OFFSET. */
9933 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
9934 frame->fmask, frame->fp_save_offset);
9937 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
9938 Also emit the ".set noreorder; .set nomacro" sequence for functions
9940 if (mips_must_initialize_gp_p ()
9941 && mips_current_loadgp_style () == LOADGP_OLDABI)
9945 /* This is a fixed-form sequence. The position of the
9946 first two instructions is important because of the
9947 way _gp_disp is defined. */
9948 output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
9949 output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
9950 output_asm_insn ("sll\t$2,16", 0);
9951 output_asm_insn ("addu\t$2,$3", 0);
9955 /* .cpload must be in a .set noreorder but not a
9956 .set nomacro block. */
9957 mips_push_asm_switch (&mips_noreorder);
9958 output_asm_insn (".cpload\t%^", 0);
9959 if (!cfun->machine->all_noreorder_p)
9960 mips_pop_asm_switch (&mips_noreorder);
9962 mips_push_asm_switch (&mips_nomacro);
9965 else if (cfun->machine->all_noreorder_p)
9967 mips_push_asm_switch (&mips_noreorder);
9968 mips_push_asm_switch (&mips_nomacro);
9971 /* Tell the assembler which register we're using as the global
9972 pointer. This is needed for thunks, since they can use either
9973 explicit relocs or assembler macros. */
9974 mips_output_cplocal ();
9977 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
9980 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9981 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9985 /* Reinstate the normal $gp. */
9986 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
9987 mips_output_cplocal ();
9989 if (cfun->machine->all_noreorder_p)
9991 mips_pop_asm_switch (&mips_nomacro);
9992 mips_pop_asm_switch (&mips_noreorder);
9995 /* Get the function name the same way that toplev.c does before calling
9996 assemble_start_function. This is needed so that the name used here
9997 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
9998 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9999 mips_end_function_definition (fnname);
10002 /* Save register REG to MEM. Make the instruction frame-related. */
10005 mips_save_reg (rtx reg, rtx mem)
10007 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
10011 if (mips_split_64bit_move_p (mem, reg))
10012 mips_split_doubleword_move (mem, reg);
10014 mips_emit_move (mem, reg);
10016 x1 = mips_frame_set (mips_subword (mem, false),
10017 mips_subword (reg, false));
10018 x2 = mips_frame_set (mips_subword (mem, true),
10019 mips_subword (reg, true));
10020 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
10023 mips_emit_save_slot_move (mem, reg, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
10026 /* The __gnu_local_gp symbol. */
10028 static GTY(()) rtx mips_gnu_local_gp;
10030 /* If we're generating n32 or n64 abicalls, emit instructions
10031 to set up the global pointer. */
10034 mips_emit_loadgp (void)
10036 rtx addr, offset, incoming_address, base, index, pic_reg;
10038 pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
10039 switch (mips_current_loadgp_style ())
10041 case LOADGP_ABSOLUTE:
10042 if (mips_gnu_local_gp == NULL)
10044 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
10045 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
10047 emit_insn (Pmode == SImode
10048 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
10049 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
10052 case LOADGP_OLDABI:
10053 /* Added by mips_output_function_prologue. */
10056 case LOADGP_NEWABI:
10057 addr = XEXP (DECL_RTL (current_function_decl), 0);
10058 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
10059 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
10060 emit_insn (Pmode == SImode
10061 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
10062 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
10066 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
10067 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
10068 emit_insn (Pmode == SImode
10069 ? gen_loadgp_rtp_si (pic_reg, base, index)
10070 : gen_loadgp_rtp_di (pic_reg, base, index));
10078 emit_insn (gen_copygp_mips16 (pic_offset_table_rtx, pic_reg));
10080 /* Emit a blockage if there are implicit uses of the GP register.
10081 This includes profiled functions, because FUNCTION_PROFILE uses
10083 if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
10084 emit_insn (gen_loadgp_blockage ());
10087 /* A for_each_rtx callback. Stop the search if *X is a kernel register. */
10090 mips_kernel_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
10092 return REG_P (*x) && KERNEL_REG_P (REGNO (*x));
10095 /* Expand the "prologue" pattern. */
10098 mips_expand_prologue (void)
10100 const struct mips_frame_info *frame;
10101 HOST_WIDE_INT size;
10102 unsigned int nargs;
10105 if (cfun->machine->global_pointer != INVALID_REGNUM)
10107 /* Check whether an insn uses pic_offset_table_rtx, either explicitly
10108 or implicitly. If so, we can commit to using a global pointer
10109 straight away, otherwise we need to defer the decision. */
10110 if (mips_cfun_has_inflexible_gp_ref_p ()
10111 || mips_cfun_has_flexible_gp_ref_p ())
10113 cfun->machine->must_initialize_gp_p = true;
10114 cfun->machine->must_restore_gp_when_clobbered_p = true;
10117 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
10120 frame = &cfun->machine->frame;
10121 size = frame->total_size;
10123 if (flag_stack_usage)
10124 current_function_static_stack_size = size;
10126 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
10127 bytes beforehand; this is enough to cover the register save area
10128 without going out of range. */
10129 if (((frame->mask | frame->fmask | frame->acc_mask) != 0)
10130 || frame->num_cop0_regs > 0)
10132 HOST_WIDE_INT step1;
10134 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
10135 if (GENERATE_MIPS16E_SAVE_RESTORE)
10137 HOST_WIDE_INT offset;
10138 unsigned int mask, regno;
10140 /* Try to merge argument stores into the save instruction. */
10141 nargs = mips16e_collect_argument_saves ();
10143 /* Build the save instruction. */
10144 mask = frame->mask;
10145 insn = mips16e_build_save_restore (false, &mask, &offset,
10147 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
10150 /* Check if we need to save other registers. */
10151 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
10152 if (BITSET_P (mask, regno - GP_REG_FIRST))
10154 offset -= UNITS_PER_WORD;
10155 mips_save_restore_reg (word_mode, regno,
10156 offset, mips_save_reg);
10161 if (cfun->machine->interrupt_handler_p)
10163 HOST_WIDE_INT offset;
10166 /* If this interrupt is using a shadow register set, we need to
10167 get the stack pointer from the previous register set. */
10168 if (cfun->machine->use_shadow_register_set_p)
10169 emit_insn (gen_mips_rdpgpr (stack_pointer_rtx,
10170 stack_pointer_rtx));
10172 if (!cfun->machine->keep_interrupts_masked_p)
10174 /* Move from COP0 Cause to K0. */
10175 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K0_REG_NUM),
10176 gen_rtx_REG (SImode,
10177 COP0_CAUSE_REG_NUM)));
10178 /* Move from COP0 EPC to K1. */
10179 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
10180 gen_rtx_REG (SImode,
10181 COP0_EPC_REG_NUM)));
10184 /* Allocate the first part of the frame. */
10185 insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
10187 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
10190 /* Start at the uppermost location for saving. */
10191 offset = frame->cop0_sp_offset - size;
10192 if (!cfun->machine->keep_interrupts_masked_p)
10194 /* Push EPC into its stack slot. */
10195 mem = gen_frame_mem (word_mode,
10196 plus_constant (stack_pointer_rtx,
10198 mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
10199 offset -= UNITS_PER_WORD;
10202 /* Move from COP0 Status to K1. */
10203 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
10204 gen_rtx_REG (SImode,
10205 COP0_STATUS_REG_NUM)));
10207 /* Right justify the RIPL in k0. */
10208 if (!cfun->machine->keep_interrupts_masked_p)
10209 emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode, K0_REG_NUM),
10210 gen_rtx_REG (SImode, K0_REG_NUM),
10211 GEN_INT (CAUSE_IPL)));
10213 /* Push Status into its stack slot. */
10214 mem = gen_frame_mem (word_mode,
10215 plus_constant (stack_pointer_rtx, offset));
10216 mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
10217 offset -= UNITS_PER_WORD;
10219 /* Insert the RIPL into our copy of SR (k1) as the new IPL. */
10220 if (!cfun->machine->keep_interrupts_masked_p)
10221 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
10224 gen_rtx_REG (SImode, K0_REG_NUM)));
10226 if (!cfun->machine->keep_interrupts_masked_p)
10227 /* Enable interrupts by clearing the KSU ERL and EXL bits.
10228 IE is already the correct value, so we don't have to do
10229 anything explicit. */
10230 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
10233 gen_rtx_REG (SImode, GP_REG_FIRST)));
10235 /* Disable interrupts by clearing the KSU, ERL, EXL,
10237 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
10240 gen_rtx_REG (SImode, GP_REG_FIRST)));
10244 insn = gen_add3_insn (stack_pointer_rtx,
10247 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
10250 mips_for_each_saved_acc (size, mips_save_reg);
10251 mips_for_each_saved_gpr_and_fpr (size, mips_save_reg);
10255 /* Allocate the rest of the frame. */
10258 if (SMALL_OPERAND (-size))
10259 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
10261 GEN_INT (-size)))) = 1;
10264 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
10267 /* There are no instructions to add or subtract registers
10268 from the stack pointer, so use the frame pointer as a
10269 temporary. We should always be using a frame pointer
10270 in this case anyway. */
10271 gcc_assert (frame_pointer_needed);
10272 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
10273 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
10274 hard_frame_pointer_rtx,
10275 MIPS_PROLOGUE_TEMP (Pmode)));
10276 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
10279 emit_insn (gen_sub3_insn (stack_pointer_rtx,
10281 MIPS_PROLOGUE_TEMP (Pmode)));
10283 /* Describe the combined effect of the previous instructions. */
10284 mips_set_frame_expr
10285 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
10286 plus_constant (stack_pointer_rtx, -size)));
10290 /* Set up the frame pointer, if we're using one. */
10291 if (frame_pointer_needed)
10293 HOST_WIDE_INT offset;
10295 offset = frame->hard_frame_pointer_offset;
10298 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
10299 RTX_FRAME_RELATED_P (insn) = 1;
10301 else if (SMALL_OPERAND (offset))
10303 insn = gen_add3_insn (hard_frame_pointer_rtx,
10304 stack_pointer_rtx, GEN_INT (offset));
10305 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
10309 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
10310 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
10311 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
10312 hard_frame_pointer_rtx,
10313 MIPS_PROLOGUE_TEMP (Pmode)));
10314 mips_set_frame_expr
10315 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
10316 plus_constant (stack_pointer_rtx, offset)));
10320 mips_emit_loadgp ();
10322 /* Initialize the $gp save slot. */
10323 if (mips_cfun_has_cprestore_slot_p ())
10325 rtx base, mem, gp, temp;
10326 HOST_WIDE_INT offset;
10328 mips_get_cprestore_base_and_offset (&base, &offset, false);
10329 mem = gen_frame_mem (Pmode, plus_constant (base, offset));
10330 gp = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
10331 temp = (SMALL_OPERAND (offset)
10332 ? gen_rtx_SCRATCH (Pmode)
10333 : MIPS_PROLOGUE_TEMP (Pmode));
10334 emit_insn (gen_potential_cprestore (mem, GEN_INT (offset), gp, temp));
10336 mips_get_cprestore_base_and_offset (&base, &offset, true);
10337 mem = gen_frame_mem (Pmode, plus_constant (base, offset));
10338 emit_insn (gen_use_cprestore (mem));
10341 /* We need to search back to the last use of K0 or K1. */
10342 if (cfun->machine->interrupt_handler_p)
10344 for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
10346 && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
10348 /* Emit a move from K1 to COP0 Status after insn. */
10349 gcc_assert (insn != NULL_RTX);
10350 emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
10351 gen_rtx_REG (SImode, K1_REG_NUM)),
10355 /* If we are profiling, make sure no instructions are scheduled before
10356 the call to mcount. */
10358 emit_insn (gen_blockage ());
10361 /* Emit instructions to restore register REG from slot MEM. */
10364 mips_restore_reg (rtx reg, rtx mem)
10366 /* There's no MIPS16 instruction to load $31 directly. Load into
10367 $7 instead and adjust the return insn appropriately. */
10368 if (TARGET_MIPS16 && REGNO (reg) == RETURN_ADDR_REGNUM)
10369 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
10371 mips_emit_save_slot_move (reg, mem, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
10374 /* Emit any instructions needed before a return. */
10377 mips_expand_before_return (void)
10379 /* When using a call-clobbered gp, we start out with unified call
10380 insns that include instructions to restore the gp. We then split
10381 these unified calls after reload. These split calls explicitly
10382 clobber gp, so there is no need to define
10383 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
10385 For consistency, we should also insert an explicit clobber of $28
10386 before return insns, so that the post-reload optimizers know that
10387 the register is not live on exit. */
10388 if (TARGET_CALL_CLOBBERED_GP)
10389 emit_clobber (pic_offset_table_rtx);
10392 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
10396 mips_expand_epilogue (bool sibcall_p)
10398 const struct mips_frame_info *frame;
10399 HOST_WIDE_INT step1, step2;
10400 rtx base, target, insn;
10402 if (!sibcall_p && mips_can_use_return_insn ())
10404 emit_jump_insn (gen_return ());
10408 /* In MIPS16 mode, if the return value should go into a floating-point
10409 register, we need to call a helper routine to copy it over. */
10410 if (mips16_cfun_returns_in_fpr_p ())
10411 mips16_copy_fpr_return_value ();
10413 /* Split the frame into two. STEP1 is the amount of stack we should
10414 deallocate before restoring the registers. STEP2 is the amount we
10415 should deallocate afterwards.
10417 Start off by assuming that no registers need to be restored. */
10418 frame = &cfun->machine->frame;
10419 step1 = frame->total_size;
10422 /* Work out which register holds the frame address. */
10423 if (!frame_pointer_needed)
10424 base = stack_pointer_rtx;
10427 base = hard_frame_pointer_rtx;
10428 step1 -= frame->hard_frame_pointer_offset;
10431 /* If we need to restore registers, deallocate as much stack as
10432 possible in the second step without going out of range. */
10433 if ((frame->mask | frame->fmask | frame->acc_mask) != 0
10434 || frame->num_cop0_regs > 0)
10436 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
10440 /* Set TARGET to BASE + STEP1. */
10446 /* Get an rtx for STEP1 that we can add to BASE. */
10447 adjust = GEN_INT (step1);
10448 if (!SMALL_OPERAND (step1))
10450 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
10451 adjust = MIPS_EPILOGUE_TEMP (Pmode);
10454 /* Normal mode code can copy the result straight into $sp. */
10455 if (!TARGET_MIPS16)
10456 target = stack_pointer_rtx;
10458 emit_insn (gen_add3_insn (target, base, adjust));
10461 /* Copy TARGET into the stack pointer. */
10462 if (target != stack_pointer_rtx)
10463 mips_emit_move (stack_pointer_rtx, target);
10465 /* If we're using addressing macros, $gp is implicitly used by all
10466 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
10468 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
10469 emit_insn (gen_blockage ());
10471 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
10473 unsigned int regno, mask;
10474 HOST_WIDE_INT offset;
10477 /* Generate the restore instruction. */
10478 mask = frame->mask;
10479 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
10481 /* Restore any other registers manually. */
10482 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
10483 if (BITSET_P (mask, regno - GP_REG_FIRST))
10485 offset -= UNITS_PER_WORD;
10486 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
10489 /* Restore the remaining registers and deallocate the final bit
10491 emit_insn (restore);
10495 /* Restore the registers. */
10496 mips_for_each_saved_acc (frame->total_size - step2, mips_restore_reg);
10497 mips_for_each_saved_gpr_and_fpr (frame->total_size - step2,
10500 if (cfun->machine->interrupt_handler_p)
10502 HOST_WIDE_INT offset;
10505 offset = frame->cop0_sp_offset - (frame->total_size - step2);
10506 if (!cfun->machine->keep_interrupts_masked_p)
10508 /* Restore the original EPC. */
10509 mem = gen_frame_mem (word_mode,
10510 plus_constant (stack_pointer_rtx, offset));
10511 mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
10512 offset -= UNITS_PER_WORD;
10514 /* Move to COP0 EPC. */
10515 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_EPC_REG_NUM),
10516 gen_rtx_REG (SImode, K0_REG_NUM)));
10519 /* Restore the original Status. */
10520 mem = gen_frame_mem (word_mode,
10521 plus_constant (stack_pointer_rtx, offset));
10522 mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
10523 offset -= UNITS_PER_WORD;
10525 /* If we don't use shoadow register set, we need to update SP. */
10526 if (!cfun->machine->use_shadow_register_set_p && step2 > 0)
10527 emit_insn (gen_add3_insn (stack_pointer_rtx,
10531 /* Move to COP0 Status. */
10532 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
10533 gen_rtx_REG (SImode, K0_REG_NUM)));
10537 /* Deallocate the final bit of the frame. */
10539 emit_insn (gen_add3_insn (stack_pointer_rtx,
10545 /* Add in the __builtin_eh_return stack adjustment. We need to
10546 use a temporary in MIPS16 code. */
10547 if (crtl->calls_eh_return)
10551 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
10552 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
10553 MIPS_EPILOGUE_TEMP (Pmode),
10554 EH_RETURN_STACKADJ_RTX));
10555 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
10558 emit_insn (gen_add3_insn (stack_pointer_rtx,
10560 EH_RETURN_STACKADJ_RTX));
10565 mips_expand_before_return ();
10566 if (cfun->machine->interrupt_handler_p)
10568 /* Interrupt handlers generate eret or deret. */
10569 if (cfun->machine->use_debug_exception_return_p)
10570 emit_jump_insn (gen_mips_deret ());
10572 emit_jump_insn (gen_mips_eret ());
10576 unsigned int regno;
10578 /* When generating MIPS16 code, the normal
10579 mips_for_each_saved_gpr_and_fpr path will restore the return
10580 address into $7 rather than $31. */
10582 && !GENERATE_MIPS16E_SAVE_RESTORE
10583 && BITSET_P (frame->mask, RETURN_ADDR_REGNUM))
10584 regno = GP_REG_FIRST + 7;
10586 regno = RETURN_ADDR_REGNUM;
10587 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
10591 /* Search from the beginning to the first use of K0 or K1. */
10592 if (cfun->machine->interrupt_handler_p
10593 && !cfun->machine->keep_interrupts_masked_p)
10595 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
10597 && for_each_rtx (&PATTERN(insn), mips_kernel_reg_p, NULL))
10599 gcc_assert (insn != NULL_RTX);
10600 /* Insert disable interrupts before the first use of K0 or K1. */
10601 emit_insn_before (gen_mips_di (), insn);
10602 emit_insn_before (gen_mips_ehb (), insn);
10606 /* Return nonzero if this function is known to have a null epilogue.
10607 This allows the optimizer to omit jumps to jumps if no stack
10611 mips_can_use_return_insn (void)
10613 /* Interrupt handlers need to go through the epilogue. */
10614 if (cfun->machine->interrupt_handler_p)
10617 if (!reload_completed)
10623 /* In MIPS16 mode, a function that returns a floating-point value
10624 needs to arrange to copy the return value into the floating-point
10626 if (mips16_cfun_returns_in_fpr_p ())
10629 return cfun->machine->frame.total_size == 0;
10632 /* Return true if register REGNO can store a value of mode MODE.
10633 The result of this function is cached in mips_hard_regno_mode_ok. */
10636 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
10639 enum mode_class mclass;
10641 if (mode == CCV2mode)
10642 return (ISA_HAS_8CC
10643 && ST_REG_P (regno)
10644 && (regno - ST_REG_FIRST) % 2 == 0);
10646 if (mode == CCV4mode)
10647 return (ISA_HAS_8CC
10648 && ST_REG_P (regno)
10649 && (regno - ST_REG_FIRST) % 4 == 0);
10651 if (mode == CCmode)
10654 return regno == FPSW_REGNUM;
10656 return (ST_REG_P (regno)
10657 || GP_REG_P (regno)
10658 || FP_REG_P (regno));
10661 size = GET_MODE_SIZE (mode);
10662 mclass = GET_MODE_CLASS (mode);
10664 if (GP_REG_P (regno))
10665 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
10667 if (FP_REG_P (regno)
10668 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
10669 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
10671 /* Allow TFmode for CCmode reloads. */
10672 if (mode == TFmode && ISA_HAS_8CC)
10675 /* Allow 64-bit vector modes for Loongson-2E/2F. */
10676 if (TARGET_LOONGSON_VECTORS
10677 && (mode == V2SImode
10678 || mode == V4HImode
10679 || mode == V8QImode
10680 || mode == DImode))
10683 if (mclass == MODE_FLOAT
10684 || mclass == MODE_COMPLEX_FLOAT
10685 || mclass == MODE_VECTOR_FLOAT)
10686 return size <= UNITS_PER_FPVALUE;
10688 /* Allow integer modes that fit into a single register. We need
10689 to put integers into FPRs when using instructions like CVT
10690 and TRUNC. There's no point allowing sizes smaller than a word,
10691 because the FPU has no appropriate load/store instructions. */
10692 if (mclass == MODE_INT)
10693 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
10696 if (ACC_REG_P (regno)
10697 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
10699 if (MD_REG_P (regno))
10701 /* After a multiplication or division, clobbering HI makes
10702 the value of LO unpredictable, and vice versa. This means
10703 that, for all interesting cases, HI and LO are effectively
10706 We model this by requiring that any value that uses HI
10708 if (size <= UNITS_PER_WORD * 2)
10709 return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
10713 /* DSP accumulators do not have the same restrictions as
10714 HI and LO, so we can treat them as normal doubleword
10716 if (size <= UNITS_PER_WORD)
10719 if (size <= UNITS_PER_WORD * 2
10720 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
10725 if (ALL_COP_REG_P (regno))
10726 return mclass == MODE_INT && size <= UNITS_PER_WORD;
10728 if (regno == GOT_VERSION_REGNUM)
10729 return mode == SImode;
10734 /* Implement HARD_REGNO_NREGS. */
10737 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10739 if (ST_REG_P (regno))
10740 /* The size of FP status registers is always 4, because they only hold
10741 CCmode values, and CCmode is always considered to be 4 bytes wide. */
10742 return (GET_MODE_SIZE (mode) + 3) / 4;
10744 if (FP_REG_P (regno))
10745 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
10747 /* All other registers are word-sized. */
10748 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
10751 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
10752 in mips_hard_regno_nregs. */
10755 mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
10761 COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
10762 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
10764 size = MIN (size, 4);
10765 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
10767 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
10769 size = MIN (size, UNITS_PER_FPREG);
10770 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
10772 if (!hard_reg_set_empty_p (left))
10773 size = MIN (size, UNITS_PER_WORD);
10774 return (GET_MODE_SIZE (mode) + size - 1) / size;
10777 /* Implement CANNOT_CHANGE_MODE_CLASS. */
10780 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
10781 enum machine_mode to ATTRIBUTE_UNUSED,
10782 enum reg_class rclass)
10784 /* There are several problems with changing the modes of values in
10785 floating-point registers:
10787 - When a multi-word value is stored in paired floating-point
10788 registers, the first register always holds the low word. We
10789 therefore can't allow FPRs to change between single-word and
10790 multi-word modes on big-endian targets.
10792 - GCC assumes that each word of a multiword register can be
10793 accessed individually using SUBREGs. This is not true for
10794 floating-point registers if they are bigger than a word.
10796 - Loading a 32-bit value into a 64-bit floating-point register
10797 will not sign-extend the value, despite what LOAD_EXTEND_OP
10798 says. We can't allow FPRs to change from SImode to a wider
10799 mode on 64-bit targets.
10801 - If the FPU has already interpreted a value in one format, we
10802 must not ask it to treat the value as having a different
10805 We therefore disallow all mode changes involving FPRs. */
10806 return reg_classes_intersect_p (FP_REGS, rclass);
10809 /* Implement target hook small_register_classes_for_mode_p. */
10812 mips_small_register_classes_for_mode_p (enum machine_mode mode
10815 return TARGET_MIPS16;
10818 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
10821 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
10826 return TARGET_HARD_FLOAT;
10829 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
10832 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
10839 /* Implement MODES_TIEABLE_P. */
10842 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
10844 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
10845 prefer to put one of them in FPRs. */
10846 return (mode1 == mode2
10847 || (!mips_mode_ok_for_mov_fmt_p (mode1)
10848 && !mips_mode_ok_for_mov_fmt_p (mode2)));
10851 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
10854 mips_preferred_reload_class (rtx x, reg_class_t rclass)
10856 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
10859 if (reg_class_subset_p (FP_REGS, rclass)
10860 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
10863 if (reg_class_subset_p (GR_REGS, rclass))
10866 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
10872 /* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
10873 Return a "canonical" class to represent it in later calculations. */
10876 mips_canonicalize_move_class (reg_class_t rclass)
10878 /* All moves involving accumulator registers have the same cost. */
10879 if (reg_class_subset_p (rclass, ACC_REGS))
10882 /* Likewise promote subclasses of general registers to the most
10883 interesting containing class. */
10884 if (TARGET_MIPS16 && reg_class_subset_p (rclass, M16_REGS))
10886 else if (reg_class_subset_p (rclass, GENERAL_REGS))
10887 rclass = GENERAL_REGS;
10892 /* Return the cost of moving a value of mode MODE from a register of
10893 class FROM to a GPR. Return 0 for classes that are unions of other
10894 classes handled by this function. */
10897 mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10903 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
10907 /* MFLO and MFHI. */
10915 /* LUI followed by MOVF. */
10921 /* This choice of value is historical. */
10929 /* Return the cost of moving a value of mode MODE from a GPR to a
10930 register of class TO. Return 0 for classes that are unions of
10931 other classes handled by this function. */
10934 mips_move_from_gpr_cost (enum machine_mode mode, reg_class_t to)
10939 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
10943 /* MTLO and MTHI. */
10951 /* A secondary reload through an FPR scratch. */
10952 return (mips_register_move_cost (mode, GENERAL_REGS, FP_REGS)
10953 + mips_register_move_cost (mode, FP_REGS, ST_REGS));
10958 /* This choice of value is historical. */
10966 /* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the
10967 maximum of the move costs for subclasses; regclass will work out
10968 the maximum for us. */
10971 mips_register_move_cost (enum machine_mode mode,
10972 reg_class_t from, reg_class_t to)
10977 from = mips_canonicalize_move_class (from);
10978 to = mips_canonicalize_move_class (to);
10980 /* Handle moves that can be done without using general-purpose registers. */
10981 if (from == FP_REGS)
10983 if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
10987 /* The sequence generated by mips_expand_fcc_reload. */
10991 /* Handle cases in which only one class deviates from the ideal. */
10992 dregs = TARGET_MIPS16 ? M16_REGS : GENERAL_REGS;
10994 return mips_move_from_gpr_cost (mode, to);
10996 return mips_move_to_gpr_cost (mode, from);
10998 /* Handles cases that require a GPR temporary. */
10999 cost1 = mips_move_to_gpr_cost (mode, from);
11002 cost2 = mips_move_from_gpr_cost (mode, to);
11004 return cost1 + cost2;
11010 /* Implement TARGET_MEMORY_MOVE_COST. */
11013 mips_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
11015 return (mips_cost->memory_latency
11016 + memory_move_secondary_cost (mode, rclass, in));
11019 /* Return the register class required for a secondary register when
11020 copying between one of the registers in RCLASS and value X, which
11021 has mode MODE. X is the source of the move if IN_P, otherwise it
11022 is the destination. Return NO_REGS if no secondary register is
11026 mips_secondary_reload_class (enum reg_class rclass,
11027 enum machine_mode mode, rtx x, bool in_p)
11031 /* If X is a constant that cannot be loaded into $25, it must be loaded
11032 into some other GPR. No other register class allows a direct move. */
11033 if (mips_dangerous_for_la25_p (x))
11034 return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;
11036 regno = true_regnum (x);
11039 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
11040 if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
11046 /* Copying from accumulator registers to anywhere other than a general
11047 register requires a temporary general register. */
11048 if (reg_class_subset_p (rclass, ACC_REGS))
11049 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
11050 if (ACC_REG_P (regno))
11051 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
11053 /* We can only copy a value to a condition code register from a
11054 floating-point register, and even then we require a scratch
11055 floating-point register. We can only copy a value out of a
11056 condition-code register into a general register. */
11057 if (reg_class_subset_p (rclass, ST_REGS))
11061 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
11063 if (ST_REG_P (regno))
11067 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
11070 if (reg_class_subset_p (rclass, FP_REGS))
11073 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
11074 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
11075 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
11078 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
11079 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
11082 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
11083 /* We can force the constant to memory and use lwc1
11084 and ldc1. As above, we will use pairs of lwc1s if
11085 ldc1 is not supported. */
11088 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
11089 /* In this case we can use mov.fmt. */
11092 /* Otherwise, we need to reload through an integer register. */
11095 if (FP_REG_P (regno))
11096 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
11101 /* Implement TARGET_MODE_REP_EXTENDED. */
11104 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
11106 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
11107 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
11108 return SIGN_EXTEND;
11113 /* Implement TARGET_VALID_POINTER_MODE. */
11116 mips_valid_pointer_mode (enum machine_mode mode)
11118 return mode == SImode || (TARGET_64BIT && mode == DImode);
11121 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
11124 mips_vector_mode_supported_p (enum machine_mode mode)
11129 return TARGET_PAIRED_SINGLE_FLOAT;
11144 return TARGET_LOONGSON_VECTORS;
11151 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
11154 mips_scalar_mode_supported_p (enum machine_mode mode)
11156 if (ALL_FIXED_POINT_MODE_P (mode)
11157 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
11160 return default_scalar_mode_supported_p (mode);
11163 /* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
11165 static enum machine_mode
11166 mips_preferred_simd_mode (enum machine_mode mode ATTRIBUTE_UNUSED)
11168 if (TARGET_PAIRED_SINGLE_FLOAT
11174 /* Implement TARGET_INIT_LIBFUNCS. */
11177 mips_init_libfuncs (void)
11179 if (TARGET_FIX_VR4120)
11181 /* Register the special divsi3 and modsi3 functions needed to work
11182 around VR4120 division errata. */
11183 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
11184 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
11187 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
11189 /* Register the MIPS16 -mhard-float stubs. */
11190 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
11191 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
11192 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
11193 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
11195 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
11196 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
11197 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
11198 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
11199 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
11200 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
11201 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
11203 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
11204 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
11205 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
11207 if (TARGET_DOUBLE_FLOAT)
11209 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
11210 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
11211 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
11212 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
11214 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
11215 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
11216 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
11217 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
11218 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
11219 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
11220 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
11222 set_conv_libfunc (sext_optab, DFmode, SFmode,
11223 "__mips16_extendsfdf2");
11224 set_conv_libfunc (trunc_optab, SFmode, DFmode,
11225 "__mips16_truncdfsf2");
11226 set_conv_libfunc (sfix_optab, SImode, DFmode,
11227 "__mips16_fix_truncdfsi");
11228 set_conv_libfunc (sfloat_optab, DFmode, SImode,
11229 "__mips16_floatsidf");
11230 set_conv_libfunc (ufloat_optab, DFmode, SImode,
11231 "__mips16_floatunsidf");
11235 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
11236 on an external non-MIPS16 routine to implement __sync_synchronize. */
11238 synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
11241 /* Build up a multi-insn sequence that loads label TARGET into $AT. */
11244 mips_process_load_label (rtx target)
11246 rtx base, gp, intop;
11247 HOST_WIDE_INT offset;
11249 mips_multi_start ();
11253 mips_multi_add_insn ("lw\t%@,%%got_page(%0)(%+)", target, 0);
11254 mips_multi_add_insn ("addiu\t%@,%@,%%got_ofst(%0)", target, 0);
11258 mips_multi_add_insn ("ld\t%@,%%got_page(%0)(%+)", target, 0);
11259 mips_multi_add_insn ("daddiu\t%@,%@,%%got_ofst(%0)", target, 0);
11263 gp = pic_offset_table_rtx;
11264 if (mips_cfun_has_cprestore_slot_p ())
11266 gp = gen_rtx_REG (Pmode, AT_REGNUM);
11267 mips_get_cprestore_base_and_offset (&base, &offset, true);
11268 if (!SMALL_OPERAND (offset))
11270 intop = GEN_INT (CONST_HIGH_PART (offset));
11271 mips_multi_add_insn ("lui\t%0,%1", gp, intop, 0);
11272 mips_multi_add_insn ("addu\t%0,%0,%1", gp, base, 0);
11275 offset = CONST_LOW_PART (offset);
11277 intop = GEN_INT (offset);
11278 if (ISA_HAS_LOAD_DELAY)
11279 mips_multi_add_insn ("lw\t%0,%1(%2)%#", gp, intop, base, 0);
11281 mips_multi_add_insn ("lw\t%0,%1(%2)", gp, intop, base, 0);
11283 if (ISA_HAS_LOAD_DELAY)
11284 mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)%#", target, gp, 0);
11286 mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)", target, gp, 0);
11287 mips_multi_add_insn ("addiu\t%@,%@,%%lo(%0)", target, 0);
11292 /* Return the number of instructions needed to load a label into $AT. */
11294 static unsigned int
11295 mips_load_label_num_insns (void)
11297 if (cfun->machine->load_label_num_insns == 0)
11299 mips_process_load_label (pc_rtx);
11300 cfun->machine->load_label_num_insns = mips_multi_num_insns;
11302 return cfun->machine->load_label_num_insns;
11305 /* Emit an asm sequence to start a noat block and load the address
11306 of a label into $1. */
11309 mips_output_load_label (rtx target)
11311 mips_push_asm_switch (&mips_noat);
11312 if (TARGET_EXPLICIT_RELOCS)
11314 mips_process_load_label (target);
11315 mips_multi_write ();
11319 if (Pmode == DImode)
11320 output_asm_insn ("dla\t%@,%0", &target);
11322 output_asm_insn ("la\t%@,%0", &target);
11326 /* Return the length of INSN. LENGTH is the initial length computed by
11327 attributes in the machine-description file. */
11330 mips_adjust_insn_length (rtx insn, int length)
11332 /* mips.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length
11333 of a PIC long-branch sequence. Substitute the correct value. */
11334 if (length == MAX_PIC_BRANCH_LENGTH
11335 && INSN_CODE (insn) >= 0
11336 && get_attr_type (insn) == TYPE_BRANCH)
11338 /* Add the branch-over instruction and its delay slot, if this
11339 is a conditional branch. */
11340 length = simplejump_p (insn) ? 0 : 8;
11342 /* Load the label into $AT and jump to it. Ignore the delay
11343 slot of the jump. */
11344 length += 4 * mips_load_label_num_insns() + 4;
11347 /* A unconditional jump has an unfilled delay slot if it is not part
11348 of a sequence. A conditional jump normally has a delay slot, but
11349 does not on MIPS16. */
11350 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
11353 /* See how many nops might be needed to avoid hardware hazards. */
11354 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
11355 switch (get_attr_hazard (insn))
11369 /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
11370 the .md file length attributes are 4-based for both modes.
11371 Adjust the MIPS16 ones here. */
11378 /* Return the assembly code for INSN, which has the operands given by
11379 OPERANDS, and which branches to OPERANDS[0] if some condition is true.
11380 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
11381 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
11382 version of BRANCH_IF_TRUE. */
11385 mips_output_conditional_branch (rtx insn, rtx *operands,
11386 const char *branch_if_true,
11387 const char *branch_if_false)
11389 unsigned int length;
11390 rtx taken, not_taken;
11392 gcc_assert (LABEL_P (operands[0]));
11394 length = get_attr_length (insn);
11397 /* Just a simple conditional branch. */
11398 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
11399 return branch_if_true;
11402 /* Generate a reversed branch around a direct jump. This fallback does
11403 not use branch-likely instructions. */
11404 mips_branch_likely = false;
11405 not_taken = gen_label_rtx ();
11406 taken = operands[0];
11408 /* Generate the reversed branch to NOT_TAKEN. */
11409 operands[0] = not_taken;
11410 output_asm_insn (branch_if_false, operands);
11412 /* If INSN has a delay slot, we must provide delay slots for both the
11413 branch to NOT_TAKEN and the conditional jump. We must also ensure
11414 that INSN's delay slot is executed in the appropriate cases. */
11415 if (final_sequence)
11417 /* This first delay slot will always be executed, so use INSN's
11418 delay slot if is not annulled. */
11419 if (!INSN_ANNULLED_BRANCH_P (insn))
11421 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11422 asm_out_file, optimize, 1, NULL);
11423 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11426 output_asm_insn ("nop", 0);
11427 fprintf (asm_out_file, "\n");
11430 /* Output the unconditional branch to TAKEN. */
11431 if (TARGET_ABSOLUTE_JUMPS)
11432 output_asm_insn (MIPS_ABSOLUTE_JUMP ("j\t%0%/"), &taken);
11435 mips_output_load_label (taken);
11436 output_asm_insn ("jr\t%@%]%/", 0);
11439 /* Now deal with its delay slot; see above. */
11440 if (final_sequence)
11442 /* This delay slot will only be executed if the branch is taken.
11443 Use INSN's delay slot if is annulled. */
11444 if (INSN_ANNULLED_BRANCH_P (insn))
11446 final_scan_insn (XVECEXP (final_sequence, 0, 1),
11447 asm_out_file, optimize, 1, NULL);
11448 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11451 output_asm_insn ("nop", 0);
11452 fprintf (asm_out_file, "\n");
11455 /* Output NOT_TAKEN. */
11456 targetm.asm_out.internal_label (asm_out_file, "L",
11457 CODE_LABEL_NUMBER (not_taken));
11461 /* Return the assembly code for INSN, which branches to OPERANDS[0]
11462 if some ordering condition is true. The condition is given by
11463 OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
11464 OPERANDS[1]. OPERANDS[2] is the comparison's first operand;
11465 its second is always zero. */
11468 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
11470 const char *branch[2];
11472 /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true.
11473 Make BRANCH[0] branch on the inverse condition. */
11474 switch (GET_CODE (operands[1]))
11476 /* These cases are equivalent to comparisons against zero. */
11478 inverted_p = !inverted_p;
11479 /* Fall through. */
11481 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%0");
11482 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%0");
11485 /* These cases are always true or always false. */
11487 inverted_p = !inverted_p;
11488 /* Fall through. */
11490 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%0");
11491 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%0");
11495 branch[!inverted_p] = MIPS_BRANCH ("b%C1z", "%2,%0");
11496 branch[inverted_p] = MIPS_BRANCH ("b%N1z", "%2,%0");
11499 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
11502 /* Start a block of code that needs access to the LL, SC and SYNC
11506 mips_start_ll_sc_sync_block (void)
11508 if (!ISA_HAS_LL_SC)
11510 output_asm_insn (".set\tpush", 0);
11511 output_asm_insn (".set\tmips2", 0);
11515 /* End a block started by mips_start_ll_sc_sync_block. */
11518 mips_end_ll_sc_sync_block (void)
11520 if (!ISA_HAS_LL_SC)
11521 output_asm_insn (".set\tpop", 0);
11524 /* Output and/or return the asm template for a sync instruction. */
11527 mips_output_sync (void)
11529 mips_start_ll_sc_sync_block ();
11530 output_asm_insn ("sync", 0);
11531 mips_end_ll_sc_sync_block ();
11535 /* Return the asm template associated with sync_insn1 value TYPE.
11536 IS_64BIT_P is true if we want a 64-bit rather than 32-bit operation. */
11538 static const char *
11539 mips_sync_insn1_template (enum attr_sync_insn1 type, bool is_64bit_p)
11543 case SYNC_INSN1_MOVE:
11544 return "move\t%0,%z2";
11545 case SYNC_INSN1_LI:
11546 return "li\t%0,%2";
11547 case SYNC_INSN1_ADDU:
11548 return is_64bit_p ? "daddu\t%0,%1,%z2" : "addu\t%0,%1,%z2";
11549 case SYNC_INSN1_ADDIU:
11550 return is_64bit_p ? "daddiu\t%0,%1,%2" : "addiu\t%0,%1,%2";
11551 case SYNC_INSN1_SUBU:
11552 return is_64bit_p ? "dsubu\t%0,%1,%z2" : "subu\t%0,%1,%z2";
11553 case SYNC_INSN1_AND:
11554 return "and\t%0,%1,%z2";
11555 case SYNC_INSN1_ANDI:
11556 return "andi\t%0,%1,%2";
11557 case SYNC_INSN1_OR:
11558 return "or\t%0,%1,%z2";
11559 case SYNC_INSN1_ORI:
11560 return "ori\t%0,%1,%2";
11561 case SYNC_INSN1_XOR:
11562 return "xor\t%0,%1,%z2";
11563 case SYNC_INSN1_XORI:
11564 return "xori\t%0,%1,%2";
11566 gcc_unreachable ();
11569 /* Return the asm template associated with sync_insn2 value TYPE. */
11571 static const char *
11572 mips_sync_insn2_template (enum attr_sync_insn2 type)
11576 case SYNC_INSN2_NOP:
11577 gcc_unreachable ();
11578 case SYNC_INSN2_AND:
11579 return "and\t%0,%1,%z2";
11580 case SYNC_INSN2_XOR:
11581 return "xor\t%0,%1,%z2";
11582 case SYNC_INSN2_NOT:
11583 return "nor\t%0,%1,%.";
11585 gcc_unreachable ();
11588 /* OPERANDS are the operands to a sync loop instruction and INDEX is
11589 the value of the one of the sync_* attributes. Return the operand
11590 referred to by the attribute, or DEFAULT_VALUE if the insn doesn't
11591 have the associated attribute. */
11594 mips_get_sync_operand (rtx *operands, int index, rtx default_value)
11597 default_value = operands[index - 1];
11598 return default_value;
11601 /* INSN is a sync loop with operands OPERANDS. Build up a multi-insn
11602 sequence for it. */
11605 mips_process_sync_loop (rtx insn, rtx *operands)
11607 rtx at, mem, oldval, newval, inclusive_mask, exclusive_mask;
11608 rtx required_oldval, insn1_op2, tmp1, tmp2, tmp3;
11609 unsigned int tmp3_insn;
11610 enum attr_sync_insn1 insn1;
11611 enum attr_sync_insn2 insn2;
11614 /* Read an operand from the sync_WHAT attribute and store it in
11615 variable WHAT. DEFAULT is the default value if no attribute
11617 #define READ_OPERAND(WHAT, DEFAULT) \
11618 WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
11621 /* Read the memory. */
11622 READ_OPERAND (mem, 0);
11624 is_64bit_p = (GET_MODE_BITSIZE (GET_MODE (mem)) == 64);
11626 /* Read the other attributes. */
11627 at = gen_rtx_REG (GET_MODE (mem), AT_REGNUM);
11628 READ_OPERAND (oldval, at);
11629 READ_OPERAND (newval, at);
11630 READ_OPERAND (inclusive_mask, 0);
11631 READ_OPERAND (exclusive_mask, 0);
11632 READ_OPERAND (required_oldval, 0);
11633 READ_OPERAND (insn1_op2, 0);
11634 insn1 = get_attr_sync_insn1 (insn);
11635 insn2 = get_attr_sync_insn2 (insn);
11637 mips_multi_start ();
11639 /* Output the release side of the memory barrier. */
11640 if (get_attr_sync_release_barrier (insn) == SYNC_RELEASE_BARRIER_YES)
11642 if (required_oldval == 0 && TARGET_OCTEON)
11644 /* Octeon doesn't reorder reads, so a full barrier can be
11645 created by using SYNCW to order writes combined with the
11646 write from the following SC. When the SC successfully
11647 completes, we know that all preceding writes are also
11648 committed to the coherent memory system. It is possible
11649 for a single SYNCW to fail, but a pair of them will never
11650 fail, so we use two. */
11651 mips_multi_add_insn ("syncw", NULL);
11652 mips_multi_add_insn ("syncw", NULL);
11655 mips_multi_add_insn ("sync", NULL);
11658 /* Output the branch-back label. */
11659 mips_multi_add_label ("1:");
11661 /* OLDVAL = *MEM. */
11662 mips_multi_add_insn (is_64bit_p ? "lld\t%0,%1" : "ll\t%0,%1",
11663 oldval, mem, NULL);
11665 /* if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2. */
11666 if (required_oldval)
11668 if (inclusive_mask == 0)
11672 gcc_assert (oldval != at);
11673 mips_multi_add_insn ("and\t%0,%1,%2",
11674 at, oldval, inclusive_mask, NULL);
11677 mips_multi_add_insn ("bne\t%0,%z1,2f", tmp1, required_oldval, NULL);
11680 /* $TMP1 = OLDVAL & EXCLUSIVE_MASK. */
11681 if (exclusive_mask == 0)
11685 gcc_assert (oldval != at);
11686 mips_multi_add_insn ("and\t%0,%1,%z2",
11687 at, oldval, exclusive_mask, NULL);
11691 /* $TMP2 = INSN1 (OLDVAL, INSN1_OP2).
11693 We can ignore moves if $TMP4 != INSN1_OP2, since we'll still emit
11694 at least one instruction in that case. */
11695 if (insn1 == SYNC_INSN1_MOVE
11696 && (tmp1 != const0_rtx || insn2 != SYNC_INSN2_NOP))
11700 mips_multi_add_insn (mips_sync_insn1_template (insn1, is_64bit_p),
11701 newval, oldval, insn1_op2, NULL);
11705 /* $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK). */
11706 if (insn2 == SYNC_INSN2_NOP)
11710 mips_multi_add_insn (mips_sync_insn2_template (insn2),
11711 newval, tmp2, inclusive_mask, NULL);
11714 tmp3_insn = mips_multi_last_index ();
11716 /* $AT = $TMP1 | $TMP3. */
11717 if (tmp1 == const0_rtx || tmp3 == const0_rtx)
11719 mips_multi_set_operand (tmp3_insn, 0, at);
11724 gcc_assert (tmp1 != tmp3);
11725 mips_multi_add_insn ("or\t%0,%1,%2", at, tmp1, tmp3, NULL);
11728 /* if (!commit (*MEM = $AT)) goto 1.
11730 This will sometimes be a delayed branch; see the write code below
11732 mips_multi_add_insn (is_64bit_p ? "scd\t%0,%1" : "sc\t%0,%1", at, mem, NULL);
11733 mips_multi_add_insn ("beq%?\t%0,%.,1b", at, NULL);
11735 /* if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot]. */
11736 if (insn1 != SYNC_INSN1_MOVE && insn1 != SYNC_INSN1_LI && tmp3 != newval)
11738 mips_multi_copy_insn (tmp3_insn);
11739 mips_multi_set_operand (mips_multi_last_index (), 0, newval);
11742 mips_multi_add_insn ("nop", NULL);
11744 /* Output the acquire side of the memory barrier. */
11745 if (TARGET_SYNC_AFTER_SC)
11746 mips_multi_add_insn ("sync", NULL);
11748 /* Output the exit label, if needed. */
11749 if (required_oldval)
11750 mips_multi_add_label ("2:");
11752 #undef READ_OPERAND
11755 /* Output and/or return the asm template for sync loop INSN, which has
11756 the operands given by OPERANDS. */
11759 mips_output_sync_loop (rtx insn, rtx *operands)
11761 mips_process_sync_loop (insn, operands);
11763 /* Use branch-likely instructions to work around the LL/SC R10000
11765 mips_branch_likely = TARGET_FIX_R10000;
11767 mips_push_asm_switch (&mips_noreorder);
11768 mips_push_asm_switch (&mips_nomacro);
11769 mips_push_asm_switch (&mips_noat);
11770 mips_start_ll_sc_sync_block ();
11772 mips_multi_write ();
11774 mips_end_ll_sc_sync_block ();
11775 mips_pop_asm_switch (&mips_noat);
11776 mips_pop_asm_switch (&mips_nomacro);
11777 mips_pop_asm_switch (&mips_noreorder);
11782 /* Return the number of individual instructions in sync loop INSN,
11783 which has the operands given by OPERANDS. */
11786 mips_sync_loop_insns (rtx insn, rtx *operands)
11788 mips_process_sync_loop (insn, operands);
11789 return mips_multi_num_insns;
11792 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
11793 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
11795 When working around R4000 and R4400 errata, we need to make sure that
11796 the division is not immediately followed by a shift[1][2]. We also
11797 need to stop the division from being put into a branch delay slot[3].
11798 The easiest way to avoid both problems is to add a nop after the
11799 division. When a divide-by-zero check is needed, this nop can be
11800 used to fill the branch delay slot.
11802 [1] If a double-word or a variable shift executes immediately
11803 after starting an integer division, the shift may give an
11804 incorrect result. See quotations of errata #16 and #28 from
11805 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11806 in mips.md for details.
11808 [2] A similar bug to [1] exists for all revisions of the
11809 R4000 and the R4400 when run in an MC configuration.
11810 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
11812 "19. In this following sequence:
11814 ddiv (or ddivu or div or divu)
11815 dsll32 (or dsrl32, dsra32)
11817 if an MPT stall occurs, while the divide is slipping the cpu
11818 pipeline, then the following double shift would end up with an
11821 Workaround: The compiler needs to avoid generating any
11822 sequence with divide followed by extended double shift."
11824 This erratum is also present in "MIPS R4400MC Errata, Processor
11825 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
11826 & 3.0" as errata #10 and #4, respectively.
11828 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
11829 (also valid for MIPS R4000MC processors):
11831 "52. R4000SC: This bug does not apply for the R4000PC.
11833 There are two flavors of this bug:
11835 1) If the instruction just after divide takes an RF exception
11836 (tlb-refill, tlb-invalid) and gets an instruction cache
11837 miss (both primary and secondary) and the line which is
11838 currently in secondary cache at this index had the first
11839 data word, where the bits 5..2 are set, then R4000 would
11840 get a wrong result for the div.
11845 ------------------- # end-of page. -tlb-refill
11850 ------------------- # end-of page. -tlb-invalid
11853 2) If the divide is in the taken branch delay slot, where the
11854 target takes RF exception and gets an I-cache miss for the
11855 exception vector or where I-cache miss occurs for the
11856 target address, under the above mentioned scenarios, the
11857 div would get wrong results.
11860 j r2 # to next page mapped or unmapped
11861 div r8,r9 # this bug would be there as long
11862 # as there is an ICache miss and
11863 nop # the "data pattern" is present
11866 beq r0, r0, NextPage # to Next page
11870 This bug is present for div, divu, ddiv, and ddivu
11873 Workaround: For item 1), OS could make sure that the next page
11874 after the divide instruction is also mapped. For item 2), the
11875 compiler could make sure that the divide instruction is not in
11876 the branch delay slot."
11878 These processors have PRId values of 0x00004220 and 0x00004300 for
11879 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
11882 mips_output_division (const char *division, rtx *operands)
11887 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
11889 output_asm_insn (s, operands);
11892 if (TARGET_CHECK_ZERO_DIV)
11896 output_asm_insn (s, operands);
11897 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
11899 else if (GENERATE_DIVIDE_TRAPS)
11901 /* Avoid long replay penalty on load miss by putting the trap before
11904 output_asm_insn ("teq\t%2,%.,7", operands);
11907 output_asm_insn (s, operands);
11908 s = "teq\t%2,%.,7";
11913 output_asm_insn ("%(bne\t%2,%.,1f", operands);
11914 output_asm_insn (s, operands);
11915 s = "break\t7%)\n1:";
11921 /* Return true if IN_INSN is a multiply-add or multiply-subtract
11922 instruction and if OUT_INSN assigns to the accumulator operand. */
11925 mips_linked_madd_p (rtx out_insn, rtx in_insn)
11929 x = single_set (in_insn);
11935 if (GET_CODE (x) == PLUS
11936 && GET_CODE (XEXP (x, 0)) == MULT
11937 && reg_set_p (XEXP (x, 1), out_insn))
11940 if (GET_CODE (x) == MINUS
11941 && GET_CODE (XEXP (x, 1)) == MULT
11942 && reg_set_p (XEXP (x, 0), out_insn))
11948 /* True if the dependency between OUT_INSN and IN_INSN is on the store
11949 data rather than the address. We need this because the cprestore
11950 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
11951 which causes the default routine to abort. We just return false
11955 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11957 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11960 return !store_data_bypass_p (out_insn, in_insn);
11964 /* Variables and flags used in scheduler hooks when tuning for
11968 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
11971 /* If true, then next ALU1/2 instruction will go to ALU1. */
11974 /* If true, then next FALU1/2 unstruction will go to FALU1. */
11977 /* Codes to query if [f]alu{1,2}_core units are subscribed or not. */
11978 int alu1_core_unit_code;
11979 int alu2_core_unit_code;
11980 int falu1_core_unit_code;
11981 int falu2_core_unit_code;
11983 /* True if current cycle has a multi instruction.
11984 This flag is used in mips_ls2_dfa_post_advance_cycle. */
11985 bool cycle_has_multi_p;
11987 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
11988 These are used in mips_ls2_dfa_post_advance_cycle to initialize
11990 E.g., when alu1_turn_enabled_insn is issued it makes next ALU1/2
11991 instruction to go ALU1. */
11992 rtx alu1_turn_enabled_insn;
11993 rtx alu2_turn_enabled_insn;
11994 rtx falu1_turn_enabled_insn;
11995 rtx falu2_turn_enabled_insn;
11998 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11999 dependencies have no cost, except on the 20Kc where output-dependence
12000 is treated like input-dependence. */
12003 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
12004 rtx dep ATTRIBUTE_UNUSED, int cost)
12006 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
12009 if (REG_NOTE_KIND (link) != 0)
12014 /* Return the number of instructions that can be issued per cycle. */
12017 mips_issue_rate (void)
12021 case PROCESSOR_74KC:
12022 case PROCESSOR_74KF2_1:
12023 case PROCESSOR_74KF1_1:
12024 case PROCESSOR_74KF3_2:
12025 /* The 74k is not strictly quad-issue cpu, but can be seen as one
12026 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
12027 but in reality only a maximum of 3 insns can be issued as
12028 floating-point loads and stores also require a slot in the
12030 case PROCESSOR_R10000:
12031 /* All R10K Processors are quad-issue (being the first MIPS
12032 processors to support this feature). */
12035 case PROCESSOR_20KC:
12036 case PROCESSOR_R4130:
12037 case PROCESSOR_R5400:
12038 case PROCESSOR_R5500:
12039 case PROCESSOR_R7000:
12040 case PROCESSOR_R9000:
12041 case PROCESSOR_OCTEON:
12044 case PROCESSOR_SB1:
12045 case PROCESSOR_SB1A:
12046 /* This is actually 4, but we get better performance if we claim 3.
12047 This is partly because of unwanted speculative code motion with the
12048 larger number, and partly because in most common cases we can't
12049 reach the theoretical max of 4. */
12052 case PROCESSOR_LOONGSON_2E:
12053 case PROCESSOR_LOONGSON_2F:
12054 case PROCESSOR_LOONGSON_3A:
12062 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
12065 mips_ls2_init_dfa_post_cycle_insn (void)
12068 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
12069 mips_ls2.alu1_turn_enabled_insn = get_insns ();
12073 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
12074 mips_ls2.alu2_turn_enabled_insn = get_insns ();
12078 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
12079 mips_ls2.falu1_turn_enabled_insn = get_insns ();
12083 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
12084 mips_ls2.falu2_turn_enabled_insn = get_insns ();
12087 mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
12088 mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
12089 mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
12090 mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
12093 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
12094 Init data used in mips_dfa_post_advance_cycle. */
12097 mips_init_dfa_post_cycle_insn (void)
12099 if (TUNE_LOONGSON_2EF)
12100 mips_ls2_init_dfa_post_cycle_insn ();
12103 /* Initialize STATE when scheduling for Loongson 2E/2F.
12104 Support round-robin dispatch scheme by enabling only one of
12105 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
12109 mips_ls2_dfa_post_advance_cycle (state_t state)
12111 if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
12113 /* Though there are no non-pipelined ALU1 insns,
12114 we can get an instruction of type 'multi' before reload. */
12115 gcc_assert (mips_ls2.cycle_has_multi_p);
12116 mips_ls2.alu1_turn_p = false;
12119 mips_ls2.cycle_has_multi_p = false;
12121 if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
12122 /* We have a non-pipelined alu instruction in the core,
12123 adjust round-robin counter. */
12124 mips_ls2.alu1_turn_p = true;
12126 if (mips_ls2.alu1_turn_p)
12128 if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
12129 gcc_unreachable ();
12133 if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
12134 gcc_unreachable ();
12137 if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
12139 /* There are no non-pipelined FALU1 insns. */
12140 gcc_unreachable ();
12141 mips_ls2.falu1_turn_p = false;
12144 if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
12145 /* We have a non-pipelined falu instruction in the core,
12146 adjust round-robin counter. */
12147 mips_ls2.falu1_turn_p = true;
12149 if (mips_ls2.falu1_turn_p)
12151 if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
12152 gcc_unreachable ();
12156 if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
12157 gcc_unreachable ();
12161 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
12162 This hook is being called at the start of each cycle. */
12165 mips_dfa_post_advance_cycle (void)
12167 if (TUNE_LOONGSON_2EF)
12168 mips_ls2_dfa_post_advance_cycle (curr_state);
12171 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
12172 be as wide as the scheduling freedom in the DFA. */
12175 mips_multipass_dfa_lookahead (void)
12177 /* Can schedule up to 4 of the 6 function units in any one cycle. */
12181 if (TUNE_LOONGSON_2EF || TUNE_LOONGSON_3A)
12190 /* Remove the instruction at index LOWER from ready queue READY and
12191 reinsert it in front of the instruction at index HIGHER. LOWER must
12195 mips_promote_ready (rtx *ready, int lower, int higher)
12200 new_head = ready[lower];
12201 for (i = lower; i < higher; i++)
12202 ready[i] = ready[i + 1];
12203 ready[i] = new_head;
12206 /* If the priority of the instruction at POS2 in the ready queue READY
12207 is within LIMIT units of that of the instruction at POS1, swap the
12208 instructions if POS2 is not already less than POS1. */
12211 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
12214 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
12218 temp = ready[pos1];
12219 ready[pos1] = ready[pos2];
12220 ready[pos2] = temp;
12224 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
12225 that may clobber hi or lo. */
12226 static rtx mips_macc_chains_last_hilo;
12228 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
12229 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
12232 mips_macc_chains_record (rtx insn)
12234 if (get_attr_may_clobber_hilo (insn))
12235 mips_macc_chains_last_hilo = insn;
12238 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
12239 has NREADY elements, looking for a multiply-add or multiply-subtract
12240 instruction that is cumulative with mips_macc_chains_last_hilo.
12241 If there is one, promote it ahead of anything else that might
12242 clobber hi or lo. */
12245 mips_macc_chains_reorder (rtx *ready, int nready)
12249 if (mips_macc_chains_last_hilo != 0)
12250 for (i = nready - 1; i >= 0; i--)
12251 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
12253 for (j = nready - 1; j > i; j--)
12254 if (recog_memoized (ready[j]) >= 0
12255 && get_attr_may_clobber_hilo (ready[j]))
12257 mips_promote_ready (ready, i, j);
12264 /* The last instruction to be scheduled. */
12265 static rtx vr4130_last_insn;
12267 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
12268 points to an rtx that is initially an instruction. Nullify the rtx
12269 if the instruction uses the value of register X. */
12272 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
12277 insn_ptr = (rtx *) data;
12280 && reg_referenced_p (x, PATTERN (*insn_ptr)))
12284 /* Return true if there is true register dependence between vr4130_last_insn
12288 vr4130_true_reg_dependence_p (rtx insn)
12290 note_stores (PATTERN (vr4130_last_insn),
12291 vr4130_true_reg_dependence_p_1, &insn);
12295 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
12296 the ready queue and that INSN2 is the instruction after it, return
12297 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
12298 in which INSN1 and INSN2 can probably issue in parallel, but for
12299 which (INSN2, INSN1) should be less sensitive to instruction
12300 alignment than (INSN1, INSN2). See 4130.md for more details. */
12303 vr4130_swap_insns_p (rtx insn1, rtx insn2)
12305 sd_iterator_def sd_it;
12308 /* Check for the following case:
12310 1) there is some other instruction X with an anti dependence on INSN1;
12311 2) X has a higher priority than INSN2; and
12312 3) X is an arithmetic instruction (and thus has no unit restrictions).
12314 If INSN1 is the last instruction blocking X, it would better to
12315 choose (INSN1, X) over (INSN2, INSN1). */
12316 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
12317 if (DEP_TYPE (dep) == REG_DEP_ANTI
12318 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
12319 && recog_memoized (DEP_CON (dep)) >= 0
12320 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
12323 if (vr4130_last_insn != 0
12324 && recog_memoized (insn1) >= 0
12325 && recog_memoized (insn2) >= 0)
12327 /* See whether INSN1 and INSN2 use different execution units,
12328 or if they are both ALU-type instructions. If so, they can
12329 probably execute in parallel. */
12330 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
12331 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
12332 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
12334 /* If only one of the instructions has a dependence on
12335 vr4130_last_insn, prefer to schedule the other one first. */
12336 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
12337 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
12338 if (dep1_p != dep2_p)
12341 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
12342 is not an ALU-type instruction and if INSN1 uses the same
12343 execution unit. (Note that if this condition holds, we already
12344 know that INSN2 uses a different execution unit.) */
12345 if (class1 != VR4130_CLASS_ALU
12346 && recog_memoized (vr4130_last_insn) >= 0
12347 && class1 == get_attr_vr4130_class (vr4130_last_insn))
12354 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
12355 queue with at least two instructions. Swap the first two if
12356 vr4130_swap_insns_p says that it could be worthwhile. */
12359 vr4130_reorder (rtx *ready, int nready)
12361 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
12362 mips_promote_ready (ready, nready - 2, nready - 1);
12365 /* Record whether last 74k AGEN instruction was a load or store. */
12366 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
12368 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
12369 resets to TYPE_UNKNOWN state. */
12372 mips_74k_agen_init (rtx insn)
12374 if (!insn || CALL_P (insn) || JUMP_P (insn))
12375 mips_last_74k_agen_insn = TYPE_UNKNOWN;
12378 enum attr_type type = get_attr_type (insn);
12379 if (type == TYPE_LOAD || type == TYPE_STORE)
12380 mips_last_74k_agen_insn = type;
12384 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
12385 loads to be grouped together, and multiple stores to be grouped
12386 together. Swap things around in the ready queue to make this happen. */
12389 mips_74k_agen_reorder (rtx *ready, int nready)
12392 int store_pos, load_pos;
12397 for (i = nready - 1; i >= 0; i--)
12399 rtx insn = ready[i];
12400 if (USEFUL_INSN_P (insn))
12401 switch (get_attr_type (insn))
12404 if (store_pos == -1)
12409 if (load_pos == -1)
12418 if (load_pos == -1 || store_pos == -1)
12421 switch (mips_last_74k_agen_insn)
12424 /* Prefer to schedule loads since they have a higher latency. */
12426 /* Swap loads to the front of the queue. */
12427 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
12430 /* Swap stores to the front of the queue. */
12431 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
12438 /* Implement TARGET_SCHED_INIT. */
12441 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12442 int max_ready ATTRIBUTE_UNUSED)
12444 mips_macc_chains_last_hilo = 0;
12445 vr4130_last_insn = 0;
12446 mips_74k_agen_init (NULL_RTX);
12448 /* When scheduling for Loongson2, branch instructions go to ALU1,
12449 therefore basic block is most likely to start with round-robin counter
12450 pointed to ALU2. */
12451 mips_ls2.alu1_turn_p = false;
12452 mips_ls2.falu1_turn_p = true;
12455 /* Subroutine used by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
12458 mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12459 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
12461 if (!reload_completed
12462 && TUNE_MACC_CHAINS
12464 mips_macc_chains_reorder (ready, *nreadyp);
12466 if (reload_completed
12468 && !TARGET_VR4130_ALIGN
12470 vr4130_reorder (ready, *nreadyp);
12473 mips_74k_agen_reorder (ready, *nreadyp);
12476 /* Implement TARGET_SCHED_REORDER. */
12479 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12480 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
12482 mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
12483 return mips_issue_rate ();
12486 /* Implement TARGET_SCHED_REORDER2. */
12489 mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12490 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
12492 mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
12493 return cached_can_issue_more;
12496 /* Update round-robin counters for ALU1/2 and FALU1/2. */
12499 mips_ls2_variable_issue (rtx insn)
12501 if (mips_ls2.alu1_turn_p)
12503 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
12504 mips_ls2.alu1_turn_p = false;
12508 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
12509 mips_ls2.alu1_turn_p = true;
12512 if (mips_ls2.falu1_turn_p)
12514 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
12515 mips_ls2.falu1_turn_p = false;
12519 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
12520 mips_ls2.falu1_turn_p = true;
12523 if (recog_memoized (insn) >= 0)
12524 mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
12527 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
12530 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12531 rtx insn, int more)
12533 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
12534 if (USEFUL_INSN_P (insn))
12536 if (get_attr_type (insn) != TYPE_GHOST)
12538 if (!reload_completed && TUNE_MACC_CHAINS)
12539 mips_macc_chains_record (insn);
12540 vr4130_last_insn = insn;
12542 mips_74k_agen_init (insn);
12543 else if (TUNE_LOONGSON_2EF)
12544 mips_ls2_variable_issue (insn);
12547 /* Instructions of type 'multi' should all be split before
12548 the second scheduling pass. */
12549 gcc_assert (!reload_completed
12550 || recog_memoized (insn) < 0
12551 || get_attr_type (insn) != TYPE_MULTI);
12553 cached_can_issue_more = more;
12557 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
12558 return the first operand of the associated PREF or PREFX insn. */
12561 mips_prefetch_cookie (rtx write, rtx locality)
12563 /* store_streamed / load_streamed. */
12564 if (INTVAL (locality) <= 0)
12565 return GEN_INT (INTVAL (write) + 4);
12567 /* store / load. */
12568 if (INTVAL (locality) <= 2)
12571 /* store_retained / load_retained. */
12572 return GEN_INT (INTVAL (write) + 6);
12575 /* Flags that indicate when a built-in function is available.
12577 BUILTIN_AVAIL_NON_MIPS16
12578 The function is available on the current target, but only
12579 in non-MIPS16 mode. */
12580 #define BUILTIN_AVAIL_NON_MIPS16 1
12582 /* Declare an availability predicate for built-in functions that
12583 require non-MIPS16 mode and also require COND to be true.
12584 NAME is the main part of the predicate's name. */
12585 #define AVAIL_NON_MIPS16(NAME, COND) \
12586 static unsigned int \
12587 mips_builtin_avail_##NAME (void) \
12589 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
12592 /* This structure describes a single built-in function. */
12593 struct mips_builtin_description {
12594 /* The code of the main .md file instruction. See mips_builtin_type
12595 for more information. */
12596 enum insn_code icode;
12598 /* The floating-point comparison code to use with ICODE, if any. */
12599 enum mips_fp_condition cond;
12601 /* The name of the built-in function. */
12604 /* Specifies how the function should be expanded. */
12605 enum mips_builtin_type builtin_type;
12607 /* The function's prototype. */
12608 enum mips_function_type function_type;
12610 /* Whether the function is available. */
12611 unsigned int (*avail) (void);
12614 AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
12615 AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
12616 AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
12617 AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
12618 AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
12619 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
12620 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
12621 AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
12622 AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
12624 /* Construct a mips_builtin_description from the given arguments.
12626 INSN is the name of the associated instruction pattern, without the
12627 leading CODE_FOR_mips_.
12629 CODE is the floating-point condition code associated with the
12630 function. It can be 'f' if the field is not applicable.
12632 NAME is the name of the function itself, without the leading
12635 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
12637 AVAIL is the name of the availability predicate, without the leading
12638 mips_builtin_avail_. */
12639 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
12640 FUNCTION_TYPE, AVAIL) \
12641 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
12642 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
12643 mips_builtin_avail_ ## AVAIL }
12645 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
12646 mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE and AVAIL
12647 are as for MIPS_BUILTIN. */
12648 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
12649 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
12651 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
12652 are subject to mips_builtin_avail_<AVAIL>. */
12653 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
12654 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
12655 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
12656 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
12657 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
12659 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
12660 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
12661 while the any and all forms are subject to mips_builtin_avail_mips3d. */
12662 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
12663 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
12664 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
12666 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
12667 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
12669 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
12670 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
12672 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
12673 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
12676 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
12677 are subject to mips_builtin_avail_mips3d. */
12678 #define CMP_4S_BUILTINS(INSN, COND) \
12679 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
12680 MIPS_BUILTIN_CMP_ANY, \
12681 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
12682 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
12683 MIPS_BUILTIN_CMP_ALL, \
12684 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
12686 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
12687 instruction requires mips_builtin_avail_<AVAIL>. */
12688 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
12689 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
12690 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12692 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
12693 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
12696 /* Define all the built-in functions related to C.cond.fmt condition COND. */
12697 #define CMP_BUILTINS(COND) \
12698 MOVTF_BUILTINS (c, COND, paired_single), \
12699 MOVTF_BUILTINS (cabs, COND, mips3d), \
12700 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
12701 CMP_PS_BUILTINS (c, COND, paired_single), \
12702 CMP_PS_BUILTINS (cabs, COND, mips3d), \
12703 CMP_4S_BUILTINS (c, COND), \
12704 CMP_4S_BUILTINS (cabs, COND)
12706 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
12707 function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
12708 and AVAIL are as for MIPS_BUILTIN. */
12709 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
12710 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
12711 FUNCTION_TYPE, AVAIL)
12713 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
12714 branch instruction. AVAIL is as for MIPS_BUILTIN. */
12715 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
12716 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
12717 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
12719 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
12720 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
12721 builtin_description field. */
12722 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
12723 { CODE_FOR_loongson_ ## INSN, MIPS_FP_COND_f, \
12724 "__builtin_loongson_" #FN_NAME, MIPS_BUILTIN_DIRECT, \
12725 FUNCTION_TYPE, mips_builtin_avail_loongson }
12727 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
12728 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
12729 builtin_description field. */
12730 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
12731 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
12733 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
12734 We use functions of this form when the same insn can be usefully applied
12735 to more than one datatype. */
12736 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
12737 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
12739 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
12740 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
12741 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
12742 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
12743 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
12744 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
12745 #define CODE_FOR_mips_mult CODE_FOR_mulsidi3_32bit
12746 #define CODE_FOR_mips_multu CODE_FOR_umulsidi3_32bit
12748 #define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
12749 #define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
12750 #define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
12751 #define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
12752 #define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
12753 #define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
12754 #define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
12755 #define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
12756 #define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
12757 #define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
12758 #define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
12759 #define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
12760 #define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
12761 #define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
12762 #define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
12763 #define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
12764 #define CODE_FOR_loongson_pmullh CODE_FOR_mulv4hi3
12765 #define CODE_FOR_loongson_psllh CODE_FOR_ashlv4hi3
12766 #define CODE_FOR_loongson_psllw CODE_FOR_ashlv2si3
12767 #define CODE_FOR_loongson_psrlh CODE_FOR_lshrv4hi3
12768 #define CODE_FOR_loongson_psrlw CODE_FOR_lshrv2si3
12769 #define CODE_FOR_loongson_psrah CODE_FOR_ashrv4hi3
12770 #define CODE_FOR_loongson_psraw CODE_FOR_ashrv2si3
12771 #define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
12772 #define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
12773 #define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
12774 #define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
12775 #define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
12776 #define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
12777 #define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
12778 #define CODE_FOR_loongson_punpckhbh CODE_FOR_vec_interleave_highv8qi
12779 #define CODE_FOR_loongson_punpckhhw CODE_FOR_vec_interleave_highv4hi
12780 #define CODE_FOR_loongson_punpckhwd CODE_FOR_vec_interleave_highv2si
12781 #define CODE_FOR_loongson_punpcklbh CODE_FOR_vec_interleave_lowv8qi
12782 #define CODE_FOR_loongson_punpcklhw CODE_FOR_vec_interleave_lowv4hi
12783 #define CODE_FOR_loongson_punpcklwd CODE_FOR_vec_interleave_lowv2si
12785 static const struct mips_builtin_description mips_builtins[] = {
12786 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
12787 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
12788 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
12789 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
12790 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
12791 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
12792 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
12793 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
12795 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
12796 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
12797 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
12798 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
12799 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
12801 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
12802 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
12803 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
12804 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
12805 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
12806 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
12808 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
12809 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
12810 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
12811 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
12812 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
12813 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
12815 MIPS_FP_CONDITIONS (CMP_BUILTINS),
12817 /* Built-in functions for the SB-1 processor. */
12818 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
12820 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
12821 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
12822 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
12823 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
12824 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
12825 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
12826 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
12827 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
12828 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
12829 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
12830 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
12831 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
12832 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
12833 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
12834 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
12835 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
12836 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
12837 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
12838 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
12839 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
12840 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
12841 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
12842 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
12843 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
12844 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
12845 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
12846 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
12847 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
12848 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
12849 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
12850 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
12851 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
12852 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
12853 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
12854 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
12855 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
12856 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
12857 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
12858 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
12859 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
12860 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
12861 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
12862 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
12863 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
12864 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
12865 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
12866 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
12867 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
12868 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
12869 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
12870 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
12871 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
12872 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
12873 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
12874 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
12875 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
12876 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
12877 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
12878 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
12879 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
12880 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
12881 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
12882 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
12883 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
12884 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
12885 BPOSGE_BUILTIN (32, dsp),
12887 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
12888 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
12889 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12890 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12891 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
12892 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
12893 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
12894 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
12895 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
12896 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
12897 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
12898 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12899 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12900 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
12901 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12902 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
12903 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
12904 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
12905 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
12906 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
12907 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
12908 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
12909 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
12910 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12911 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12912 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
12913 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
12914 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12915 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12916 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
12917 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
12918 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12919 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
12920 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
12921 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
12923 /* Built-in functions for the DSP ASE (32-bit only). */
12924 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
12925 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
12926 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
12927 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
12928 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
12929 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
12930 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
12931 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
12932 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
12933 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
12934 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
12935 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
12936 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
12937 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
12938 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
12939 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
12940 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
12941 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
12942 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
12943 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
12944 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
12945 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
12946 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
12947 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
12948 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
12949 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dsp_32),
12950 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dsp_32),
12952 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
12953 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12954 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12955 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12956 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12957 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12958 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12959 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12960 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12961 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
12963 /* Builtin functions for ST Microelectronics Loongson-2E/2F cores. */
12964 LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
12965 LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
12966 LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
12967 LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
12968 LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
12969 LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
12970 LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
12971 LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
12972 LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
12973 LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
12974 LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
12975 LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
12976 LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
12977 LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
12978 LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
12979 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
12980 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
12981 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
12982 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
12983 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
12984 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
12985 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
12986 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
12987 LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
12988 LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
12989 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
12990 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
12991 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
12992 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
12993 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
12994 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
12995 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
12996 LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
12997 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
12998 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
12999 LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
13000 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
13001 LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
13002 LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
13003 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13004 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13005 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13006 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13007 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
13008 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
13009 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
13010 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
13011 LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
13012 LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
13013 LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
13014 LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
13015 LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
13016 LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
13017 LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
13018 LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13019 LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
13020 LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
13021 LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
13022 LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
13023 LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
13024 LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
13025 LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI_UQI),
13026 LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_V4HI_UQI),
13027 LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
13028 LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
13029 LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
13030 LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
13031 LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
13032 LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
13033 LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
13034 LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
13035 LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
13036 LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
13037 LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
13038 LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
13039 LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
13040 LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13041 LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
13042 LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
13043 LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
13044 LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
13045 LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
13046 LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
13047 LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
13048 LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
13049 LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13050 LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
13051 LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
13052 LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13053 LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
13054 LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
13055 LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
13056 LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
13057 LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
13058 LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
13059 LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
13060 LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
13061 LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
13062 LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
13064 /* Sundry other built-in functions. */
13065 DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache)
13068 /* Index I is the function declaration for mips_builtins[I], or null if the
13069 function isn't defined on this target. */
13070 static GTY(()) tree mips_builtin_decls[ARRAY_SIZE (mips_builtins)];
13072 /* MODE is a vector mode whose elements have type TYPE. Return the type
13073 of the vector itself. */
13076 mips_builtin_vector_type (tree type, enum machine_mode mode)
13078 static tree types[2 * (int) MAX_MACHINE_MODE];
13081 mode_index = (int) mode;
13083 if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
13084 mode_index += MAX_MACHINE_MODE;
13086 if (types[mode_index] == NULL_TREE)
13087 types[mode_index] = build_vector_type_for_mode (type, mode);
13088 return types[mode_index];
13091 /* Return a type for 'const volatile void *'. */
13094 mips_build_cvpointer_type (void)
13098 if (cache == NULL_TREE)
13099 cache = build_pointer_type (build_qualified_type
13101 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
13105 /* Source-level argument types. */
13106 #define MIPS_ATYPE_VOID void_type_node
13107 #define MIPS_ATYPE_INT integer_type_node
13108 #define MIPS_ATYPE_POINTER ptr_type_node
13109 #define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()
13111 /* Standard mode-based argument types. */
13112 #define MIPS_ATYPE_UQI unsigned_intQI_type_node
13113 #define MIPS_ATYPE_SI intSI_type_node
13114 #define MIPS_ATYPE_USI unsigned_intSI_type_node
13115 #define MIPS_ATYPE_DI intDI_type_node
13116 #define MIPS_ATYPE_UDI unsigned_intDI_type_node
13117 #define MIPS_ATYPE_SF float_type_node
13118 #define MIPS_ATYPE_DF double_type_node
13120 /* Vector argument types. */
13121 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
13122 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
13123 #define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
13124 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
13125 #define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
13126 #define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
13127 #define MIPS_ATYPE_UV2SI \
13128 mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
13129 #define MIPS_ATYPE_UV4HI \
13130 mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
13131 #define MIPS_ATYPE_UV8QI \
13132 mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
13134 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
13135 their associated MIPS_ATYPEs. */
13136 #define MIPS_FTYPE_ATYPES1(A, B) \
13137 MIPS_ATYPE_##A, MIPS_ATYPE_##B
13139 #define MIPS_FTYPE_ATYPES2(A, B, C) \
13140 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
13142 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
13143 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
13145 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
13146 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
13149 /* Return the function type associated with function prototype TYPE. */
13152 mips_build_function_type (enum mips_function_type type)
13154 static tree types[(int) MIPS_MAX_FTYPE_MAX];
13156 if (types[(int) type] == NULL_TREE)
13159 #define DEF_MIPS_FTYPE(NUM, ARGS) \
13160 case MIPS_FTYPE_NAME##NUM ARGS: \
13161 types[(int) type] \
13162 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
13165 #include "config/mips/mips-ftypes.def"
13166 #undef DEF_MIPS_FTYPE
13168 gcc_unreachable ();
13171 return types[(int) type];
13174 /* Implement TARGET_INIT_BUILTINS. */
13177 mips_init_builtins (void)
13179 const struct mips_builtin_description *d;
13182 /* Iterate through all of the bdesc arrays, initializing all of the
13183 builtin functions. */
13184 for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
13186 d = &mips_builtins[i];
13188 mips_builtin_decls[i]
13189 = add_builtin_function (d->name,
13190 mips_build_function_type (d->function_type),
13191 i, BUILT_IN_MD, NULL, NULL);
13195 /* Implement TARGET_BUILTIN_DECL. */
13198 mips_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
13200 if (code >= ARRAY_SIZE (mips_builtins))
13201 return error_mark_node;
13202 return mips_builtin_decls[code];
13205 /* Take argument ARGNO from EXP's argument list and convert it into
13206 an expand operand. Store the operand in *OP. */
13209 mips_prepare_builtin_arg (struct expand_operand *op, tree exp,
13210 unsigned int argno)
13215 arg = CALL_EXPR_ARG (exp, argno);
13216 value = expand_normal (arg);
13217 create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg)));
13220 /* Expand instruction ICODE as part of a built-in function sequence.
13221 Use the first NOPS elements of OPS as the instruction's operands.
13222 HAS_TARGET_P is true if operand 0 is a target; it is false if the
13223 instruction has no target.
13225 Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx. */
13228 mips_expand_builtin_insn (enum insn_code icode, unsigned int nops,
13229 struct expand_operand *ops, bool has_target_p)
13231 if (!maybe_expand_insn (icode, nops, ops))
13233 error ("invalid argument to built-in function");
13234 return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
13236 return has_target_p ? ops[0].value : const0_rtx;
13239 /* Expand a floating-point comparison for built-in function call EXP.
13240 The first NARGS arguments are the values to be compared. ICODE is
13241 the .md pattern that does the comparison and COND is the condition
13242 that is being tested. Return an rtx for the result. */
13245 mips_expand_builtin_compare_1 (enum insn_code icode,
13246 enum mips_fp_condition cond,
13247 tree exp, int nargs)
13249 struct expand_operand ops[MAX_RECOG_OPERANDS];
13252 /* The instruction should have a target operand, an operand for each
13253 argument, and an operand for COND. */
13254 gcc_assert (nargs + 2 == insn_data[(int) icode].n_generator_args);
13257 create_output_operand (&ops[opno++], NULL_RTX,
13258 insn_data[(int) icode].operand[0].mode);
13259 for (argno = 0; argno < nargs; argno++)
13260 mips_prepare_builtin_arg (&ops[opno++], exp, argno);
13261 create_integer_operand (&ops[opno++], (int) cond);
13262 return mips_expand_builtin_insn (icode, opno, ops, true);
13265 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
13266 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
13267 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
13268 suggests a good place to put the result. */
13271 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
13274 struct expand_operand ops[MAX_RECOG_OPERANDS];
13277 /* Map any target to operand 0. */
13280 create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
13282 /* Map the arguments to the other operands. */
13283 gcc_assert (opno + call_expr_nargs (exp)
13284 == insn_data[icode].n_generator_args);
13285 for (argno = 0; argno < call_expr_nargs (exp); argno++)
13286 mips_prepare_builtin_arg (&ops[opno++], exp, argno);
13288 return mips_expand_builtin_insn (icode, opno, ops, has_target_p);
13291 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
13292 function; TYPE says which. EXP is the CALL_EXPR that calls the
13293 function, ICODE is the instruction that should be used to compare
13294 the first two arguments, and COND is the condition it should test.
13295 TARGET, if nonnull, suggests a good place to put the result. */
13298 mips_expand_builtin_movtf (enum mips_builtin_type type,
13299 enum insn_code icode, enum mips_fp_condition cond,
13300 rtx target, tree exp)
13302 struct expand_operand ops[4];
13305 cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp, 2);
13306 create_output_operand (&ops[0], target, TYPE_MODE (TREE_TYPE (exp)));
13307 if (type == MIPS_BUILTIN_MOVT)
13309 mips_prepare_builtin_arg (&ops[2], exp, 2);
13310 mips_prepare_builtin_arg (&ops[1], exp, 3);
13314 mips_prepare_builtin_arg (&ops[1], exp, 2);
13315 mips_prepare_builtin_arg (&ops[2], exp, 3);
13317 create_fixed_operand (&ops[3], cmp_result);
13318 return mips_expand_builtin_insn (CODE_FOR_mips_cond_move_tf_ps,
13322 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
13323 into TARGET otherwise. Return TARGET. */
13326 mips_builtin_branch_and_move (rtx condition, rtx target,
13327 rtx value_if_true, rtx value_if_false)
13329 rtx true_label, done_label;
13331 true_label = gen_label_rtx ();
13332 done_label = gen_label_rtx ();
13334 /* First assume that CONDITION is false. */
13335 mips_emit_move (target, value_if_false);
13337 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
13338 emit_jump_insn (gen_condjump (condition, true_label));
13339 emit_jump_insn (gen_jump (done_label));
13342 /* Fix TARGET if CONDITION is true. */
13343 emit_label (true_label);
13344 mips_emit_move (target, value_if_true);
13346 emit_label (done_label);
13350 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
13351 the CALL_EXPR that calls the function, ICODE is the code of the
13352 comparison instruction, and COND is the condition it should test.
13353 TARGET, if nonnull, suggests a good place to put the boolean result. */
13356 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
13357 enum insn_code icode, enum mips_fp_condition cond,
13358 rtx target, tree exp)
13360 rtx offset, condition, cmp_result;
13362 if (target == 0 || GET_MODE (target) != SImode)
13363 target = gen_reg_rtx (SImode);
13364 cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp,
13365 call_expr_nargs (exp));
13367 /* If the comparison sets more than one register, we define the result
13368 to be 0 if all registers are false and -1 if all registers are true.
13369 The value of the complete result is indeterminate otherwise. */
13370 switch (builtin_type)
13372 case MIPS_BUILTIN_CMP_ALL:
13373 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
13374 return mips_builtin_branch_and_move (condition, target,
13375 const0_rtx, const1_rtx);
13377 case MIPS_BUILTIN_CMP_UPPER:
13378 case MIPS_BUILTIN_CMP_LOWER:
13379 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
13380 condition = gen_single_cc (cmp_result, offset);
13381 return mips_builtin_branch_and_move (condition, target,
13382 const1_rtx, const0_rtx);
13385 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
13386 return mips_builtin_branch_and_move (condition, target,
13387 const1_rtx, const0_rtx);
13391 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
13392 if nonnull, suggests a good place to put the boolean result. */
13395 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
13397 rtx condition, cmp_result;
13400 if (target == 0 || GET_MODE (target) != SImode)
13401 target = gen_reg_rtx (SImode);
13403 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
13405 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
13410 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
13411 return mips_builtin_branch_and_move (condition, target,
13412 const1_rtx, const0_rtx);
13415 /* Implement TARGET_EXPAND_BUILTIN. */
13418 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13419 enum machine_mode mode, int ignore)
13422 unsigned int fcode, avail;
13423 const struct mips_builtin_description *d;
13425 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13426 fcode = DECL_FUNCTION_CODE (fndecl);
13427 gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
13428 d = &mips_builtins[fcode];
13429 avail = d->avail ();
13430 gcc_assert (avail != 0);
13433 error ("built-in function %qE not supported for MIPS16",
13434 DECL_NAME (fndecl));
13435 return ignore ? const0_rtx : CONST0_RTX (mode);
13437 switch (d->builtin_type)
13439 case MIPS_BUILTIN_DIRECT:
13440 return mips_expand_builtin_direct (d->icode, target, exp, true);
13442 case MIPS_BUILTIN_DIRECT_NO_TARGET:
13443 return mips_expand_builtin_direct (d->icode, target, exp, false);
13445 case MIPS_BUILTIN_MOVT:
13446 case MIPS_BUILTIN_MOVF:
13447 return mips_expand_builtin_movtf (d->builtin_type, d->icode,
13448 d->cond, target, exp);
13450 case MIPS_BUILTIN_CMP_ANY:
13451 case MIPS_BUILTIN_CMP_ALL:
13452 case MIPS_BUILTIN_CMP_UPPER:
13453 case MIPS_BUILTIN_CMP_LOWER:
13454 case MIPS_BUILTIN_CMP_SINGLE:
13455 return mips_expand_builtin_compare (d->builtin_type, d->icode,
13456 d->cond, target, exp);
13458 case MIPS_BUILTIN_BPOSGE32:
13459 return mips_expand_builtin_bposge (d->builtin_type, target);
13461 gcc_unreachable ();
13464 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
13465 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
13466 struct mips16_constant {
13467 struct mips16_constant *next;
13470 enum machine_mode mode;
13473 /* Information about an incomplete MIPS16 constant pool. FIRST is the
13474 first constant, HIGHEST_ADDRESS is the highest address that the first
13475 byte of the pool can have, and INSN_ADDRESS is the current instruction
13477 struct mips16_constant_pool {
13478 struct mips16_constant *first;
13479 int highest_address;
13483 /* Add constant VALUE to POOL and return its label. MODE is the
13484 value's mode (used for CONST_INTs, etc.). */
13487 mips16_add_constant (struct mips16_constant_pool *pool,
13488 rtx value, enum machine_mode mode)
13490 struct mips16_constant **p, *c;
13491 bool first_of_size_p;
13493 /* See whether the constant is already in the pool. If so, return the
13494 existing label, otherwise leave P pointing to the place where the
13495 constant should be added.
13497 Keep the pool sorted in increasing order of mode size so that we can
13498 reduce the number of alignments needed. */
13499 first_of_size_p = true;
13500 for (p = &pool->first; *p != 0; p = &(*p)->next)
13502 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
13503 return (*p)->label;
13504 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
13506 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
13507 first_of_size_p = false;
13510 /* In the worst case, the constant needed by the earliest instruction
13511 will end up at the end of the pool. The entire pool must then be
13512 accessible from that instruction.
13514 When adding the first constant, set the pool's highest address to
13515 the address of the first out-of-range byte. Adjust this address
13516 downwards each time a new constant is added. */
13517 if (pool->first == 0)
13518 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
13519 of the instruction with the lowest two bits clear. The base PC
13520 value for LDPC has the lowest three bits clear. Assume the worst
13521 case here; namely that the PC-relative instruction occupies the
13522 last 2 bytes in an aligned word. */
13523 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
13524 pool->highest_address -= GET_MODE_SIZE (mode);
13525 if (first_of_size_p)
13526 /* Take into account the worst possible padding due to alignment. */
13527 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
13529 /* Create a new entry. */
13530 c = XNEW (struct mips16_constant);
13533 c->label = gen_label_rtx ();
13540 /* Output constant VALUE after instruction INSN and return the last
13541 instruction emitted. MODE is the mode of the constant. */
13544 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
13546 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
13548 rtx size = GEN_INT (GET_MODE_SIZE (mode));
13549 return emit_insn_after (gen_consttable_int (value, size), insn);
13552 if (SCALAR_FLOAT_MODE_P (mode))
13553 return emit_insn_after (gen_consttable_float (value), insn);
13555 if (VECTOR_MODE_P (mode))
13559 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
13560 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
13561 CONST_VECTOR_ELT (value, i), insn);
13565 gcc_unreachable ();
13568 /* Dump out the constants in CONSTANTS after INSN. */
13571 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
13573 struct mips16_constant *c, *next;
13577 for (c = constants; c != NULL; c = next)
13579 /* If necessary, increase the alignment of PC. */
13580 if (align < GET_MODE_SIZE (c->mode))
13582 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
13583 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
13585 align = GET_MODE_SIZE (c->mode);
13587 insn = emit_label_after (c->label, insn);
13588 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
13594 emit_barrier_after (insn);
13597 /* Return the length of instruction INSN. */
13600 mips16_insn_length (rtx insn)
13604 rtx body = PATTERN (insn);
13605 if (GET_CODE (body) == ADDR_VEC)
13606 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
13607 if (GET_CODE (body) == ADDR_DIFF_VEC)
13608 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
13610 return get_attr_length (insn);
13613 /* If *X is a symbolic constant that refers to the constant pool, add
13614 the constant to POOL and rewrite *X to use the constant's label. */
13617 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
13619 rtx base, offset, label;
13621 split_const (*x, &base, &offset);
13622 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
13624 label = mips16_add_constant (pool, get_pool_constant (base),
13625 get_pool_mode (base));
13626 base = gen_rtx_LABEL_REF (Pmode, label);
13627 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
13631 /* This structure is used to communicate with mips16_rewrite_pool_refs.
13632 INSN is the instruction we're rewriting and POOL points to the current
13634 struct mips16_rewrite_pool_refs_info {
13636 struct mips16_constant_pool *pool;
13639 /* Rewrite *X so that constant pool references refer to the constant's
13640 label instead. DATA points to a mips16_rewrite_pool_refs_info
13644 mips16_rewrite_pool_refs (rtx *x, void *data)
13646 struct mips16_rewrite_pool_refs_info *info =
13647 (struct mips16_rewrite_pool_refs_info *) data;
13649 if (force_to_mem_operand (*x, Pmode))
13651 rtx mem = force_const_mem (GET_MODE (*x), *x);
13652 validate_change (info->insn, x, mem, false);
13657 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
13661 if (TARGET_MIPS16_TEXT_LOADS)
13662 mips16_rewrite_pool_constant (info->pool, x);
13664 return GET_CODE (*x) == CONST ? -1 : 0;
13667 /* Return whether CFG is used in mips_reorg. */
13670 mips_cfg_in_reorg (void)
13672 return (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
13673 || TARGET_RELAX_PIC_CALLS);
13676 /* Build MIPS16 constant pools. */
13679 mips16_lay_out_constants (void)
13681 struct mips16_constant_pool pool;
13682 struct mips16_rewrite_pool_refs_info info;
13685 if (!TARGET_MIPS16_PCREL_LOADS)
13688 if (mips_cfg_in_reorg ())
13689 split_all_insns ();
13691 split_all_insns_noflow ();
13693 memset (&pool, 0, sizeof (pool));
13694 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13696 /* Rewrite constant pool references in INSN. */
13697 if (USEFUL_INSN_P (insn))
13701 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
13704 pool.insn_address += mips16_insn_length (insn);
13706 if (pool.first != NULL)
13708 /* If there are no natural barriers between the first user of
13709 the pool and the highest acceptable address, we'll need to
13710 create a new instruction to jump around the constant pool.
13711 In the worst case, this instruction will be 4 bytes long.
13713 If it's too late to do this transformation after INSN,
13714 do it immediately before INSN. */
13715 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
13719 label = gen_label_rtx ();
13721 jump = emit_jump_insn_before (gen_jump (label), insn);
13722 JUMP_LABEL (jump) = label;
13723 LABEL_NUSES (label) = 1;
13724 barrier = emit_barrier_after (jump);
13726 emit_label_after (label, barrier);
13727 pool.insn_address += 4;
13730 /* See whether the constant pool is now out of range of the first
13731 user. If so, output the constants after the previous barrier.
13732 Note that any instructions between BARRIER and INSN (inclusive)
13733 will use negative offsets to refer to the pool. */
13734 if (pool.insn_address > pool.highest_address)
13736 mips16_emit_constants (pool.first, barrier);
13740 else if (BARRIER_P (insn))
13744 mips16_emit_constants (pool.first, get_last_insn ());
13747 /* Return true if it is worth r10k_simplify_address's while replacing
13748 an address with X. We are looking for constants, and for addresses
13749 at a known offset from the incoming stack pointer. */
13752 r10k_simplified_address_p (rtx x)
13754 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
13756 return x == virtual_incoming_args_rtx || CONSTANT_P (x);
13759 /* X is an expression that appears in INSN. Try to use the UD chains
13760 to simplify it, returning the simplified form on success and the
13761 original form otherwise. Replace the incoming value of $sp with
13762 virtual_incoming_args_rtx (which should never occur in X otherwise). */
13765 r10k_simplify_address (rtx x, rtx insn)
13767 rtx newx, op0, op1, set, def_insn, note;
13769 struct df_link *defs;
13774 op0 = r10k_simplify_address (XEXP (x, 0), insn);
13775 if (op0 != XEXP (x, 0))
13776 newx = simplify_gen_unary (GET_CODE (x), GET_MODE (x),
13777 op0, GET_MODE (XEXP (x, 0)));
13779 else if (BINARY_P (x))
13781 op0 = r10k_simplify_address (XEXP (x, 0), insn);
13782 op1 = r10k_simplify_address (XEXP (x, 1), insn);
13783 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
13784 newx = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
13786 else if (GET_CODE (x) == LO_SUM)
13788 /* LO_SUMs can be offset from HIGHs, if we know they won't
13789 overflow. See mips_classify_address for the rationale behind
13791 op0 = r10k_simplify_address (XEXP (x, 0), insn);
13792 if (GET_CODE (op0) == HIGH)
13793 newx = XEXP (x, 1);
13795 else if (REG_P (x))
13797 /* Uses are recorded by regno_reg_rtx, not X itself. */
13798 use = df_find_use (insn, regno_reg_rtx[REGNO (x)]);
13800 defs = DF_REF_CHAIN (use);
13802 /* Require a single definition. */
13803 if (defs && defs->next == NULL)
13806 if (DF_REF_IS_ARTIFICIAL (def))
13808 /* Replace the incoming value of $sp with
13809 virtual_incoming_args_rtx. */
13810 if (x == stack_pointer_rtx
13811 && DF_REF_BB (def) == ENTRY_BLOCK_PTR)
13812 newx = virtual_incoming_args_rtx;
13814 else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
13817 /* Make sure that DEF_INSN is a single set of REG. */
13818 def_insn = DF_REF_INSN (def);
13819 if (NONJUMP_INSN_P (def_insn))
13821 set = single_set (def_insn);
13822 if (set && rtx_equal_p (SET_DEST (set), x))
13824 /* Prefer to use notes, since the def-use chains
13825 are often shorter. */
13826 note = find_reg_equal_equiv_note (def_insn);
13828 newx = XEXP (note, 0);
13830 newx = SET_SRC (set);
13831 newx = r10k_simplify_address (newx, def_insn);
13837 if (newx && r10k_simplified_address_p (newx))
13842 /* Return true if ADDRESS is known to be an uncached address
13843 on R10K systems. */
13846 r10k_uncached_address_p (unsigned HOST_WIDE_INT address)
13848 unsigned HOST_WIDE_INT upper;
13850 /* Check for KSEG1. */
13851 if (address + 0x60000000 < 0x20000000)
13854 /* Check for uncached XKPHYS addresses. */
13855 if (Pmode == DImode)
13857 upper = (address >> 40) & 0xf9ffff;
13858 if (upper == 0x900000 || upper == 0xb80000)
13864 /* Return true if we can prove that an access to address X in instruction
13865 INSN would be safe from R10K speculation. This X is a general
13866 expression; it might not be a legitimate address. */
13869 r10k_safe_address_p (rtx x, rtx insn)
13872 HOST_WIDE_INT offset_val;
13874 x = r10k_simplify_address (x, insn);
13876 /* Check for references to the stack frame. It doesn't really matter
13877 how much of the frame has been allocated at INSN; -mr10k-cache-barrier
13878 allows us to assume that accesses to any part of the eventual frame
13879 is safe from speculation at any point in the function. */
13880 mips_split_plus (x, &base, &offset_val);
13881 if (base == virtual_incoming_args_rtx
13882 && offset_val >= -cfun->machine->frame.total_size
13883 && offset_val < cfun->machine->frame.args_size)
13886 /* Check for uncached addresses. */
13887 if (CONST_INT_P (x))
13888 return r10k_uncached_address_p (INTVAL (x));
13890 /* Check for accesses to a static object. */
13891 split_const (x, &base, &offset);
13892 return offset_within_block_p (base, INTVAL (offset));
13895 /* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
13896 an in-range access to an automatic variable, or to an object with
13897 a link-time-constant address. */
13900 r10k_safe_mem_expr_p (tree expr, rtx offset)
13902 if (expr == NULL_TREE
13903 || offset == NULL_RTX
13904 || !CONST_INT_P (offset)
13905 || INTVAL (offset) < 0
13906 || INTVAL (offset) >= int_size_in_bytes (TREE_TYPE (expr)))
13909 while (TREE_CODE (expr) == COMPONENT_REF)
13911 expr = TREE_OPERAND (expr, 0);
13912 if (expr == NULL_TREE)
13916 return DECL_P (expr);
13919 /* A for_each_rtx callback for which DATA points to the instruction
13920 containing *X. Stop the search if we find a MEM that is not safe
13921 from R10K speculation. */
13924 r10k_needs_protection_p_1 (rtx *loc, void *data)
13932 if (r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
13935 if (r10k_safe_address_p (XEXP (mem, 0), (rtx) data))
13941 /* A note_stores callback for which DATA points to an instruction pointer.
13942 If *DATA is nonnull, make it null if it X contains a MEM that is not
13943 safe from R10K speculation. */
13946 r10k_needs_protection_p_store (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
13951 insn_ptr = (rtx *) data;
13952 if (*insn_ptr && for_each_rtx (&x, r10k_needs_protection_p_1, *insn_ptr))
13953 *insn_ptr = NULL_RTX;
13956 /* A for_each_rtx callback that iterates over the pattern of a CALL_INSN.
13957 Return nonzero if the call is not to a declared function. */
13960 r10k_needs_protection_p_call (rtx *loc, void *data ATTRIBUTE_UNUSED)
13969 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DECL (x))
13975 /* Return true if instruction INSN needs to be protected by an R10K
13979 r10k_needs_protection_p (rtx insn)
13982 return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_call, NULL);
13984 if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
13986 note_stores (PATTERN (insn), r10k_needs_protection_p_store, &insn);
13987 return insn == NULL_RTX;
13990 return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_1, insn);
13993 /* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
13994 edge is unconditional. */
13997 r10k_protected_bb_p (basic_block bb, sbitmap protected_bbs)
14002 FOR_EACH_EDGE (e, ei, bb->preds)
14003 if (!single_succ_p (e->src)
14004 || !TEST_BIT (protected_bbs, e->src->index)
14005 || (e->flags & EDGE_COMPLEX) != 0)
14010 /* Implement -mr10k-cache-barrier= for the current function. */
14013 r10k_insert_cache_barriers (void)
14015 int *rev_post_order;
14018 sbitmap protected_bbs;
14019 rtx insn, end, unprotected_region;
14023 sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
14027 /* Calculate dominators. */
14028 calculate_dominance_info (CDI_DOMINATORS);
14030 /* Bit X of PROTECTED_BBS is set if the last operation in basic block
14031 X is protected by a cache barrier. */
14032 protected_bbs = sbitmap_alloc (last_basic_block);
14033 sbitmap_zero (protected_bbs);
14035 /* Iterate over the basic blocks in reverse post-order. */
14036 rev_post_order = XNEWVEC (int, last_basic_block);
14037 n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
14038 for (i = 0; i < n; i++)
14040 bb = BASIC_BLOCK (rev_post_order[i]);
14042 /* If this block is only reached by unconditional edges, and if the
14043 source of every edge is protected, the beginning of the block is
14045 if (r10k_protected_bb_p (bb, protected_bbs))
14046 unprotected_region = NULL_RTX;
14048 unprotected_region = pc_rtx;
14049 end = NEXT_INSN (BB_END (bb));
14051 /* UNPROTECTED_REGION is:
14053 - null if we are processing a protected region,
14054 - pc_rtx if we are processing an unprotected region but have
14055 not yet found the first instruction in it
14056 - the first instruction in an unprotected region otherwise. */
14057 for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
14059 if (unprotected_region && USEFUL_INSN_P (insn))
14061 if (recog_memoized (insn) == CODE_FOR_mips_cache)
14062 /* This CACHE instruction protects the following code. */
14063 unprotected_region = NULL_RTX;
14066 /* See if INSN is the first instruction in this
14067 unprotected region. */
14068 if (unprotected_region == pc_rtx)
14069 unprotected_region = insn;
14071 /* See if INSN needs to be protected. If so,
14072 we must insert a cache barrier somewhere between
14073 PREV_INSN (UNPROTECTED_REGION) and INSN. It isn't
14074 clear which position is better performance-wise,
14075 but as a tie-breaker, we assume that it is better
14076 to allow delay slots to be back-filled where
14077 possible, and that it is better not to insert
14078 barriers in the middle of already-scheduled code.
14079 We therefore insert the barrier at the beginning
14081 if (r10k_needs_protection_p (insn))
14083 emit_insn_before (gen_r10k_cache_barrier (),
14084 unprotected_region);
14085 unprotected_region = NULL_RTX;
14091 /* The called function is not required to protect the exit path.
14092 The code that follows a call is therefore unprotected. */
14093 unprotected_region = pc_rtx;
14096 /* Record whether the end of this block is protected. */
14097 if (unprotected_region == NULL_RTX)
14098 SET_BIT (protected_bbs, bb->index);
14100 XDELETEVEC (rev_post_order);
14102 sbitmap_free (protected_bbs);
14104 free_dominance_info (CDI_DOMINATORS);
14107 /* If INSN is a call, return the underlying CALL expr. Return NULL_RTX
14108 otherwise. If INSN has two call rtx, then store the second one in
14112 mips_call_expr_from_insn (rtx insn, rtx *second_call)
14117 if (!CALL_P (insn))
14120 x = PATTERN (insn);
14121 if (GET_CODE (x) == PARALLEL)
14123 /* Calls returning complex values have two CALL rtx. Look for the second
14124 one here, and return it via the SECOND_CALL arg. */
14125 x2 = XVECEXP (x, 0, 1);
14126 if (GET_CODE (x2) == SET)
14128 if (GET_CODE (x2) == CALL)
14131 x = XVECEXP (x, 0, 0);
14133 if (GET_CODE (x) == SET)
14135 gcc_assert (GET_CODE (x) == CALL);
14140 /* REG is set in DEF. See if the definition is one of the ways we load a
14141 register with a symbol address for a mips_use_pic_fn_addr_reg_p call. If
14142 it is return the symbol reference of the function, otherwise return
14146 mips_pic_call_symbol_from_set (df_ref def, rtx reg)
14150 if (DF_REF_IS_ARTIFICIAL (def))
14153 def_insn = DF_REF_INSN (def);
14154 set = single_set (def_insn);
14155 if (set && rtx_equal_p (SET_DEST (set), reg))
14157 rtx note, src, symbol;
14159 /* First, look at REG_EQUAL/EQUIV notes. */
14160 note = find_reg_equal_equiv_note (def_insn);
14161 if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
14162 return XEXP (note, 0);
14164 /* For %call16 references we don't have REG_EQUAL. */
14165 src = SET_SRC (set);
14166 symbol = mips_strip_unspec_call (src);
14169 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
14173 /* Follow simple register copies. */
14175 return mips_find_pic_call_symbol (def_insn, src);
14181 /* Find the definition of the use of REG in INSN. See if the definition is
14182 one of the ways we load a register with a symbol address for a
14183 mips_use_pic_fn_addr_reg_p call. If it is return the symbol reference of
14184 the function, otherwise return NULL_RTX. */
14187 mips_find_pic_call_symbol (rtx insn, rtx reg)
14190 struct df_link *defs;
14193 use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]);
14196 defs = DF_REF_CHAIN (use);
14199 symbol = mips_pic_call_symbol_from_set (defs->ref, reg);
14203 /* If we have more than one definition, they need to be identical. */
14204 for (defs = defs->next; defs; defs = defs->next)
14208 other = mips_pic_call_symbol_from_set (defs->ref, reg);
14209 if (!rtx_equal_p (symbol, other))
14216 /* Replace the args_size operand of the call expression CALL with the
14217 call-attribute UNSPEC and fill in SYMBOL as the function symbol. */
14220 mips_annotate_pic_call_expr (rtx call, rtx symbol)
14224 args_size = XEXP (call, 1);
14225 XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size),
14226 gen_rtvec (2, args_size, symbol),
14230 /* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression. See
14231 if instead of the arg_size argument it contains the call attributes. If
14232 yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function
14233 symbol from the call attributes. Also return false if ARGS_SIZE_OPNO is
14237 mips_get_pic_call_symbol (rtx *operands, int args_size_opno)
14239 rtx args_size, symbol;
14241 if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1)
14244 args_size = operands[args_size_opno];
14245 if (GET_CODE (args_size) != UNSPEC)
14247 gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR);
14249 symbol = XVECEXP (args_size, 0, 1);
14250 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
14252 operands[args_size_opno] = symbol;
14256 /* Use DF to annotate PIC indirect calls with the function symbol they
14260 mips_annotate_pic_calls (void)
14266 FOR_BB_INSNS (bb, insn)
14268 rtx call, reg, symbol, second_call;
14271 call = mips_call_expr_from_insn (insn, &second_call);
14274 gcc_assert (MEM_P (XEXP (call, 0)));
14275 reg = XEXP (XEXP (call, 0), 0);
14279 symbol = mips_find_pic_call_symbol (insn, reg);
14282 mips_annotate_pic_call_expr (call, symbol);
14284 mips_annotate_pic_call_expr (second_call, symbol);
14289 /* A temporary variable used by for_each_rtx callbacks, etc. */
14290 static rtx mips_sim_insn;
14292 /* A structure representing the state of the processor pipeline.
14293 Used by the mips_sim_* family of functions. */
14295 /* The maximum number of instructions that can be issued in a cycle.
14296 (Caches mips_issue_rate.) */
14297 unsigned int issue_rate;
14299 /* The current simulation time. */
14302 /* How many more instructions can be issued in the current cycle. */
14303 unsigned int insns_left;
14305 /* LAST_SET[X].INSN is the last instruction to set register X.
14306 LAST_SET[X].TIME is the time at which that instruction was issued.
14307 INSN is null if no instruction has yet set register X. */
14311 } last_set[FIRST_PSEUDO_REGISTER];
14313 /* The pipeline's current DFA state. */
14317 /* Reset STATE to the initial simulation state. */
14320 mips_sim_reset (struct mips_sim *state)
14323 state->insns_left = state->issue_rate;
14324 memset (&state->last_set, 0, sizeof (state->last_set));
14325 state_reset (state->dfa_state);
14328 /* Initialize STATE before its first use. DFA_STATE points to an
14329 allocated but uninitialized DFA state. */
14332 mips_sim_init (struct mips_sim *state, state_t dfa_state)
14334 state->issue_rate = mips_issue_rate ();
14335 state->dfa_state = dfa_state;
14336 mips_sim_reset (state);
14339 /* Advance STATE by one clock cycle. */
14342 mips_sim_next_cycle (struct mips_sim *state)
14345 state->insns_left = state->issue_rate;
14346 state_transition (state->dfa_state, 0);
14349 /* Advance simulation state STATE until instruction INSN can read
14353 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
14355 unsigned int regno, end_regno;
14357 end_regno = END_REGNO (reg);
14358 for (regno = REGNO (reg); regno < end_regno; regno++)
14359 if (state->last_set[regno].insn != 0)
14363 t = (state->last_set[regno].time
14364 + insn_latency (state->last_set[regno].insn, insn));
14365 while (state->time < t)
14366 mips_sim_next_cycle (state);
14370 /* A for_each_rtx callback. If *X is a register, advance simulation state
14371 DATA until mips_sim_insn can read the register's value. */
14374 mips_sim_wait_regs_2 (rtx *x, void *data)
14377 mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
14381 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
14384 mips_sim_wait_regs_1 (rtx *x, void *data)
14386 for_each_rtx (x, mips_sim_wait_regs_2, data);
14389 /* Advance simulation state STATE until all of INSN's register
14390 dependencies are satisfied. */
14393 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
14395 mips_sim_insn = insn;
14396 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
14399 /* Advance simulation state STATE until the units required by
14400 instruction INSN are available. */
14403 mips_sim_wait_units (struct mips_sim *state, rtx insn)
14407 tmp_state = alloca (state_size ());
14408 while (state->insns_left == 0
14409 || (memcpy (tmp_state, state->dfa_state, state_size ()),
14410 state_transition (tmp_state, insn) >= 0))
14411 mips_sim_next_cycle (state);
14414 /* Advance simulation state STATE until INSN is ready to issue. */
14417 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
14419 mips_sim_wait_regs (state, insn);
14420 mips_sim_wait_units (state, insn);
14423 /* mips_sim_insn has just set X. Update the LAST_SET array
14424 in simulation state DATA. */
14427 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
14429 struct mips_sim *state;
14431 state = (struct mips_sim *) data;
14434 unsigned int regno, end_regno;
14436 end_regno = END_REGNO (x);
14437 for (regno = REGNO (x); regno < end_regno; regno++)
14439 state->last_set[regno].insn = mips_sim_insn;
14440 state->last_set[regno].time = state->time;
14445 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
14446 can issue immediately (i.e., that mips_sim_wait_insn has already
14450 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
14452 state_transition (state->dfa_state, insn);
14453 state->insns_left--;
14455 mips_sim_insn = insn;
14456 note_stores (PATTERN (insn), mips_sim_record_set, state);
14459 /* Simulate issuing a NOP in state STATE. */
14462 mips_sim_issue_nop (struct mips_sim *state)
14464 if (state->insns_left == 0)
14465 mips_sim_next_cycle (state);
14466 state->insns_left--;
14469 /* Update simulation state STATE so that it's ready to accept the instruction
14470 after INSN. INSN should be part of the main rtl chain, not a member of a
14474 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
14476 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
14478 mips_sim_issue_nop (state);
14480 switch (GET_CODE (SEQ_BEGIN (insn)))
14484 /* We can't predict the processor state after a call or label. */
14485 mips_sim_reset (state);
14489 /* The delay slots of branch likely instructions are only executed
14490 when the branch is taken. Therefore, if the caller has simulated
14491 the delay slot instruction, STATE does not really reflect the state
14492 of the pipeline for the instruction after the delay slot. Also,
14493 branch likely instructions tend to incur a penalty when not taken,
14494 so there will probably be an extra delay between the branch and
14495 the instruction after the delay slot. */
14496 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
14497 mips_sim_reset (state);
14505 /* The VR4130 pipeline issues aligned pairs of instructions together,
14506 but it stalls the second instruction if it depends on the first.
14507 In order to cut down the amount of logic required, this dependence
14508 check is not based on a full instruction decode. Instead, any non-SPECIAL
14509 instruction is assumed to modify the register specified by bits 20-16
14510 (which is usually the "rt" field).
14512 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
14513 input, so we can end up with a false dependence between the branch
14514 and its delay slot. If this situation occurs in instruction INSN,
14515 try to avoid it by swapping rs and rt. */
14518 vr4130_avoid_branch_rt_conflict (rtx insn)
14522 first = SEQ_BEGIN (insn);
14523 second = SEQ_END (insn);
14525 && NONJUMP_INSN_P (second)
14526 && GET_CODE (PATTERN (first)) == SET
14527 && GET_CODE (SET_DEST (PATTERN (first))) == PC
14528 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
14530 /* Check for the right kind of condition. */
14531 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
14532 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
14533 && REG_P (XEXP (cond, 0))
14534 && REG_P (XEXP (cond, 1))
14535 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
14536 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
14538 /* SECOND mentions the rt register but not the rs register. */
14539 rtx tmp = XEXP (cond, 0);
14540 XEXP (cond, 0) = XEXP (cond, 1);
14541 XEXP (cond, 1) = tmp;
14546 /* Implement -mvr4130-align. Go through each basic block and simulate the
14547 processor pipeline. If we find that a pair of instructions could execute
14548 in parallel, and the first of those instructions is not 8-byte aligned,
14549 insert a nop to make it aligned. */
14552 vr4130_align_insns (void)
14554 struct mips_sim state;
14555 rtx insn, subinsn, last, last2, next;
14560 /* LAST is the last instruction before INSN to have a nonzero length.
14561 LAST2 is the last such instruction before LAST. */
14565 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
14568 mips_sim_init (&state, alloca (state_size ()));
14569 for (insn = get_insns (); insn != 0; insn = next)
14571 unsigned int length;
14573 next = NEXT_INSN (insn);
14575 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
14576 This isn't really related to the alignment pass, but we do it on
14577 the fly to avoid a separate instruction walk. */
14578 vr4130_avoid_branch_rt_conflict (insn);
14580 if (USEFUL_INSN_P (insn))
14581 FOR_EACH_SUBINSN (subinsn, insn)
14583 mips_sim_wait_insn (&state, subinsn);
14585 /* If we want this instruction to issue in parallel with the
14586 previous one, make sure that the previous instruction is
14587 aligned. There are several reasons why this isn't worthwhile
14588 when the second instruction is a call:
14590 - Calls are less likely to be performance critical,
14591 - There's a good chance that the delay slot can execute
14592 in parallel with the call.
14593 - The return address would then be unaligned.
14595 In general, if we're going to insert a nop between instructions
14596 X and Y, it's better to insert it immediately after X. That
14597 way, if the nop makes Y aligned, it will also align any labels
14598 between X and Y. */
14599 if (state.insns_left != state.issue_rate
14600 && !CALL_P (subinsn))
14602 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
14604 /* SUBINSN is the first instruction in INSN and INSN is
14605 aligned. We want to align the previous instruction
14606 instead, so insert a nop between LAST2 and LAST.
14608 Note that LAST could be either a single instruction
14609 or a branch with a delay slot. In the latter case,
14610 LAST, like INSN, is already aligned, but the delay
14611 slot must have some extra delay that stops it from
14612 issuing at the same time as the branch. We therefore
14613 insert a nop before the branch in order to align its
14615 emit_insn_after (gen_nop (), last2);
14618 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
14620 /* SUBINSN is the delay slot of INSN, but INSN is
14621 currently unaligned. Insert a nop between
14622 LAST and INSN to align it. */
14623 emit_insn_after (gen_nop (), last);
14627 mips_sim_issue_insn (&state, subinsn);
14629 mips_sim_finish_insn (&state, insn);
14631 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
14632 length = get_attr_length (insn);
14635 /* If the instruction is an asm statement or multi-instruction
14636 mips.md patern, the length is only an estimate. Insert an
14637 8 byte alignment after it so that the following instructions
14638 can be handled correctly. */
14639 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
14640 && (recog_memoized (insn) < 0 || length >= 8))
14642 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
14643 next = NEXT_INSN (next);
14644 mips_sim_next_cycle (&state);
14647 else if (length & 4)
14648 aligned_p = !aligned_p;
14653 /* See whether INSN is an aligned label. */
14654 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
14660 /* This structure records that the current function has a LO_SUM
14661 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
14662 the largest offset applied to BASE by all such LO_SUMs. */
14663 struct mips_lo_sum_offset {
14665 HOST_WIDE_INT offset;
14668 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
14671 mips_hash_base (rtx base)
14673 int do_not_record_p;
14675 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
14678 /* Hash-table callbacks for mips_lo_sum_offsets. */
14681 mips_lo_sum_offset_hash (const void *entry)
14683 return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
14687 mips_lo_sum_offset_eq (const void *entry, const void *value)
14689 return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
14690 (const_rtx) value);
14693 /* Look up symbolic constant X in HTAB, which is a hash table of
14694 mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
14695 paired with a recorded LO_SUM, otherwise record X in the table. */
14698 mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
14702 struct mips_lo_sum_offset *entry;
14704 /* Split X into a base and offset. */
14705 split_const (x, &base, &offset);
14706 if (UNSPEC_ADDRESS_P (base))
14707 base = UNSPEC_ADDRESS (base);
14709 /* Look up the base in the hash table. */
14710 slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
14714 entry = (struct mips_lo_sum_offset *) *slot;
14715 if (option == INSERT)
14719 entry = XNEW (struct mips_lo_sum_offset);
14720 entry->base = base;
14721 entry->offset = INTVAL (offset);
14726 if (INTVAL (offset) > entry->offset)
14727 entry->offset = INTVAL (offset);
14730 return INTVAL (offset) <= entry->offset;
14733 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
14734 Record every LO_SUM in *LOC. */
14737 mips_record_lo_sum (rtx *loc, void *data)
14739 if (GET_CODE (*loc) == LO_SUM)
14740 mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
14744 /* Return true if INSN is a SET of an orphaned high-part relocation.
14745 HTAB is a hash table of mips_lo_sum_offsets that describes all the
14746 LO_SUMs in the current function. */
14749 mips_orphaned_high_part_p (htab_t htab, rtx insn)
14751 enum mips_symbol_type type;
14754 set = single_set (insn);
14757 /* Check for %his. */
14759 if (GET_CODE (x) == HIGH
14760 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
14761 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
14763 /* Check for local %gots (and %got_pages, which is redundant but OK). */
14764 if (GET_CODE (x) == UNSPEC
14765 && XINT (x, 1) == UNSPEC_LOAD_GOT
14766 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
14767 SYMBOL_CONTEXT_LEA, &type)
14768 && type == SYMBOL_GOTOFF_PAGE)
14769 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
14774 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
14775 INSN and a previous instruction, avoid it by inserting nops after
14778 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
14779 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
14780 before using the value of that register. *HILO_DELAY counts the
14781 number of instructions since the last hilo hazard (that is,
14782 the number of instructions since the last MFLO or MFHI).
14784 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
14785 for the next instruction.
14787 LO_REG is an rtx for the LO register, used in dependence checking. */
14790 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
14791 rtx *delayed_reg, rtx lo_reg)
14796 pattern = PATTERN (insn);
14798 /* Do not put the whole function in .set noreorder if it contains
14799 an asm statement. We don't know whether there will be hazards
14800 between the asm statement and the gcc-generated code. */
14801 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
14802 cfun->machine->all_noreorder_p = false;
14804 /* Ignore zero-length instructions (barriers and the like). */
14805 ninsns = get_attr_length (insn) / 4;
14809 /* Work out how many nops are needed. Note that we only care about
14810 registers that are explicitly mentioned in the instruction's pattern.
14811 It doesn't matter that calls use the argument registers or that they
14812 clobber hi and lo. */
14813 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
14814 nops = 2 - *hilo_delay;
14815 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
14820 /* Insert the nops between this instruction and the previous one.
14821 Each new nop takes us further from the last hilo hazard. */
14822 *hilo_delay += nops;
14824 emit_insn_after (gen_hazard_nop (), after);
14826 /* Set up the state for the next instruction. */
14827 *hilo_delay += ninsns;
14829 if (INSN_CODE (insn) >= 0)
14830 switch (get_attr_hazard (insn))
14840 set = single_set (insn);
14842 *delayed_reg = SET_DEST (set);
14847 /* Go through the instruction stream and insert nops where necessary.
14848 Also delete any high-part relocations whose partnering low parts
14849 are now all dead. See if the whole function can then be put into
14850 .set noreorder and .set nomacro. */
14853 mips_reorg_process_insns (void)
14855 rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
14859 /* Force all instructions to be split into their final form. */
14860 split_all_insns_noflow ();
14862 /* Recalculate instruction lengths without taking nops into account. */
14863 cfun->machine->ignore_hazard_length_p = true;
14864 shorten_branches (get_insns ());
14866 cfun->machine->all_noreorder_p = true;
14868 /* We don't track MIPS16 PC-relative offsets closely enough to make
14869 a good job of "set .noreorder" code in MIPS16 mode. */
14871 cfun->machine->all_noreorder_p = false;
14873 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
14874 if (!TARGET_EXPLICIT_RELOCS)
14875 cfun->machine->all_noreorder_p = false;
14877 /* Profiled functions can't be all noreorder because the profiler
14878 support uses assembler macros. */
14880 cfun->machine->all_noreorder_p = false;
14882 /* Code compiled with -mfix-vr4120 or -mfix-24k can't be all noreorder
14883 because we rely on the assembler to work around some errata. */
14884 if (TARGET_FIX_VR4120 || TARGET_FIX_24K)
14885 cfun->machine->all_noreorder_p = false;
14887 /* The same is true for -mfix-vr4130 if we might generate MFLO or
14888 MFHI instructions. Note that we avoid using MFLO and MFHI if
14889 the VR4130 MACC and DMACC instructions are available instead;
14890 see the *mfhilo_{si,di}_macc patterns. */
14891 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
14892 cfun->machine->all_noreorder_p = false;
14894 htab = htab_create (37, mips_lo_sum_offset_hash,
14895 mips_lo_sum_offset_eq, free);
14897 /* Make a first pass over the instructions, recording all the LO_SUMs. */
14898 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
14899 FOR_EACH_SUBINSN (subinsn, insn)
14900 if (USEFUL_INSN_P (subinsn))
14901 for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);
14906 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
14908 /* Make a second pass over the instructions. Delete orphaned
14909 high-part relocations or turn them into NOPs. Avoid hazards
14910 by inserting NOPs. */
14911 for (insn = get_insns (); insn != 0; insn = next_insn)
14913 next_insn = NEXT_INSN (insn);
14914 if (USEFUL_INSN_P (insn))
14916 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
14918 /* If we find an orphaned high-part relocation in a delay
14919 slot, it's easier to turn that instruction into a NOP than
14920 to delete it. The delay slot will be a NOP either way. */
14921 FOR_EACH_SUBINSN (subinsn, insn)
14922 if (INSN_P (subinsn))
14924 if (mips_orphaned_high_part_p (htab, subinsn))
14926 PATTERN (subinsn) = gen_nop ();
14927 INSN_CODE (subinsn) = CODE_FOR_nop;
14929 mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
14930 &delayed_reg, lo_reg);
14936 /* INSN is a single instruction. Delete it if it's an
14937 orphaned high-part relocation. */
14938 if (mips_orphaned_high_part_p (htab, insn))
14939 delete_insn (insn);
14940 /* Also delete cache barriers if the last instruction
14941 was an annulled branch. INSN will not be speculatively
14943 else if (recog_memoized (insn) == CODE_FOR_r10k_cache_barrier
14945 && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn)))
14946 delete_insn (insn);
14949 mips_avoid_hazard (last_insn, insn, &hilo_delay,
14950 &delayed_reg, lo_reg);
14957 htab_delete (htab);
14960 /* If we are using a GOT, but have not decided to use a global pointer yet,
14961 see whether we need one to implement long branches. Convert the ghost
14962 global-pointer instructions into real ones if so. */
14965 mips_expand_ghost_gp_insns (void)
14970 /* Quick exit if we already know that we will or won't need a
14972 if (!TARGET_USE_GOT
14973 || cfun->machine->global_pointer == INVALID_REGNUM
14974 || mips_must_initialize_gp_p ())
14977 shorten_branches (get_insns ());
14979 /* Look for a branch that is longer than normal. The normal length for
14980 non-MIPS16 branches is 8, because the length includes the delay slot.
14981 It is 4 for MIPS16, because MIPS16 branches are extended instructions,
14982 but they have no delay slot. */
14983 normal_length = (TARGET_MIPS16 ? 4 : 8);
14984 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14986 && USEFUL_INSN_P (insn)
14987 && get_attr_length (insn) > normal_length)
14990 if (insn == NULL_RTX)
14993 /* We've now established that we need $gp. */
14994 cfun->machine->must_initialize_gp_p = true;
14995 split_all_insns_noflow ();
15000 /* Subroutine of mips_reorg to manage passes that require DF. */
15003 mips_df_reorg (void)
15005 /* Create def-use chains. */
15006 df_set_flags (DF_EQ_NOTES);
15007 df_chain_add_problem (DF_UD_CHAIN);
15010 if (TARGET_RELAX_PIC_CALLS)
15011 mips_annotate_pic_calls ();
15013 if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
15014 r10k_insert_cache_barriers ();
15016 df_finish_pass (false);
15019 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
15024 /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF. Also during
15025 insn splitting in mips16_lay_out_constants, DF insn info is only kept up
15026 to date if the CFG is available. */
15027 if (mips_cfg_in_reorg ())
15028 compute_bb_for_insn ();
15029 mips16_lay_out_constants ();
15030 if (mips_cfg_in_reorg ())
15033 free_bb_for_insn ();
15036 if (optimize > 0 && flag_delayed_branch)
15037 dbr_schedule (get_insns ());
15038 mips_reorg_process_insns ();
15040 && TARGET_EXPLICIT_RELOCS
15042 && TARGET_VR4130_ALIGN)
15043 vr4130_align_insns ();
15044 if (mips_expand_ghost_gp_insns ())
15045 /* The expansion could invalidate some of the VR4130 alignment
15046 optimizations, but this should be an extremely rare case anyhow. */
15047 mips_reorg_process_insns ();
15050 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
15051 in order to avoid duplicating too much logic from elsewhere. */
15054 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
15055 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
15058 rtx this_rtx, temp1, temp2, insn, fnaddr;
15059 bool use_sibcall_p;
15061 /* Pretend to be a post-reload pass while generating rtl. */
15062 reload_completed = 1;
15064 /* Mark the end of the (empty) prologue. */
15065 emit_note (NOTE_INSN_PROLOGUE_END);
15067 /* Determine if we can use a sibcall to call FUNCTION directly. */
15068 fnaddr = XEXP (DECL_RTL (function), 0);
15069 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
15070 && const_call_insn_operand (fnaddr, Pmode));
15072 /* Determine if we need to load FNADDR from the GOT. */
15074 && (mips_got_symbol_type_p
15075 (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
15077 /* Pick a global pointer. Use a call-clobbered register if
15078 TARGET_CALL_SAVED_GP. */
15079 cfun->machine->global_pointer
15080 = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
15081 cfun->machine->must_initialize_gp_p = true;
15082 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
15084 /* Set up the global pointer for n32 or n64 abicalls. */
15085 mips_emit_loadgp ();
15088 /* We need two temporary registers in some cases. */
15089 temp1 = gen_rtx_REG (Pmode, 2);
15090 temp2 = gen_rtx_REG (Pmode, 3);
15092 /* Find out which register contains the "this" pointer. */
15093 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
15094 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
15096 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
15098 /* Add DELTA to THIS_RTX. */
15101 rtx offset = GEN_INT (delta);
15102 if (!SMALL_OPERAND (delta))
15104 mips_emit_move (temp1, offset);
15107 emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
15110 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
15111 if (vcall_offset != 0)
15115 /* Set TEMP1 to *THIS_RTX. */
15116 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
15118 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
15119 addr = mips_add_offset (temp2, temp1, vcall_offset);
15121 /* Load the offset and add it to THIS_RTX. */
15122 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
15123 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
15126 /* Jump to the target function. Use a sibcall if direct jumps are
15127 allowed, otherwise load the address into a register first. */
15130 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
15131 SIBLING_CALL_P (insn) = 1;
15135 /* This is messy. GAS treats "la $25,foo" as part of a call
15136 sequence and may allow a global "foo" to be lazily bound.
15137 The general move patterns therefore reject this combination.
15139 In this context, lazy binding would actually be OK
15140 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
15141 TARGET_CALL_SAVED_GP; see mips_load_call_address.
15142 We must therefore load the address via a temporary
15143 register if mips_dangerous_for_la25_p.
15145 If we jump to the temporary register rather than $25,
15146 the assembler can use the move insn to fill the jump's
15149 We can use the same technique for MIPS16 code, where $25
15150 is not a valid JR register. */
15151 if (TARGET_USE_PIC_FN_ADDR_REG
15153 && !mips_dangerous_for_la25_p (fnaddr))
15154 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
15155 mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);
15157 if (TARGET_USE_PIC_FN_ADDR_REG
15158 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
15159 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
15160 emit_jump_insn (gen_indirect_jump (temp1));
15163 /* Run just enough of rest_of_compilation. This sequence was
15164 "borrowed" from alpha.c. */
15165 insn = get_insns ();
15166 insn_locators_alloc ();
15167 split_all_insns_noflow ();
15168 mips16_lay_out_constants ();
15169 shorten_branches (insn);
15170 final_start_function (insn, file, 1);
15171 final (insn, file, 1);
15172 final_end_function ();
15174 /* Clean up the vars set above. Note that final_end_function resets
15175 the global pointer for us. */
15176 reload_completed = 0;
15179 /* The last argument passed to mips_set_mips16_mode, or negative if the
15180 function hasn't been called yet.
15182 There are two copies of this information. One is saved and restored
15183 by the PCH process while the other is specific to this compiler
15184 invocation. The information calculated by mips_set_mips16_mode
15185 is invalid unless the two variables are the same. */
15186 static int was_mips16_p = -1;
15187 static GTY(()) int was_mips16_pch_p = -1;
15189 /* Set up the target-dependent global state so that it matches the
15190 current function's ISA mode. */
15193 mips_set_mips16_mode (int mips16_p)
15195 if (mips16_p == was_mips16_p
15196 && mips16_p == was_mips16_pch_p)
15199 /* Restore base settings of various flags. */
15200 target_flags = mips_base_target_flags;
15201 flag_schedule_insns = mips_base_schedule_insns;
15202 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
15203 flag_move_loop_invariants = mips_base_move_loop_invariants;
15204 align_loops = mips_base_align_loops;
15205 align_jumps = mips_base_align_jumps;
15206 align_functions = mips_base_align_functions;
15210 /* Switch to MIPS16 mode. */
15211 target_flags |= MASK_MIPS16;
15213 /* Don't run the scheduler before reload, since it tends to
15214 increase register pressure. */
15215 flag_schedule_insns = 0;
15217 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
15218 the whole function to be in a single section. */
15219 flag_reorder_blocks_and_partition = 0;
15221 /* Don't move loop invariants, because it tends to increase
15222 register pressure. It also introduces an extra move in cases
15223 where the constant is the first operand in a two-operand binary
15224 instruction, or when it forms a register argument to a functon
15226 flag_move_loop_invariants = 0;
15228 target_flags |= MASK_EXPLICIT_RELOCS;
15230 /* Experiments suggest we get the best overall section-anchor
15231 results from using the range of an unextended LW or SW. Code
15232 that makes heavy use of byte or short accesses can do better
15233 with ranges of 0...31 and 0...63 respectively, but most code is
15234 sensitive to the range of LW and SW instead. */
15235 targetm.min_anchor_offset = 0;
15236 targetm.max_anchor_offset = 127;
15238 targetm.const_anchor = 0;
15240 /* MIPS16 has no BAL instruction. */
15241 target_flags &= ~MASK_RELAX_PIC_CALLS;
15243 if (flag_pic && !TARGET_OLDABI)
15244 sorry ("MIPS16 PIC for ABIs other than o32 and o64");
15247 sorry ("MIPS16 -mxgot code");
15249 if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
15250 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
15254 /* Switch to normal (non-MIPS16) mode. */
15255 target_flags &= ~MASK_MIPS16;
15257 /* Provide default values for align_* for 64-bit targets. */
15260 if (align_loops == 0)
15262 if (align_jumps == 0)
15264 if (align_functions == 0)
15265 align_functions = 8;
15268 targetm.min_anchor_offset = -32768;
15269 targetm.max_anchor_offset = 32767;
15271 targetm.const_anchor = 0x8000;
15274 /* (Re)initialize MIPS target internals for new ISA. */
15275 mips_init_relocs ();
15279 if (!mips16_globals)
15280 mips16_globals = save_target_globals ();
15282 restore_target_globals (mips16_globals);
15285 restore_target_globals (&default_target_globals);
15287 was_mips16_p = mips16_p;
15288 was_mips16_pch_p = mips16_p;
15291 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
15292 function should use the MIPS16 ISA and switch modes accordingly. */
15295 mips_set_current_function (tree fndecl)
15297 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
15300 /* Allocate a chunk of memory for per-function machine-dependent data. */
15302 static struct machine_function *
15303 mips_init_machine_status (void)
15305 return ggc_alloc_cleared_machine_function ();
15308 /* Return the processor associated with the given ISA level, or null
15309 if the ISA isn't valid. */
15311 static const struct mips_cpu_info *
15312 mips_cpu_info_from_isa (int isa)
15316 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
15317 if (mips_cpu_info_table[i].isa == isa)
15318 return mips_cpu_info_table + i;
15323 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
15324 with a final "000" replaced by "k". Ignore case.
15326 Note: this function is shared between GCC and GAS. */
15329 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
15331 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
15332 given++, canonical++;
15334 return ((*given == 0 && *canonical == 0)
15335 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
15338 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
15339 CPU name. We've traditionally allowed a lot of variation here.
15341 Note: this function is shared between GCC and GAS. */
15344 mips_matching_cpu_name_p (const char *canonical, const char *given)
15346 /* First see if the name matches exactly, or with a final "000"
15347 turned into "k". */
15348 if (mips_strict_matching_cpu_name_p (canonical, given))
15351 /* If not, try comparing based on numerical designation alone.
15352 See if GIVEN is an unadorned number, or 'r' followed by a number. */
15353 if (TOLOWER (*given) == 'r')
15355 if (!ISDIGIT (*given))
15358 /* Skip over some well-known prefixes in the canonical name,
15359 hoping to find a number there too. */
15360 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
15362 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
15364 else if (TOLOWER (canonical[0]) == 'r')
15367 return mips_strict_matching_cpu_name_p (canonical, given);
15370 /* Return the mips_cpu_info entry for the processor or ISA given
15371 by CPU_STRING. Return null if the string isn't recognized.
15373 A similar function exists in GAS. */
15375 static const struct mips_cpu_info *
15376 mips_parse_cpu (const char *cpu_string)
15381 /* In the past, we allowed upper-case CPU names, but it doesn't
15382 work well with the multilib machinery. */
15383 for (s = cpu_string; *s != 0; s++)
15386 warning (0, "CPU names must be lower case");
15390 /* 'from-abi' selects the most compatible architecture for the given
15391 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
15392 EABIs, we have to decide whether we're using the 32-bit or 64-bit
15394 if (strcasecmp (cpu_string, "from-abi") == 0)
15395 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
15396 : ABI_NEEDS_64BIT_REGS ? 3
15397 : (TARGET_64BIT ? 3 : 1));
15399 /* 'default' has traditionally been a no-op. Probably not very useful. */
15400 if (strcasecmp (cpu_string, "default") == 0)
15403 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
15404 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
15405 return mips_cpu_info_table + i;
15410 /* Set up globals to generate code for the ISA or processor
15411 described by INFO. */
15414 mips_set_architecture (const struct mips_cpu_info *info)
15418 mips_arch_info = info;
15419 mips_arch = info->cpu;
15420 mips_isa = info->isa;
15424 /* Likewise for tuning. */
15427 mips_set_tune (const struct mips_cpu_info *info)
15431 mips_tune_info = info;
15432 mips_tune = info->cpu;
15436 /* Implement TARGET_HANDLE_OPTION. */
15439 mips_handle_option (struct gcc_options *opts, struct gcc_options *opts_set,
15440 const struct cl_decoded_option *decoded,
15441 location_t loc ATTRIBUTE_UNUSED)
15443 size_t code = decoded->opt_index;
15444 const char *arg = decoded->arg;
15446 gcc_assert (opts == &global_options);
15447 gcc_assert (opts_set == &global_options_set);
15453 return mips_parse_cpu (arg) != 0;
15456 mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
15457 return mips_isa_option_info != 0;
15459 case OPT_mno_flush_func:
15460 opts->x_mips_cache_flush_func = NULL;
15468 /* Implement TARGET_OPTION_OVERRIDE. */
15471 mips_option_override (void)
15473 int i, start, regno, mode;
15475 /* Process flags as though we were generating non-MIPS16 code. */
15476 mips_base_mips16 = TARGET_MIPS16;
15477 target_flags &= ~MASK_MIPS16;
15479 #ifdef SUBTARGET_OVERRIDE_OPTIONS
15480 SUBTARGET_OVERRIDE_OPTIONS;
15483 /* -mno-float overrides -mhard-float and -msoft-float. */
15484 if (TARGET_NO_FLOAT)
15486 target_flags |= MASK_SOFT_FLOAT_ABI;
15487 target_flags_explicit |= MASK_SOFT_FLOAT_ABI;
15490 if (TARGET_FLIP_MIPS16)
15491 TARGET_INTERLINK_MIPS16 = 1;
15493 /* Set the small data limit. */
15494 mips_small_data_threshold = (global_options_set.x_g_switch_value
15496 : MIPS_DEFAULT_GVALUE);
15498 /* The following code determines the architecture and register size.
15499 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
15500 The GAS and GCC code should be kept in sync as much as possible. */
15502 if (mips_arch_string != 0)
15503 mips_set_architecture (mips_parse_cpu (mips_arch_string));
15505 if (mips_isa_option_info != 0)
15507 if (mips_arch_info == 0)
15508 mips_set_architecture (mips_isa_option_info);
15509 else if (mips_arch_info->isa != mips_isa_option_info->isa)
15510 error ("%<-%s%> conflicts with the other architecture options, "
15511 "which specify a %s processor",
15512 mips_isa_option_info->name,
15513 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
15516 if (mips_arch_info == 0)
15518 #ifdef MIPS_CPU_STRING_DEFAULT
15519 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
15521 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
15525 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
15526 error ("%<-march=%s%> is not compatible with the selected ABI",
15527 mips_arch_info->name);
15529 /* Optimize for mips_arch, unless -mtune selects a different processor. */
15530 if (mips_tune_string != 0)
15531 mips_set_tune (mips_parse_cpu (mips_tune_string));
15533 if (mips_tune_info == 0)
15534 mips_set_tune (mips_arch_info);
15536 if ((target_flags_explicit & MASK_64BIT) != 0)
15538 /* The user specified the size of the integer registers. Make sure
15539 it agrees with the ABI and ISA. */
15540 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
15541 error ("%<-mgp64%> used with a 32-bit processor");
15542 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
15543 error ("%<-mgp32%> used with a 64-bit ABI");
15544 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
15545 error ("%<-mgp64%> used with a 32-bit ABI");
15549 /* Infer the integer register size from the ABI and processor.
15550 Restrict ourselves to 32-bit registers if that's all the
15551 processor has, or if the ABI cannot handle 64-bit registers. */
15552 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
15553 target_flags &= ~MASK_64BIT;
15555 target_flags |= MASK_64BIT;
15558 if ((target_flags_explicit & MASK_FLOAT64) != 0)
15560 if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
15561 error ("unsupported combination: %s", "-mfp64 -msingle-float");
15562 else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
15563 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
15564 else if (!TARGET_64BIT && TARGET_FLOAT64)
15566 if (!ISA_HAS_MXHC1)
15567 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
15568 " the target supports the mfhc1 and mthc1 instructions");
15569 else if (mips_abi != ABI_32)
15570 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
15576 /* -msingle-float selects 32-bit float registers. Otherwise the
15577 float registers should be the same size as the integer ones. */
15578 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
15579 target_flags |= MASK_FLOAT64;
15581 target_flags &= ~MASK_FLOAT64;
15584 /* End of code shared with GAS. */
15586 /* If no -mlong* option was given, infer it from the other options. */
15587 if ((target_flags_explicit & MASK_LONG64) == 0)
15589 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
15590 target_flags |= MASK_LONG64;
15592 target_flags &= ~MASK_LONG64;
15595 if (!TARGET_OLDABI)
15596 flag_pcc_struct_return = 0;
15598 /* Decide which rtx_costs structure to use. */
15600 mips_cost = &mips_rtx_cost_optimize_size;
15602 mips_cost = &mips_rtx_cost_data[mips_tune];
15604 /* If the user hasn't specified a branch cost, use the processor's
15606 if (mips_branch_cost == 0)
15607 mips_branch_cost = mips_cost->branch_cost;
15609 /* If neither -mbranch-likely nor -mno-branch-likely was given
15610 on the command line, set MASK_BRANCHLIKELY based on the target
15611 architecture and tuning flags. Annulled delay slots are a
15612 size win, so we only consider the processor-specific tuning
15613 for !optimize_size. */
15614 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
15616 if (ISA_HAS_BRANCHLIKELY
15618 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
15619 target_flags |= MASK_BRANCHLIKELY;
15621 target_flags &= ~MASK_BRANCHLIKELY;
15623 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
15624 warning (0, "the %qs architecture does not support branch-likely"
15625 " instructions", mips_arch_info->name);
15627 /* The effect of -mabicalls isn't defined for the EABI. */
15628 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
15630 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
15631 target_flags &= ~MASK_ABICALLS;
15634 if (TARGET_ABICALLS_PIC2)
15635 /* We need to set flag_pic for executables as well as DSOs
15636 because we may reference symbols that are not defined in
15637 the final executable. (MIPS does not use things like
15638 copy relocs, for example.)
15640 There is a body of code that uses __PIC__ to distinguish
15641 between -mabicalls and -mno-abicalls code. The non-__PIC__
15642 variant is usually appropriate for TARGET_ABICALLS_PIC0, as
15643 long as any indirect jumps use $25. */
15646 /* -mvr4130-align is a "speed over size" optimization: it usually produces
15647 faster code, but at the expense of more nops. Enable it at -O3 and
15649 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
15650 target_flags |= MASK_VR4130_ALIGN;
15652 /* Prefer a call to memcpy over inline code when optimizing for size,
15653 though see MOVE_RATIO in mips.h. */
15654 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
15655 target_flags |= MASK_MEMCPY;
15657 /* If we have a nonzero small-data limit, check that the -mgpopt
15658 setting is consistent with the other target flags. */
15659 if (mips_small_data_threshold > 0)
15663 if (!TARGET_EXPLICIT_RELOCS)
15664 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
15666 TARGET_LOCAL_SDATA = false;
15667 TARGET_EXTERN_SDATA = false;
15671 if (TARGET_VXWORKS_RTP)
15672 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
15674 if (TARGET_ABICALLS)
15675 warning (0, "cannot use small-data accesses for %qs",
15680 #ifdef MIPS_TFMODE_FORMAT
15681 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
15684 /* Make sure that the user didn't turn off paired single support when
15685 MIPS-3D support is requested. */
15687 && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
15688 && !TARGET_PAIRED_SINGLE_FLOAT)
15689 error ("%<-mips3d%> requires %<-mpaired-single%>");
15691 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
15693 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
15695 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
15696 and TARGET_HARD_FLOAT_ABI are both true. */
15697 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
15698 error ("%qs must be used with %qs",
15699 TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
15700 TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
15702 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
15704 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
15705 warning (0, "the %qs architecture does not support paired-single"
15706 " instructions", mips_arch_info->name);
15708 if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
15709 && !TARGET_CACHE_BUILTIN)
15711 error ("%qs requires a target that provides the %qs instruction",
15712 "-mr10k-cache-barrier", "cache");
15713 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
15716 /* If TARGET_DSPR2, enable MASK_DSP. */
15718 target_flags |= MASK_DSP;
15720 /* .eh_frame addresses should be the same width as a C pointer.
15721 Most MIPS ABIs support only one pointer size, so the assembler
15722 will usually know exactly how big an .eh_frame address is.
15724 Unfortunately, this is not true of the 64-bit EABI. The ABI was
15725 originally defined to use 64-bit pointers (i.e. it is LP64), and
15726 this is still the default mode. However, we also support an n32-like
15727 ILP32 mode, which is selected by -mlong32. The problem is that the
15728 assembler has traditionally not had an -mlong option, so it has
15729 traditionally not known whether we're using the ILP32 or LP64 form.
15731 As it happens, gas versions up to and including 2.19 use _32-bit_
15732 addresses for EABI64 .cfi_* directives. This is wrong for the
15733 default LP64 mode, so we can't use the directives by default.
15734 Moreover, since gas's current behavior is at odds with gcc's
15735 default behavior, it seems unwise to rely on future versions
15736 of gas behaving the same way. We therefore avoid using .cfi
15737 directives for -mlong32 as well. */
15738 if (mips_abi == ABI_EABI && TARGET_64BIT)
15739 flag_dwarf2_cfi_asm = 0;
15741 /* .cfi_* directives generate a read-only section, so fall back on
15742 manual .eh_frame creation if we need the section to be writable. */
15743 if (TARGET_WRITABLE_EH_FRAME)
15744 flag_dwarf2_cfi_asm = 0;
15746 mips_init_print_operand_punct ();
15748 /* Set up array to map GCC register number to debug register number.
15749 Ignore the special purpose register numbers. */
15751 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15753 mips_dbx_regno[i] = INVALID_REGNUM;
15754 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
15755 mips_dwarf_regno[i] = i;
15757 mips_dwarf_regno[i] = INVALID_REGNUM;
15760 start = GP_DBX_FIRST - GP_REG_FIRST;
15761 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
15762 mips_dbx_regno[i] = i + start;
15764 start = FP_DBX_FIRST - FP_REG_FIRST;
15765 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
15766 mips_dbx_regno[i] = i + start;
15768 /* Accumulator debug registers use big-endian ordering. */
15769 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
15770 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
15771 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
15772 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
15773 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
15775 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
15776 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
15779 /* Set up mips_hard_regno_mode_ok. */
15780 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
15781 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
15782 mips_hard_regno_mode_ok[mode][regno]
15783 = mips_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
15785 /* Function to allocate machine-dependent function status. */
15786 init_machine_status = &mips_init_machine_status;
15788 /* Default to working around R4000 errata only if the processor
15789 was selected explicitly. */
15790 if ((target_flags_explicit & MASK_FIX_R4000) == 0
15791 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
15792 target_flags |= MASK_FIX_R4000;
15794 /* Default to working around R4400 errata only if the processor
15795 was selected explicitly. */
15796 if ((target_flags_explicit & MASK_FIX_R4400) == 0
15797 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
15798 target_flags |= MASK_FIX_R4400;
15800 /* Default to working around R10000 errata only if the processor
15801 was selected explicitly. */
15802 if ((target_flags_explicit & MASK_FIX_R10000) == 0
15803 && mips_matching_cpu_name_p (mips_arch_info->name, "r10000"))
15804 target_flags |= MASK_FIX_R10000;
15806 /* Make sure that branch-likely instructions available when using
15807 -mfix-r10000. The instructions are not available if either:
15809 1. -mno-branch-likely was passed.
15810 2. The selected ISA does not support branch-likely and
15811 the command line does not include -mbranch-likely. */
15812 if (TARGET_FIX_R10000
15813 && ((target_flags_explicit & MASK_BRANCHLIKELY) == 0
15814 ? !ISA_HAS_BRANCHLIKELY
15815 : !TARGET_BRANCHLIKELY))
15816 sorry ("%qs requires branch-likely instructions", "-mfix-r10000");
15818 if (TARGET_SYNCI && !ISA_HAS_SYNCI)
15820 warning (0, "the %qs architecture does not support the synci "
15821 "instruction", mips_arch_info->name);
15822 target_flags &= ~MASK_SYNCI;
15825 /* Only optimize PIC indirect calls if they are actually required. */
15826 if (!TARGET_USE_GOT || !TARGET_EXPLICIT_RELOCS)
15827 target_flags &= ~MASK_RELAX_PIC_CALLS;
15829 /* Save base state of options. */
15830 mips_base_target_flags = target_flags;
15831 mips_base_schedule_insns = flag_schedule_insns;
15832 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
15833 mips_base_move_loop_invariants = flag_move_loop_invariants;
15834 mips_base_align_loops = align_loops;
15835 mips_base_align_jumps = align_jumps;
15836 mips_base_align_functions = align_functions;
15838 /* Now select the ISA mode.
15840 Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
15841 MIPS16 mode afterwards if need be. */
15842 mips_set_mips16_mode (false);
15845 /* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
15846 static const struct default_options mips_option_optimization_table[] =
15848 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
15849 { OPT_LEVELS_NONE, 0, NULL, 0 }
15852 /* Swap the register information for registers I and I + 1, which
15853 currently have the wrong endianness. Note that the registers'
15854 fixedness and call-clobberedness might have been set on the
15858 mips_swap_registers (unsigned int i)
15863 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
15864 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
15866 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
15867 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
15868 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
15869 SWAP_STRING (reg_names[i], reg_names[i + 1]);
15875 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
15878 mips_conditional_register_usage (void)
15883 /* These DSP control register fields are global. */
15884 global_regs[CCDSP_PO_REGNUM] = 1;
15885 global_regs[CCDSP_SC_REGNUM] = 1;
15891 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
15892 fixed_regs[regno] = call_used_regs[regno] = 1;
15894 if (!TARGET_HARD_FLOAT)
15898 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
15899 fixed_regs[regno] = call_used_regs[regno] = 1;
15900 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
15901 fixed_regs[regno] = call_used_regs[regno] = 1;
15903 else if (! ISA_HAS_8CC)
15907 /* We only have a single condition-code register. We implement
15908 this by fixing all the condition-code registers and generating
15909 RTL that refers directly to ST_REG_FIRST. */
15910 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
15911 fixed_regs[regno] = call_used_regs[regno] = 1;
15913 /* In MIPS16 mode, we permit the $t temporary registers to be used
15914 for reload. We prohibit the unused $s registers, since they
15915 are call-saved, and saving them via a MIPS16 register would
15916 probably waste more time than just reloading the value. */
15919 fixed_regs[18] = call_used_regs[18] = 1;
15920 fixed_regs[19] = call_used_regs[19] = 1;
15921 fixed_regs[20] = call_used_regs[20] = 1;
15922 fixed_regs[21] = call_used_regs[21] = 1;
15923 fixed_regs[22] = call_used_regs[22] = 1;
15924 fixed_regs[23] = call_used_regs[23] = 1;
15925 fixed_regs[26] = call_used_regs[26] = 1;
15926 fixed_regs[27] = call_used_regs[27] = 1;
15927 fixed_regs[30] = call_used_regs[30] = 1;
15929 /* $f20-$f23 are call-clobbered for n64. */
15930 if (mips_abi == ABI_64)
15933 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
15934 call_really_used_regs[regno] = call_used_regs[regno] = 1;
15936 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
15938 if (mips_abi == ABI_N32)
15941 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
15942 call_really_used_regs[regno] = call_used_regs[regno] = 1;
15944 /* Make sure that double-register accumulator values are correctly
15945 ordered for the current endianness. */
15946 if (TARGET_LITTLE_ENDIAN)
15948 unsigned int regno;
15950 mips_swap_registers (MD_REG_FIRST);
15951 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
15952 mips_swap_registers (regno);
15956 /* Initialize vector TARGET to VALS. */
15959 mips_expand_vector_init (rtx target, rtx vals)
15961 enum machine_mode mode;
15962 enum machine_mode inner;
15963 unsigned int i, n_elts;
15966 mode = GET_MODE (target);
15967 inner = GET_MODE_INNER (mode);
15968 n_elts = GET_MODE_NUNITS (mode);
15970 gcc_assert (VECTOR_MODE_P (mode));
15972 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
15973 for (i = 0; i < n_elts; i++)
15974 emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)),
15975 XVECEXP (vals, 0, i));
15977 emit_move_insn (target, mem);
15980 /* When generating MIPS16 code, we want to allocate $24 (T_REG) before
15981 other registers for instructions for which it is possible. This
15982 encourages the compiler to use CMP in cases where an XOR would
15983 require some register shuffling. */
15986 mips_order_regs_for_local_alloc (void)
15990 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15991 reg_alloc_order[i] = i;
15995 /* It really doesn't matter where we put register 0, since it is
15996 a fixed register anyhow. */
15997 reg_alloc_order[0] = 24;
15998 reg_alloc_order[24] = 0;
16002 /* Implement EH_USES. */
16005 mips_eh_uses (unsigned int regno)
16007 if (reload_completed && !TARGET_ABSOLUTE_JUMPS)
16009 /* We need to force certain registers to be live in order to handle
16010 PIC long branches correctly. See mips_must_initialize_gp_p for
16012 if (mips_cfun_has_cprestore_slot_p ())
16014 if (regno == CPRESTORE_SLOT_REGNUM)
16019 if (cfun->machine->global_pointer == regno)
16027 /* Implement EPILOGUE_USES. */
16030 mips_epilogue_uses (unsigned int regno)
16032 /* Say that the epilogue uses the return address register. Note that
16033 in the case of sibcalls, the values "used by the epilogue" are
16034 considered live at the start of the called function. */
16035 if (regno == RETURN_ADDR_REGNUM)
16038 /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
16039 See the comment above load_call<mode> for details. */
16040 if (TARGET_USE_GOT && (regno) == GOT_VERSION_REGNUM)
16043 /* An interrupt handler must preserve some registers that are
16044 ordinarily call-clobbered. */
16045 if (cfun->machine->interrupt_handler_p
16046 && mips_interrupt_extra_call_saved_reg_p (regno))
16052 /* A for_each_rtx callback. Stop the search if *X is an AT register. */
16055 mips_at_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
16057 return REG_P (*x) && REGNO (*x) == AT_REGNUM;
16060 /* Return true if INSN needs to be wrapped in ".set noat".
16061 INSN has NOPERANDS operands, stored in OPVEC. */
16064 mips_need_noat_wrapper_p (rtx insn, rtx *opvec, int noperands)
16068 if (recog_memoized (insn) >= 0)
16069 for (i = 0; i < noperands; i++)
16070 if (for_each_rtx (&opvec[i], mips_at_reg_p, NULL))
16075 /* Implement FINAL_PRESCAN_INSN. */
16078 mips_final_prescan_insn (rtx insn, rtx *opvec, int noperands)
16080 if (mips_need_noat_wrapper_p (insn, opvec, noperands))
16081 mips_push_asm_switch (&mips_noat);
16084 /* Implement TARGET_ASM_FINAL_POSTSCAN_INSN. */
16087 mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED, rtx insn,
16088 rtx *opvec, int noperands)
16090 if (mips_need_noat_wrapper_p (insn, opvec, noperands))
16091 mips_pop_asm_switch (&mips_noat);
16094 /* Return the function that is used to expand the <u>mulsidi3 pattern.
16095 EXT_CODE is the code of the extension used. Return NULL if widening
16096 multiplication shouldn't be used. */
16099 mips_mulsidi3_gen_fn (enum rtx_code ext_code)
16103 signed_p = ext_code == SIGN_EXTEND;
16106 /* Don't use widening multiplication with MULT when we have DMUL. Even
16107 with the extension of its input operands DMUL is faster. Note that
16108 the extension is not needed for signed multiplication. In order to
16109 ensure that we always remove the redundant sign-extension in this
16110 case we still expand mulsidi3 for DMUL. */
16112 return signed_p ? gen_mulsidi3_64bit_dmul : NULL;
16113 if (TARGET_FIX_R4000)
16115 return signed_p ? gen_mulsidi3_64bit : gen_umulsidi3_64bit;
16119 if (TARGET_FIX_R4000 && !ISA_HAS_DSP)
16120 return signed_p ? gen_mulsidi3_32bit_r4000 : gen_umulsidi3_32bit_r4000;
16121 return signed_p ? gen_mulsidi3_32bit : gen_umulsidi3_32bit;
16125 /* Return the size in bytes of the trampoline code, padded to
16126 TRAMPOLINE_ALIGNMENT bits. The static chain pointer and target
16127 function address immediately follow. */
16130 mips_trampoline_code_size (void)
16132 if (TARGET_USE_PIC_FN_ADDR_REG)
16134 else if (ptr_mode == DImode)
16136 else if (ISA_HAS_LOAD_DELAY)
16142 /* Implement TARGET_TRAMPOLINE_INIT. */
16145 mips_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
16147 rtx addr, end_addr, high, low, opcode, mem;
16150 HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset;
16152 /* Work out the offsets of the pointers from the start of the
16153 trampoline code. */
16154 end_addr_offset = mips_trampoline_code_size ();
16155 static_chain_offset = end_addr_offset;
16156 target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
16158 /* Get pointers to the beginning and end of the code block. */
16159 addr = force_reg (Pmode, XEXP (m_tramp, 0));
16160 end_addr = mips_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset));
16162 #define OP(X) gen_int_mode (X, SImode)
16164 /* Build up the code in TRAMPOLINE. */
16166 if (TARGET_USE_PIC_FN_ADDR_REG)
16168 /* $25 contains the address of the trampoline. Emit code of the form:
16170 l[wd] $1, target_function_offset($25)
16171 l[wd] $static_chain, static_chain_offset($25)
16174 trampoline[i++] = OP (MIPS_LOAD_PTR (AT_REGNUM,
16175 target_function_offset,
16176 PIC_FUNCTION_ADDR_REGNUM));
16177 trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
16178 static_chain_offset,
16179 PIC_FUNCTION_ADDR_REGNUM));
16180 trampoline[i++] = OP (MIPS_JR (AT_REGNUM));
16181 trampoline[i++] = OP (MIPS_MOVE (PIC_FUNCTION_ADDR_REGNUM, AT_REGNUM));
16183 else if (ptr_mode == DImode)
16185 /* It's too cumbersome to create the full 64-bit address, so let's
16191 1: l[wd] $25, target_function_offset - 12($31)
16192 l[wd] $static_chain, static_chain_offset - 12($31)
16196 where 12 is the offset of "1:" from the start of the code block. */
16197 trampoline[i++] = OP (MIPS_MOVE (AT_REGNUM, RETURN_ADDR_REGNUM));
16198 trampoline[i++] = OP (MIPS_BAL (1));
16199 trampoline[i++] = OP (MIPS_NOP);
16200 trampoline[i++] = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
16201 target_function_offset - 12,
16202 RETURN_ADDR_REGNUM));
16203 trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
16204 static_chain_offset - 12,
16205 RETURN_ADDR_REGNUM));
16206 trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
16207 trampoline[i++] = OP (MIPS_MOVE (RETURN_ADDR_REGNUM, AT_REGNUM));
16211 /* If the target has load delays, emit:
16213 lui $1, %hi(end_addr)
16214 lw $25, %lo(end_addr + ...)($1)
16215 lw $static_chain, %lo(end_addr + ...)($1)
16221 lui $1, %hi(end_addr)
16222 lw $25, %lo(end_addr + ...)($1)
16224 lw $static_chain, %lo(end_addr + ...)($1). */
16226 /* Split END_ADDR into %hi and %lo values. Trampolines are aligned
16227 to 64 bits, so the %lo value will have the bottom 3 bits clear. */
16228 high = expand_simple_binop (SImode, PLUS, end_addr, GEN_INT (0x8000),
16229 NULL, false, OPTAB_WIDEN);
16230 high = expand_simple_binop (SImode, LSHIFTRT, high, GEN_INT (16),
16231 NULL, false, OPTAB_WIDEN);
16232 low = convert_to_mode (SImode, gen_lowpart (HImode, end_addr), true);
16234 /* Emit the LUI. */
16235 opcode = OP (MIPS_LUI (AT_REGNUM, 0));
16236 trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, high,
16237 NULL, false, OPTAB_WIDEN);
16239 /* Emit the load of the target function. */
16240 opcode = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
16241 target_function_offset - end_addr_offset,
16243 trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
16244 NULL, false, OPTAB_WIDEN);
16246 /* Emit the JR here, if we can. */
16247 if (!ISA_HAS_LOAD_DELAY)
16248 trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
16250 /* Emit the load of the static chain register. */
16251 opcode = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
16252 static_chain_offset - end_addr_offset,
16254 trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
16255 NULL, false, OPTAB_WIDEN);
16257 /* Emit the JR, if we couldn't above. */
16258 if (ISA_HAS_LOAD_DELAY)
16260 trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
16261 trampoline[i++] = OP (MIPS_NOP);
16267 /* Copy the trampoline code. Leave any padding uninitialized. */
16268 for (j = 0; j < i; j++)
16270 mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode));
16271 mips_emit_move (mem, trampoline[j]);
16274 /* Set up the static chain pointer field. */
16275 mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
16276 mips_emit_move (mem, chain_value);
16278 /* Set up the target function field. */
16279 mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
16280 mips_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
16282 /* Flush the code part of the trampoline. */
16283 emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
16284 emit_insn (gen_clear_cache (addr, end_addr));
16287 /* Implement FUNCTION_PROFILER. */
16289 void mips_function_profiler (FILE *file)
16292 sorry ("mips16 function profiling");
16293 if (TARGET_LONG_CALLS)
16295 /* For TARGET_LONG_CALLS use $3 for the address of _mcount. */
16296 if (Pmode == DImode)
16297 fprintf (file, "\tdla\t%s,_mcount\n", reg_names[3]);
16299 fprintf (file, "\tla\t%s,_mcount\n", reg_names[3]);
16301 mips_push_asm_switch (&mips_noat);
16302 fprintf (file, "\tmove\t%s,%s\t\t# save current return address\n",
16303 reg_names[AT_REGNUM], reg_names[RETURN_ADDR_REGNUM]);
16304 /* _mcount treats $2 as the static chain register. */
16305 if (cfun->static_chain_decl != NULL)
16306 fprintf (file, "\tmove\t%s,%s\n", reg_names[2],
16307 reg_names[STATIC_CHAIN_REGNUM]);
16308 if (TARGET_MCOUNT_RA_ADDRESS)
16310 /* If TARGET_MCOUNT_RA_ADDRESS load $12 with the address of the
16311 ra save location. */
16312 if (cfun->machine->frame.ra_fp_offset == 0)
16313 /* ra not saved, pass zero. */
16314 fprintf (file, "\tmove\t%s,%s\n", reg_names[12], reg_names[0]);
16316 fprintf (file, "\t%s\t%s," HOST_WIDE_INT_PRINT_DEC "(%s)\n",
16317 Pmode == DImode ? "dla" : "la", reg_names[12],
16318 cfun->machine->frame.ra_fp_offset,
16319 reg_names[STACK_POINTER_REGNUM]);
16321 if (!TARGET_NEWABI)
16323 "\t%s\t%s,%s,%d\t\t# _mcount pops 2 words from stack\n",
16324 TARGET_64BIT ? "dsubu" : "subu",
16325 reg_names[STACK_POINTER_REGNUM],
16326 reg_names[STACK_POINTER_REGNUM],
16327 Pmode == DImode ? 16 : 8);
16329 if (TARGET_LONG_CALLS)
16330 fprintf (file, "\tjalr\t%s\n", reg_names[3]);
16332 fprintf (file, "\tjal\t_mcount\n");
16333 mips_pop_asm_switch (&mips_noat);
16334 /* _mcount treats $2 as the static chain register. */
16335 if (cfun->static_chain_decl != NULL)
16336 fprintf (file, "\tmove\t%s,%s\n", reg_names[STATIC_CHAIN_REGNUM],
16340 /* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default
16341 behaviour of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
16342 when TARGET_LOONGSON_VECTORS is true. */
16344 static unsigned HOST_WIDE_INT
16345 mips_shift_truncation_mask (enum machine_mode mode)
16347 if (TARGET_LOONGSON_VECTORS && VECTOR_MODE_P (mode))
16350 return GET_MODE_BITSIZE (mode) - 1;
16354 /* Initialize the GCC target structure. */
16355 #undef TARGET_ASM_ALIGNED_HI_OP
16356 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
16357 #undef TARGET_ASM_ALIGNED_SI_OP
16358 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
16359 #undef TARGET_ASM_ALIGNED_DI_OP
16360 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
16362 #undef TARGET_OPTION_OVERRIDE
16363 #define TARGET_OPTION_OVERRIDE mips_option_override
16364 #undef TARGET_OPTION_OPTIMIZATION_TABLE
16365 #define TARGET_OPTION_OPTIMIZATION_TABLE mips_option_optimization_table
16367 #undef TARGET_LEGITIMIZE_ADDRESS
16368 #define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address
16370 #undef TARGET_ASM_FUNCTION_PROLOGUE
16371 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
16372 #undef TARGET_ASM_FUNCTION_EPILOGUE
16373 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
16374 #undef TARGET_ASM_SELECT_RTX_SECTION
16375 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
16376 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
16377 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
16379 #undef TARGET_SCHED_INIT
16380 #define TARGET_SCHED_INIT mips_sched_init
16381 #undef TARGET_SCHED_REORDER
16382 #define TARGET_SCHED_REORDER mips_sched_reorder
16383 #undef TARGET_SCHED_REORDER2
16384 #define TARGET_SCHED_REORDER2 mips_sched_reorder2
16385 #undef TARGET_SCHED_VARIABLE_ISSUE
16386 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
16387 #undef TARGET_SCHED_ADJUST_COST
16388 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
16389 #undef TARGET_SCHED_ISSUE_RATE
16390 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
16391 #undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
16392 #define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
16393 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
16394 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
16395 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
16396 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
16397 mips_multipass_dfa_lookahead
16398 #undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
16399 #define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
16400 mips_small_register_classes_for_mode_p
16402 #undef TARGET_DEFAULT_TARGET_FLAGS
16403 #define TARGET_DEFAULT_TARGET_FLAGS \
16405 | TARGET_CPU_DEFAULT \
16406 | TARGET_ENDIAN_DEFAULT \
16407 | TARGET_FP_EXCEPTIONS_DEFAULT \
16408 | MASK_CHECK_ZERO_DIV \
16410 #undef TARGET_HANDLE_OPTION
16411 #define TARGET_HANDLE_OPTION mips_handle_option
16413 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
16414 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
16416 #undef TARGET_INSERT_ATTRIBUTES
16417 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
16418 #undef TARGET_MERGE_DECL_ATTRIBUTES
16419 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
16420 #undef TARGET_SET_CURRENT_FUNCTION
16421 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
16423 #undef TARGET_VALID_POINTER_MODE
16424 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
16425 #undef TARGET_REGISTER_MOVE_COST
16426 #define TARGET_REGISTER_MOVE_COST mips_register_move_cost
16427 #undef TARGET_MEMORY_MOVE_COST
16428 #define TARGET_MEMORY_MOVE_COST mips_memory_move_cost
16429 #undef TARGET_RTX_COSTS
16430 #define TARGET_RTX_COSTS mips_rtx_costs
16431 #undef TARGET_ADDRESS_COST
16432 #define TARGET_ADDRESS_COST mips_address_cost
16434 #undef TARGET_IN_SMALL_DATA_P
16435 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
16437 #undef TARGET_MACHINE_DEPENDENT_REORG
16438 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
16440 #undef TARGET_PREFERRED_RELOAD_CLASS
16441 #define TARGET_PREFERRED_RELOAD_CLASS mips_preferred_reload_class
16443 #undef TARGET_ASM_FILE_START
16444 #define TARGET_ASM_FILE_START mips_file_start
16445 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
16446 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
16448 #undef TARGET_INIT_LIBFUNCS
16449 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
16451 #undef TARGET_BUILD_BUILTIN_VA_LIST
16452 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
16453 #undef TARGET_EXPAND_BUILTIN_VA_START
16454 #define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
16455 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
16456 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
16458 #undef TARGET_PROMOTE_FUNCTION_MODE
16459 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
16460 #undef TARGET_PROMOTE_PROTOTYPES
16461 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
16463 #undef TARGET_FUNCTION_VALUE
16464 #define TARGET_FUNCTION_VALUE mips_function_value
16465 #undef TARGET_LIBCALL_VALUE
16466 #define TARGET_LIBCALL_VALUE mips_libcall_value
16467 #undef TARGET_FUNCTION_VALUE_REGNO_P
16468 #define TARGET_FUNCTION_VALUE_REGNO_P mips_function_value_regno_p
16469 #undef TARGET_RETURN_IN_MEMORY
16470 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
16471 #undef TARGET_RETURN_IN_MSB
16472 #define TARGET_RETURN_IN_MSB mips_return_in_msb
16474 #undef TARGET_ASM_OUTPUT_MI_THUNK
16475 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
16476 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
16477 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
16479 #undef TARGET_PRINT_OPERAND
16480 #define TARGET_PRINT_OPERAND mips_print_operand
16481 #undef TARGET_PRINT_OPERAND_ADDRESS
16482 #define TARGET_PRINT_OPERAND_ADDRESS mips_print_operand_address
16483 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
16484 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mips_print_operand_punct_valid_p
16486 #undef TARGET_SETUP_INCOMING_VARARGS
16487 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
16488 #undef TARGET_STRICT_ARGUMENT_NAMING
16489 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
16490 #undef TARGET_MUST_PASS_IN_STACK
16491 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
16492 #undef TARGET_PASS_BY_REFERENCE
16493 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
16494 #undef TARGET_CALLEE_COPIES
16495 #define TARGET_CALLEE_COPIES mips_callee_copies
16496 #undef TARGET_ARG_PARTIAL_BYTES
16497 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
16498 #undef TARGET_FUNCTION_ARG
16499 #define TARGET_FUNCTION_ARG mips_function_arg
16500 #undef TARGET_FUNCTION_ARG_ADVANCE
16501 #define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
16502 #undef TARGET_FUNCTION_ARG_BOUNDARY
16503 #define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary
16505 #undef TARGET_MODE_REP_EXTENDED
16506 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
16508 #undef TARGET_VECTOR_MODE_SUPPORTED_P
16509 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
16511 #undef TARGET_SCALAR_MODE_SUPPORTED_P
16512 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
16514 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
16515 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode
16517 #undef TARGET_INIT_BUILTINS
16518 #define TARGET_INIT_BUILTINS mips_init_builtins
16519 #undef TARGET_BUILTIN_DECL
16520 #define TARGET_BUILTIN_DECL mips_builtin_decl
16521 #undef TARGET_EXPAND_BUILTIN
16522 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
16524 #undef TARGET_HAVE_TLS
16525 #define TARGET_HAVE_TLS HAVE_AS_TLS
16527 #undef TARGET_CANNOT_FORCE_CONST_MEM
16528 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
16530 #undef TARGET_LEGITIMATE_CONSTANT_P
16531 #define TARGET_LEGITIMATE_CONSTANT_P mips_legitimate_constant_p
16533 #undef TARGET_ENCODE_SECTION_INFO
16534 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
16536 #undef TARGET_ATTRIBUTE_TABLE
16537 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
16538 /* All our function attributes are related to how out-of-line copies should
16539 be compiled or called. They don't in themselves prevent inlining. */
16540 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
16541 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
16543 #undef TARGET_EXTRA_LIVE_ON_ENTRY
16544 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
16546 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
16547 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
16548 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
16549 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
16551 #undef TARGET_COMP_TYPE_ATTRIBUTES
16552 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
16554 #ifdef HAVE_AS_DTPRELWORD
16555 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
16556 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
16558 #undef TARGET_DWARF_REGISTER_SPAN
16559 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
16561 #undef TARGET_ASM_FINAL_POSTSCAN_INSN
16562 #define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn
16564 #undef TARGET_LEGITIMATE_ADDRESS_P
16565 #define TARGET_LEGITIMATE_ADDRESS_P mips_legitimate_address_p
16567 #undef TARGET_FRAME_POINTER_REQUIRED
16568 #define TARGET_FRAME_POINTER_REQUIRED mips_frame_pointer_required
16570 #undef TARGET_CAN_ELIMINATE
16571 #define TARGET_CAN_ELIMINATE mips_can_eliminate
16573 #undef TARGET_CONDITIONAL_REGISTER_USAGE
16574 #define TARGET_CONDITIONAL_REGISTER_USAGE mips_conditional_register_usage
16576 #undef TARGET_TRAMPOLINE_INIT
16577 #define TARGET_TRAMPOLINE_INIT mips_trampoline_init
16579 #undef TARGET_ASM_OUTPUT_SOURCE_FILENAME
16580 #define TARGET_ASM_OUTPUT_SOURCE_FILENAME mips_output_filename
16582 #undef TARGET_SHIFT_TRUNCATION_MASK
16583 #define TARGET_SHIFT_TRUNCATION_MASK mips_shift_truncation_mask
16585 struct gcc_target targetm = TARGET_INITIALIZER;
16587 #include "gt-mips.h"