1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2016 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
28 #include "target-globals.h"
37 #include "stringpool.h"
44 #include "diagnostic-core.h"
45 #include "diagnostic.h"
47 #include "fold-const.h"
48 #include "print-tree.h"
49 #include "stor-layout.h"
52 #include "conditions.h"
54 #include "insn-attr.h"
66 #include "cfgcleanup.h"
68 #include "langhooks.h"
69 #include "internal-fn.h"
70 #include "gimple-fold.h"
75 #include "tree-pass.h"
80 #include "tm-constrs.h"
82 /* This file should be included last. */
83 #include "target-def.h"
85 /* Remember the last target of s390_set_current_function. */
86 static GTY(()) tree s390_previous_fndecl;
88 /* Define the specific costs for a given cpu. */
90 struct processor_costs
93 const int m; /* cost of an M instruction. */
94 const int mghi; /* cost of an MGHI instruction. */
95 const int mh; /* cost of an MH instruction. */
96 const int mhi; /* cost of an MHI instruction. */
97 const int ml; /* cost of an ML instruction. */
98 const int mr; /* cost of an MR instruction. */
99 const int ms; /* cost of an MS instruction. */
100 const int msg; /* cost of an MSG instruction. */
101 const int msgf; /* cost of an MSGF instruction. */
102 const int msgfr; /* cost of an MSGFR instruction. */
103 const int msgr; /* cost of an MSGR instruction. */
104 const int msr; /* cost of an MSR instruction. */
105 const int mult_df; /* cost of multiplication in DFmode. */
108 const int sqxbr; /* cost of square root in TFmode. */
109 const int sqdbr; /* cost of square root in DFmode. */
110 const int sqebr; /* cost of square root in SFmode. */
111 /* multiply and add */
112 const int madbr; /* cost of multiply and add in DFmode. */
113 const int maebr; /* cost of multiply and add in SFmode. */
125 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
128 struct processor_costs z900_cost =
130 COSTS_N_INSNS (5), /* M */
131 COSTS_N_INSNS (10), /* MGHI */
132 COSTS_N_INSNS (5), /* MH */
133 COSTS_N_INSNS (4), /* MHI */
134 COSTS_N_INSNS (5), /* ML */
135 COSTS_N_INSNS (5), /* MR */
136 COSTS_N_INSNS (4), /* MS */
137 COSTS_N_INSNS (15), /* MSG */
138 COSTS_N_INSNS (7), /* MSGF */
139 COSTS_N_INSNS (7), /* MSGFR */
140 COSTS_N_INSNS (10), /* MSGR */
141 COSTS_N_INSNS (4), /* MSR */
142 COSTS_N_INSNS (7), /* multiplication in DFmode */
143 COSTS_N_INSNS (13), /* MXBR */
144 COSTS_N_INSNS (136), /* SQXBR */
145 COSTS_N_INSNS (44), /* SQDBR */
146 COSTS_N_INSNS (35), /* SQEBR */
147 COSTS_N_INSNS (18), /* MADBR */
148 COSTS_N_INSNS (13), /* MAEBR */
149 COSTS_N_INSNS (134), /* DXBR */
150 COSTS_N_INSNS (30), /* DDBR */
151 COSTS_N_INSNS (27), /* DEBR */
152 COSTS_N_INSNS (220), /* DLGR */
153 COSTS_N_INSNS (34), /* DLR */
154 COSTS_N_INSNS (34), /* DR */
155 COSTS_N_INSNS (32), /* DSGFR */
156 COSTS_N_INSNS (32), /* DSGR */
160 struct processor_costs z990_cost =
162 COSTS_N_INSNS (4), /* M */
163 COSTS_N_INSNS (2), /* MGHI */
164 COSTS_N_INSNS (2), /* MH */
165 COSTS_N_INSNS (2), /* MHI */
166 COSTS_N_INSNS (4), /* ML */
167 COSTS_N_INSNS (4), /* MR */
168 COSTS_N_INSNS (5), /* MS */
169 COSTS_N_INSNS (6), /* MSG */
170 COSTS_N_INSNS (4), /* MSGF */
171 COSTS_N_INSNS (4), /* MSGFR */
172 COSTS_N_INSNS (4), /* MSGR */
173 COSTS_N_INSNS (4), /* MSR */
174 COSTS_N_INSNS (1), /* multiplication in DFmode */
175 COSTS_N_INSNS (28), /* MXBR */
176 COSTS_N_INSNS (130), /* SQXBR */
177 COSTS_N_INSNS (66), /* SQDBR */
178 COSTS_N_INSNS (38), /* SQEBR */
179 COSTS_N_INSNS (1), /* MADBR */
180 COSTS_N_INSNS (1), /* MAEBR */
181 COSTS_N_INSNS (60), /* DXBR */
182 COSTS_N_INSNS (40), /* DDBR */
183 COSTS_N_INSNS (26), /* DEBR */
184 COSTS_N_INSNS (176), /* DLGR */
185 COSTS_N_INSNS (31), /* DLR */
186 COSTS_N_INSNS (31), /* DR */
187 COSTS_N_INSNS (31), /* DSGFR */
188 COSTS_N_INSNS (31), /* DSGR */
192 struct processor_costs z9_109_cost =
194 COSTS_N_INSNS (4), /* M */
195 COSTS_N_INSNS (2), /* MGHI */
196 COSTS_N_INSNS (2), /* MH */
197 COSTS_N_INSNS (2), /* MHI */
198 COSTS_N_INSNS (4), /* ML */
199 COSTS_N_INSNS (4), /* MR */
200 COSTS_N_INSNS (5), /* MS */
201 COSTS_N_INSNS (6), /* MSG */
202 COSTS_N_INSNS (4), /* MSGF */
203 COSTS_N_INSNS (4), /* MSGFR */
204 COSTS_N_INSNS (4), /* MSGR */
205 COSTS_N_INSNS (4), /* MSR */
206 COSTS_N_INSNS (1), /* multiplication in DFmode */
207 COSTS_N_INSNS (28), /* MXBR */
208 COSTS_N_INSNS (130), /* SQXBR */
209 COSTS_N_INSNS (66), /* SQDBR */
210 COSTS_N_INSNS (38), /* SQEBR */
211 COSTS_N_INSNS (1), /* MADBR */
212 COSTS_N_INSNS (1), /* MAEBR */
213 COSTS_N_INSNS (60), /* DXBR */
214 COSTS_N_INSNS (40), /* DDBR */
215 COSTS_N_INSNS (26), /* DEBR */
216 COSTS_N_INSNS (30), /* DLGR */
217 COSTS_N_INSNS (23), /* DLR */
218 COSTS_N_INSNS (23), /* DR */
219 COSTS_N_INSNS (24), /* DSGFR */
220 COSTS_N_INSNS (24), /* DSGR */
224 struct processor_costs z10_cost =
226 COSTS_N_INSNS (10), /* M */
227 COSTS_N_INSNS (10), /* MGHI */
228 COSTS_N_INSNS (10), /* MH */
229 COSTS_N_INSNS (10), /* MHI */
230 COSTS_N_INSNS (10), /* ML */
231 COSTS_N_INSNS (10), /* MR */
232 COSTS_N_INSNS (10), /* MS */
233 COSTS_N_INSNS (10), /* MSG */
234 COSTS_N_INSNS (10), /* MSGF */
235 COSTS_N_INSNS (10), /* MSGFR */
236 COSTS_N_INSNS (10), /* MSGR */
237 COSTS_N_INSNS (10), /* MSR */
238 COSTS_N_INSNS (1) , /* multiplication in DFmode */
239 COSTS_N_INSNS (50), /* MXBR */
240 COSTS_N_INSNS (120), /* SQXBR */
241 COSTS_N_INSNS (52), /* SQDBR */
242 COSTS_N_INSNS (38), /* SQEBR */
243 COSTS_N_INSNS (1), /* MADBR */
244 COSTS_N_INSNS (1), /* MAEBR */
245 COSTS_N_INSNS (111), /* DXBR */
246 COSTS_N_INSNS (39), /* DDBR */
247 COSTS_N_INSNS (32), /* DEBR */
248 COSTS_N_INSNS (160), /* DLGR */
249 COSTS_N_INSNS (71), /* DLR */
250 COSTS_N_INSNS (71), /* DR */
251 COSTS_N_INSNS (71), /* DSGFR */
252 COSTS_N_INSNS (71), /* DSGR */
256 struct processor_costs z196_cost =
258 COSTS_N_INSNS (7), /* M */
259 COSTS_N_INSNS (5), /* MGHI */
260 COSTS_N_INSNS (5), /* MH */
261 COSTS_N_INSNS (5), /* MHI */
262 COSTS_N_INSNS (7), /* ML */
263 COSTS_N_INSNS (7), /* MR */
264 COSTS_N_INSNS (6), /* MS */
265 COSTS_N_INSNS (8), /* MSG */
266 COSTS_N_INSNS (6), /* MSGF */
267 COSTS_N_INSNS (6), /* MSGFR */
268 COSTS_N_INSNS (8), /* MSGR */
269 COSTS_N_INSNS (6), /* MSR */
270 COSTS_N_INSNS (1) , /* multiplication in DFmode */
271 COSTS_N_INSNS (40), /* MXBR B+40 */
272 COSTS_N_INSNS (100), /* SQXBR B+100 */
273 COSTS_N_INSNS (42), /* SQDBR B+42 */
274 COSTS_N_INSNS (28), /* SQEBR B+28 */
275 COSTS_N_INSNS (1), /* MADBR B */
276 COSTS_N_INSNS (1), /* MAEBR B */
277 COSTS_N_INSNS (101), /* DXBR B+101 */
278 COSTS_N_INSNS (29), /* DDBR */
279 COSTS_N_INSNS (22), /* DEBR */
280 COSTS_N_INSNS (160), /* DLGR cracked */
281 COSTS_N_INSNS (160), /* DLR cracked */
282 COSTS_N_INSNS (160), /* DR expanded */
283 COSTS_N_INSNS (160), /* DSGFR cracked */
284 COSTS_N_INSNS (160), /* DSGR cracked */
288 struct processor_costs zEC12_cost =
290 COSTS_N_INSNS (7), /* M */
291 COSTS_N_INSNS (5), /* MGHI */
292 COSTS_N_INSNS (5), /* MH */
293 COSTS_N_INSNS (5), /* MHI */
294 COSTS_N_INSNS (7), /* ML */
295 COSTS_N_INSNS (7), /* MR */
296 COSTS_N_INSNS (6), /* MS */
297 COSTS_N_INSNS (8), /* MSG */
298 COSTS_N_INSNS (6), /* MSGF */
299 COSTS_N_INSNS (6), /* MSGFR */
300 COSTS_N_INSNS (8), /* MSGR */
301 COSTS_N_INSNS (6), /* MSR */
302 COSTS_N_INSNS (1) , /* multiplication in DFmode */
303 COSTS_N_INSNS (40), /* MXBR B+40 */
304 COSTS_N_INSNS (100), /* SQXBR B+100 */
305 COSTS_N_INSNS (42), /* SQDBR B+42 */
306 COSTS_N_INSNS (28), /* SQEBR B+28 */
307 COSTS_N_INSNS (1), /* MADBR B */
308 COSTS_N_INSNS (1), /* MAEBR B */
309 COSTS_N_INSNS (131), /* DXBR B+131 */
310 COSTS_N_INSNS (29), /* DDBR */
311 COSTS_N_INSNS (22), /* DEBR */
312 COSTS_N_INSNS (160), /* DLGR cracked */
313 COSTS_N_INSNS (160), /* DLR cracked */
314 COSTS_N_INSNS (160), /* DR expanded */
315 COSTS_N_INSNS (160), /* DSGFR cracked */
316 COSTS_N_INSNS (160), /* DSGR cracked */
321 const char *const name;
322 const enum processor_type processor;
323 const struct processor_costs *cost;
325 const processor_table[] =
327 { "g5", PROCESSOR_9672_G5, &z900_cost },
328 { "g6", PROCESSOR_9672_G6, &z900_cost },
329 { "z900", PROCESSOR_2064_Z900, &z900_cost },
330 { "z990", PROCESSOR_2084_Z990, &z990_cost },
331 { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
332 { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
333 { "z10", PROCESSOR_2097_Z10, &z10_cost },
334 { "z196", PROCESSOR_2817_Z196, &z196_cost },
335 { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
336 { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
337 { "native", PROCESSOR_NATIVE, NULL }
340 extern int reload_completed;
342 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
343 static rtx_insn *last_scheduled_insn;
344 #define MAX_SCHED_UNITS 3
345 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
347 /* The maximum score added for an instruction whose unit hasn't been
348 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
349 give instruction mix scheduling more priority over instruction
351 #define MAX_SCHED_MIX_SCORE 8
353 /* The maximum distance up to which individual scores will be
354 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
355 Increase this with the OOO windows size of the machine. */
356 #define MAX_SCHED_MIX_DISTANCE 100
358 /* Structure used to hold the components of a S/390 memory
359 address. A legitimate address on S/390 is of the general
361 base + index + displacement
362 where any of the components is optional.
364 base and index are registers of the class ADDR_REGS,
365 displacement is an unsigned 12-bit immediate constant. */
376 /* The following structure is embedded in the machine
377 specific part of struct function. */
379 struct GTY (()) s390_frame_layout
381 /* Offset within stack frame. */
382 HOST_WIDE_INT gprs_offset;
383 HOST_WIDE_INT f0_offset;
384 HOST_WIDE_INT f4_offset;
385 HOST_WIDE_INT f8_offset;
386 HOST_WIDE_INT backchain_offset;
388 /* Number of first and last gpr where slots in the register
389 save area are reserved for. */
390 int first_save_gpr_slot;
391 int last_save_gpr_slot;
393 /* Location (FP register number) where GPRs (r0-r15) should
395 0 - does not need to be saved at all
397 #define SAVE_SLOT_NONE 0
398 #define SAVE_SLOT_STACK -1
399 signed char gpr_save_slots[16];
401 /* Number of first and last gpr to be saved, restored. */
403 int first_restore_gpr;
405 int last_restore_gpr;
407 /* Bits standing for floating point registers. Set, if the
408 respective register has to be saved. Starting with reg 16 (f0)
409 at the rightmost bit.
410 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
411 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
412 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
413 unsigned int fpr_bitmap;
415 /* Number of floating point registers f8-f15 which must be saved. */
418 /* Set if return address needs to be saved.
419 This flag is set by s390_return_addr_rtx if it could not use
420 the initial value of r14 and therefore depends on r14 saved
422 bool save_return_addr_p;
424 /* Size of stack frame. */
425 HOST_WIDE_INT frame_size;
428 /* Define the structure for the machine field in struct function. */
430 struct GTY(()) machine_function
432 struct s390_frame_layout frame_layout;
434 /* Literal pool base register. */
437 /* True if we may need to perform branch splitting. */
438 bool split_branches_pending_p;
440 bool has_landing_pad_p;
442 /* True if the current function may contain a tbegin clobbering
446 /* For -fsplit-stack support: A stack local which holds a pointer to
447 the stack arguments for a function with a variable number of
448 arguments. This is set at the start of the function and is used
449 to initialize the overflow_arg_area field of the va_list
451 rtx split_stack_varargs_pointer;
454 /* Few accessor macros for struct cfun->machine->s390_frame_layout. */
456 #define cfun_frame_layout (cfun->machine->frame_layout)
457 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
458 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
459 ? cfun_frame_layout.fpr_bitmap & 0x0f \
460 : cfun_frame_layout.fpr_bitmap & 0x03))
461 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
462 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
463 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
464 (1 << (REGNO - FPR0_REGNUM)))
465 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
466 (1 << (REGNO - FPR0_REGNUM))))
467 #define cfun_gpr_save_slot(REGNO) \
468 cfun->machine->frame_layout.gpr_save_slots[REGNO]
470 /* Number of GPRs and FPRs used for argument passing. */
471 #define GP_ARG_NUM_REG 5
472 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
473 #define VEC_ARG_NUM_REG 8
475 /* A couple of shortcuts. */
476 #define CONST_OK_FOR_J(x) \
477 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
478 #define CONST_OK_FOR_K(x) \
479 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
480 #define CONST_OK_FOR_Os(x) \
481 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
482 #define CONST_OK_FOR_Op(x) \
483 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
484 #define CONST_OK_FOR_On(x) \
485 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
487 #define REGNO_PAIR_OK(REGNO, MODE) \
488 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
490 /* That's the read ahead of the dynamic branch prediction unit in
491 bytes on a z10 (or higher) CPU. */
492 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
495 /* Indicate which ABI has been used for passing vector args.
496 0 - no vector type arguments have been passed where the ABI is relevant
497 1 - the old ABI has been used
498 2 - a vector type argument has been passed either in a vector register
499 or on the stack by value */
500 static int s390_vector_abi = 0;
502 /* Set the vector ABI marker if TYPE is subject to the vector ABI
503 switch. The vector ABI affects only vector data types. There are
504 two aspects of the vector ABI relevant here:
506 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
507 ABI and natural alignment with the old.
509 2. vector <= 16 bytes are passed in VRs or by value on the stack
510 with the new ABI but by reference on the stack with the old.
512 If ARG_P is true TYPE is used for a function argument or return
513 value. The ABI marker then is set for all vector data types. If
514 ARG_P is false only type 1 vectors are being checked. */
517 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
519 static hash_set<const_tree> visited_types_hash;
524 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
527 if (visited_types_hash.contains (type))
530 visited_types_hash.add (type);
532 if (VECTOR_TYPE_P (type))
534 int type_size = int_size_in_bytes (type);
536 /* Outside arguments only the alignment is changing and this
537 only happens for vector types >= 16 bytes. */
538 if (!arg_p && type_size < 16)
541 /* In arguments vector types > 16 are passed as before (GCC
542 never enforced the bigger alignment for arguments which was
543 required by the old vector ABI). However, it might still be
544 ABI relevant due to the changed alignment if it is a struct
546 if (arg_p && type_size > 16 && !in_struct_p)
549 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
551 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
553 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
554 natural alignment there will never be ABI dependent padding
555 in an array type. That's why we do not set in_struct_p to
557 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
559 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
563 /* Check the return type. */
564 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
566 for (arg_chain = TYPE_ARG_TYPES (type);
568 arg_chain = TREE_CHAIN (arg_chain))
569 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
571 else if (RECORD_OR_UNION_TYPE_P (type))
575 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
577 if (TREE_CODE (field) != FIELD_DECL)
580 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
586 /* System z builtins. */
588 #include "s390-builtins.h"
590 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
595 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
597 #define OB_DEF_VAR(...)
598 #include "s390-builtins.def"
602 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
607 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
609 #define OB_DEF_VAR(...)
610 #include "s390-builtins.def"
614 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
620 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
621 #define OB_DEF_VAR(...)
622 #include "s390-builtins.def"
627 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
634 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
635 #include "s390-builtins.def"
639 tree s390_builtin_types[BT_MAX];
640 tree s390_builtin_fn_types[BT_FN_MAX];
641 tree s390_builtin_decls[S390_BUILTIN_MAX +
642 S390_OVERLOADED_BUILTIN_MAX +
643 S390_OVERLOADED_BUILTIN_VAR_MAX];
645 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
649 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
651 #define OB_DEF_VAR(...)
653 #include "s390-builtins.def"
658 s390_init_builtins (void)
660 /* These definitions are being used in s390-builtins.def. */
661 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
663 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
664 tree c_uint64_type_node;
666 /* The uint64_type_node from tree.c is not compatible to the C99
667 uint64_t data type. What we want is c_uint64_type_node from
668 c-common.c. But since backend code is not supposed to interface
669 with the frontend we recreate it here. */
671 c_uint64_type_node = long_unsigned_type_node;
673 c_uint64_type_node = long_long_unsigned_type_node;
676 #define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P) \
677 if (s390_builtin_types[INDEX] == NULL) \
678 s390_builtin_types[INDEX] = (!CONST_P) ? \
679 (NODE) : build_type_variant ((NODE), 1, 0);
681 #undef DEF_POINTER_TYPE
682 #define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE) \
683 if (s390_builtin_types[INDEX] == NULL) \
684 s390_builtin_types[INDEX] = \
685 build_pointer_type (s390_builtin_types[INDEX_BASE]);
687 #undef DEF_DISTINCT_TYPE
688 #define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE) \
689 if (s390_builtin_types[INDEX] == NULL) \
690 s390_builtin_types[INDEX] = \
691 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
693 #undef DEF_VECTOR_TYPE
694 #define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
695 if (s390_builtin_types[INDEX] == NULL) \
696 s390_builtin_types[INDEX] = \
697 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
699 #undef DEF_OPAQUE_VECTOR_TYPE
700 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS) \
701 if (s390_builtin_types[INDEX] == NULL) \
702 s390_builtin_types[INDEX] = \
703 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
706 #define DEF_FN_TYPE(INDEX, BFLAGS, args...) \
707 if (s390_builtin_fn_types[INDEX] == NULL) \
708 s390_builtin_fn_types[INDEX] = \
709 build_function_type_list (args, NULL_TREE);
711 #define DEF_OV_TYPE(...)
712 #include "s390-builtin-types.def"
715 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
716 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
717 s390_builtin_decls[S390_BUILTIN_##NAME] = \
718 add_builtin_function ("__builtin_" #NAME, \
719 s390_builtin_fn_types[FNTYPE], \
720 S390_BUILTIN_##NAME, \
725 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
726 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
728 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
729 add_builtin_function ("__builtin_" #NAME, \
730 s390_builtin_fn_types[FNTYPE], \
731 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
736 #define OB_DEF_VAR(...)
737 #include "s390-builtins.def"
741 /* Return true if ARG is appropriate as argument number ARGNUM of
742 builtin DECL. The operand flags from s390-builtins.def have to
743 passed as OP_FLAGS. */
745 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
747 if (O_UIMM_P (op_flags))
749 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
750 int bitwidth = bitwidths[op_flags - O_U1];
752 if (!tree_fits_uhwi_p (arg)
753 || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
755 error("constant argument %d for builtin %qF is out of range (0.."
756 HOST_WIDE_INT_PRINT_UNSIGNED ")",
758 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
763 if (O_SIMM_P (op_flags))
765 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
766 int bitwidth = bitwidths[op_flags - O_S2];
768 if (!tree_fits_shwi_p (arg)
769 || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
770 || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
772 error("constant argument %d for builtin %qF is out of range ("
773 HOST_WIDE_INT_PRINT_DEC ".."
774 HOST_WIDE_INT_PRINT_DEC ")",
776 -((HOST_WIDE_INT)1 << (bitwidth - 1)),
777 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
784 /* Expand an expression EXP that calls a built-in function,
785 with result going to TARGET if that's convenient
786 (and in mode MODE if that's convenient).
787 SUBTARGET may be used as the target for computing one of EXP's operands.
788 IGNORE is nonzero if the value is to be ignored. */
791 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
792 machine_mode mode ATTRIBUTE_UNUSED,
793 int ignore ATTRIBUTE_UNUSED)
797 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
798 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
799 enum insn_code icode;
800 rtx op[MAX_ARGS], pat;
804 call_expr_arg_iterator iter;
805 unsigned int all_op_flags = opflags_for_builtin (fcode);
806 machine_mode last_vec_mode = VOIDmode;
808 if (TARGET_DEBUG_ARG)
811 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
812 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
813 bflags_for_builtin (fcode));
816 if (S390_USE_TARGET_ATTRIBUTE)
820 bflags = bflags_for_builtin (fcode);
821 if ((bflags & B_HTM) && !TARGET_HTM)
823 error ("Builtin %qF is not supported without -mhtm "
824 "(default with -march=zEC12 and higher).", fndecl);
827 if ((bflags & B_VX) && !TARGET_VX)
829 error ("Builtin %qF is not supported without -mvx "
830 "(default with -march=z13 and higher).", fndecl);
834 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
835 && fcode < S390_ALL_BUILTIN_MAX)
839 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
841 icode = code_for_builtin[fcode];
842 /* Set a flag in the machine specific cfun part in order to support
843 saving/restoring of FPRs. */
844 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
845 cfun->machine->tbegin_p = true;
847 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
849 error ("Unresolved overloaded builtin");
853 internal_error ("bad builtin fcode");
856 internal_error ("bad builtin icode");
858 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
862 machine_mode tmode = insn_data[icode].operand[0].mode;
864 || GET_MODE (target) != tmode
865 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
866 target = gen_reg_rtx (tmode);
868 /* There are builtins (e.g. vec_promote) with no vector
869 arguments but an element selector. So we have to also look
870 at the vector return type when emitting the modulo
872 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
873 last_vec_mode = insn_data[icode].operand[0].mode;
877 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
880 const struct insn_operand_data *insn_op;
881 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
883 all_op_flags = all_op_flags >> O_SHIFT;
885 if (arg == error_mark_node)
887 if (arity >= MAX_ARGS)
890 if (O_IMM_P (op_flags)
891 && TREE_CODE (arg) != INTEGER_CST)
893 error ("constant value required for builtin %qF argument %d",
898 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
901 insn_op = &insn_data[icode].operand[arity + nonvoid];
902 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
904 /* expand_expr truncates constants to the target mode only if it
905 is "convenient". However, our checks below rely on this
907 if (CONST_INT_P (op[arity])
908 && SCALAR_INT_MODE_P (insn_op->mode)
909 && GET_MODE (op[arity]) != insn_op->mode)
910 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
913 /* Wrap the expanded RTX for pointer types into a MEM expr with
914 the proper mode. This allows us to use e.g. (match_operand
915 "memory_operand"..) in the insn patterns instead of (mem
916 (match_operand "address_operand)). This is helpful for
917 patterns not just accepting MEMs. */
918 if (POINTER_TYPE_P (TREE_TYPE (arg))
919 && insn_op->predicate != address_operand)
920 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
922 /* Expand the module operation required on element selectors. */
923 if (op_flags == O_ELEM)
925 gcc_assert (last_vec_mode != VOIDmode);
926 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
928 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
929 NULL_RTX, 1, OPTAB_DIRECT);
932 /* Record the vector mode used for an element selector. This assumes:
933 1. There is no builtin with two different vector modes and an element selector
934 2. The element selector comes after the vector type it is referring to.
935 This currently the true for all the builtins but FIXME we
936 should better check for that. */
937 if (VECTOR_MODE_P (insn_op->mode))
938 last_vec_mode = insn_op->mode;
940 if (insn_op->predicate (op[arity], insn_op->mode))
946 if (MEM_P (op[arity])
947 && insn_op->predicate == memory_operand
948 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
949 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
951 op[arity] = replace_equiv_address (op[arity],
952 copy_to_mode_reg (Pmode,
953 XEXP (op[arity], 0)));
955 /* Some of the builtins require different modes/types than the
956 pattern in order to implement a specific API. Instead of
957 adding many expanders which do the mode change we do it here.
958 E.g. s390_vec_add_u128 required to have vector unsigned char
959 arguments is mapped to addti3. */
960 else if (insn_op->mode != VOIDmode
961 && GET_MODE (op[arity]) != VOIDmode
962 && GET_MODE (op[arity]) != insn_op->mode
963 && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
964 GET_MODE (op[arity]), 0))
969 else if (GET_MODE (op[arity]) == insn_op->mode
970 || GET_MODE (op[arity]) == VOIDmode
971 || (insn_op->predicate == address_operand
972 && GET_MODE (op[arity]) == Pmode))
974 /* An address_operand usually has VOIDmode in the expander
975 so we cannot use this. */
976 machine_mode target_mode =
977 (insn_op->predicate == address_operand
978 ? Pmode : insn_op->mode);
979 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
982 if (!insn_op->predicate (op[arity], insn_op->mode))
984 error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
993 pat = GEN_FCN (icode) (target);
997 pat = GEN_FCN (icode) (target, op[0]);
999 pat = GEN_FCN (icode) (op[0]);
1003 pat = GEN_FCN (icode) (target, op[0], op[1]);
1005 pat = GEN_FCN (icode) (op[0], op[1]);
1009 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
1011 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
1015 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1017 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1021 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1023 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1027 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1029 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1045 static const int s390_hotpatch_hw_max = 1000000;
1046 static int s390_hotpatch_hw_before_label = 0;
1047 static int s390_hotpatch_hw_after_label = 0;
1049 /* Check whether the hotpatch attribute is applied to a function and, if it has
1050 an argument, the argument is valid. */
1053 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1054 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1060 if (TREE_CODE (*node) != FUNCTION_DECL)
1062 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1064 *no_add_attrs = true;
1066 if (args != NULL && TREE_CHAIN (args) != NULL)
1068 expr = TREE_VALUE (args);
1069 expr2 = TREE_VALUE (TREE_CHAIN (args));
1071 if (args == NULL || TREE_CHAIN (args) == NULL)
1073 else if (TREE_CODE (expr) != INTEGER_CST
1074 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1075 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1077 else if (TREE_CODE (expr2) != INTEGER_CST
1078 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1079 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1085 error ("requested %qE attribute is not a comma separated pair of"
1086 " non-negative integer constants or too large (max. %d)", name,
1087 s390_hotpatch_hw_max);
1088 *no_add_attrs = true;
1094 /* Expand the s390_vector_bool type attribute. */
1097 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1098 tree args ATTRIBUTE_UNUSED,
1099 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1101 tree type = *node, result = NULL_TREE;
1104 while (POINTER_TYPE_P (type)
1105 || TREE_CODE (type) == FUNCTION_TYPE
1106 || TREE_CODE (type) == METHOD_TYPE
1107 || TREE_CODE (type) == ARRAY_TYPE)
1108 type = TREE_TYPE (type);
1110 mode = TYPE_MODE (type);
1113 case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
1114 case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
1115 case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
1116 case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
1120 *no_add_attrs = true; /* No need to hang on to the attribute. */
1123 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
1128 static const struct attribute_spec s390_attribute_table[] = {
1129 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1130 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1132 { NULL, 0, 0, false, false, false, NULL, false }
1135 /* Return the alignment for LABEL. We default to the -falign-labels
1136 value except for the literal pool base label. */
1138 s390_label_align (rtx_insn *label)
1140 rtx_insn *prev_insn = prev_active_insn (label);
1143 if (prev_insn == NULL_RTX)
1146 set = single_set (prev_insn);
1148 if (set == NULL_RTX)
1151 src = SET_SRC (set);
1153 /* Don't align literal pool base labels. */
1154 if (GET_CODE (src) == UNSPEC
1155 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1159 return align_labels_log;
1163 s390_libgcc_cmp_return_mode (void)
1165 return TARGET_64BIT ? DImode : SImode;
1169 s390_libgcc_shift_count_mode (void)
1171 return TARGET_64BIT ? DImode : SImode;
1175 s390_unwind_word_mode (void)
1177 return TARGET_64BIT ? DImode : SImode;
1180 /* Return true if the back end supports mode MODE. */
1182 s390_scalar_mode_supported_p (machine_mode mode)
1184 /* In contrast to the default implementation reject TImode constants on 31bit
1185 TARGET_ZARCH for ABI compliance. */
1186 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1189 if (DECIMAL_FLOAT_MODE_P (mode))
1190 return default_decimal_float_supported_p ();
1192 return default_scalar_mode_supported_p (mode);
1195 /* Return true if the back end supports vector mode MODE. */
1197 s390_vector_mode_supported_p (machine_mode mode)
1201 if (!VECTOR_MODE_P (mode)
1203 || GET_MODE_SIZE (mode) > 16)
1206 inner = GET_MODE_INNER (mode);
1224 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1227 s390_set_has_landing_pad_p (bool value)
1229 cfun->machine->has_landing_pad_p = value;
1232 /* If two condition code modes are compatible, return a condition code
1233 mode which is compatible with both. Otherwise, return
1237 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1245 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1246 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1267 /* Return true if SET either doesn't set the CC register, or else
1268 the source and destination have matching CC modes and that
1269 CC mode is at least as constrained as REQ_MODE. */
1272 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1274 machine_mode set_mode;
1276 gcc_assert (GET_CODE (set) == SET);
1278 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1281 set_mode = GET_MODE (SET_DEST (set));
1300 if (req_mode != set_mode)
1305 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1306 && req_mode != CCSRmode && req_mode != CCURmode)
1312 if (req_mode != CCAmode)
1320 return (GET_MODE (SET_SRC (set)) == set_mode);
1323 /* Return true if every SET in INSN that sets the CC register
1324 has source and destination with matching CC modes and that
1325 CC mode is at least as constrained as REQ_MODE.
1326 If REQ_MODE is VOIDmode, always return false. */
1329 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1333 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1334 if (req_mode == VOIDmode)
1337 if (GET_CODE (PATTERN (insn)) == SET)
1338 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1340 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1341 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1343 rtx set = XVECEXP (PATTERN (insn), 0, i);
1344 if (GET_CODE (set) == SET)
1345 if (!s390_match_ccmode_set (set, req_mode))
1352 /* If a test-under-mask instruction can be used to implement
1353 (compare (and ... OP1) OP2), return the CC mode required
1354 to do that. Otherwise, return VOIDmode.
1355 MIXED is true if the instruction can distinguish between
1356 CC1 and CC2 for mixed selected bits (TMxx), it is false
1357 if the instruction cannot (TM). */
1360 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1364 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1365 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1368 /* Selected bits all zero: CC0.
1369 e.g.: int a; if ((a & (16 + 128)) == 0) */
1370 if (INTVAL (op2) == 0)
1373 /* Selected bits all one: CC3.
1374 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1375 if (INTVAL (op2) == INTVAL (op1))
1378 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1380 if ((a & (16 + 128)) == 16) -> CCT1
1381 if ((a & (16 + 128)) == 128) -> CCT2 */
1384 bit1 = exact_log2 (INTVAL (op2));
1385 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1386 if (bit0 != -1 && bit1 != -1)
1387 return bit0 > bit1 ? CCT1mode : CCT2mode;
1393 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1394 OP0 and OP1 of a COMPARE, return the mode to be used for the
1398 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1401 && register_operand (op0, DFmode)
1402 && register_operand (op1, DFmode))
1404 /* LT, LE, UNGT, UNGE require swapping OP0 and OP1. Either
1405 s390_emit_compare or s390_canonicalize_comparison will take
1427 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1428 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1430 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1431 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1433 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1434 || GET_CODE (op1) == NEG)
1435 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1438 if (GET_CODE (op0) == AND)
1440 /* Check whether we can potentially do it via TM. */
1441 machine_mode ccmode;
1442 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1443 if (ccmode != VOIDmode)
1445 /* Relax CCTmode to CCZmode to allow fall-back to AND
1446 if that turns out to be beneficial. */
1447 return ccmode == CCTmode ? CCZmode : ccmode;
1451 if (register_operand (op0, HImode)
1452 && GET_CODE (op1) == CONST_INT
1453 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1455 if (register_operand (op0, QImode)
1456 && GET_CODE (op1) == CONST_INT
1457 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1466 /* The only overflow condition of NEG and ABS happens when
1467 -INT_MAX is used as parameter, which stays negative. So
1468 we have an overflow from a positive value to a negative.
1469 Using CCAP mode the resulting cc can be used for comparisons. */
1470 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1471 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1474 /* If constants are involved in an add instruction it is possible to use
1475 the resulting cc for comparisons with zero. Knowing the sign of the
1476 constant the overflow behavior gets predictable. e.g.:
1477 int a, b; if ((b = a + c) > 0)
1478 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1479 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1480 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1481 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1482 /* Avoid INT32_MIN on 32 bit. */
1483 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1485 if (INTVAL (XEXP((op0), 1)) < 0)
1499 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1500 && GET_CODE (op1) != CONST_INT)
1506 if (GET_CODE (op0) == PLUS
1507 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1510 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1511 && GET_CODE (op1) != CONST_INT)
1517 if (GET_CODE (op0) == MINUS
1518 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1521 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1522 && GET_CODE (op1) != CONST_INT)
1531 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1532 that we can implement more efficiently. */
1535 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1536 bool op0_preserve_value)
1538 if (op0_preserve_value)
1541 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1542 if ((*code == EQ || *code == NE)
1543 && *op1 == const0_rtx
1544 && GET_CODE (*op0) == ZERO_EXTRACT
1545 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1546 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1547 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1549 rtx inner = XEXP (*op0, 0);
1550 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1551 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1552 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1554 if (len > 0 && len < modesize
1555 && pos >= 0 && pos + len <= modesize
1556 && modesize <= HOST_BITS_PER_WIDE_INT)
1558 unsigned HOST_WIDE_INT block;
1559 block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
1560 block <<= modesize - pos - len;
1562 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1563 gen_int_mode (block, GET_MODE (inner)));
1567 /* Narrow AND of memory against immediate to enable TM. */
1568 if ((*code == EQ || *code == NE)
1569 && *op1 == const0_rtx
1570 && GET_CODE (*op0) == AND
1571 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1572 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1574 rtx inner = XEXP (*op0, 0);
1575 rtx mask = XEXP (*op0, 1);
1577 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1578 if (GET_CODE (inner) == SUBREG
1579 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1580 && (GET_MODE_SIZE (GET_MODE (inner))
1581 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1583 & GET_MODE_MASK (GET_MODE (inner))
1584 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1586 inner = SUBREG_REG (inner);
1588 /* Do not change volatile MEMs. */
1589 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1591 int part = s390_single_part (XEXP (*op0, 1),
1592 GET_MODE (inner), QImode, 0);
1595 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1596 inner = adjust_address_nv (inner, QImode, part);
1597 *op0 = gen_rtx_AND (QImode, inner, mask);
1602 /* Narrow comparisons against 0xffff to HImode if possible. */
1603 if ((*code == EQ || *code == NE)
1604 && GET_CODE (*op1) == CONST_INT
1605 && INTVAL (*op1) == 0xffff
1606 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1607 && (nonzero_bits (*op0, GET_MODE (*op0))
1608 & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
1610 *op0 = gen_lowpart (HImode, *op0);
1614 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1615 if (GET_CODE (*op0) == UNSPEC
1616 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1617 && XVECLEN (*op0, 0) == 1
1618 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1619 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1620 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1621 && *op1 == const0_rtx)
1623 enum rtx_code new_code = UNKNOWN;
1626 case EQ: new_code = EQ; break;
1627 case NE: new_code = NE; break;
1628 case LT: new_code = GTU; break;
1629 case GT: new_code = LTU; break;
1630 case LE: new_code = GEU; break;
1631 case GE: new_code = LEU; break;
1635 if (new_code != UNKNOWN)
1637 *op0 = XVECEXP (*op0, 0, 0);
1642 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1643 if (GET_CODE (*op0) == UNSPEC
1644 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1645 && XVECLEN (*op0, 0) == 1
1646 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1647 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1648 && CONST_INT_P (*op1))
1650 enum rtx_code new_code = UNKNOWN;
1651 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1657 case EQ: new_code = EQ; break;
1658 case NE: new_code = NE; break;
1665 if (new_code != UNKNOWN)
1667 /* For CCRAWmode put the required cc mask into the second
1669 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1670 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1671 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1672 *op0 = XVECEXP (*op0, 0, 0);
1677 /* Simplify cascaded EQ, NE with const0_rtx. */
1678 if ((*code == NE || *code == EQ)
1679 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1680 && GET_MODE (*op0) == SImode
1681 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1682 && REG_P (XEXP (*op0, 0))
1683 && XEXP (*op0, 1) == const0_rtx
1684 && *op1 == const0_rtx)
1686 if ((*code == EQ && GET_CODE (*op0) == NE)
1687 || (*code == NE && GET_CODE (*op0) == EQ))
1691 *op0 = XEXP (*op0, 0);
1694 /* Prefer register over memory as first operand. */
1695 if (MEM_P (*op0) && REG_P (*op1))
1697 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1698 *code = (int)swap_condition ((enum rtx_code)*code);
1701 /* Using the scalar variants of vector instructions for 64 bit FP
1702 comparisons might require swapping the operands. */
1704 && register_operand (*op0, DFmode)
1705 && register_operand (*op1, DFmode)
1706 && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
1712 case LT: *code = GT; break;
1713 case LE: *code = GE; break;
1714 case UNGT: *code = UNLE; break;
1715 case UNGE: *code = UNLT; break;
1718 tmp = *op0; *op0 = *op1; *op1 = tmp;
1722 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1723 FP compare using the single element variant of vector instructions.
1724 Replace CODE with the comparison code to be used in the CC reg
1725 compare and return the condition code register RTX in CC. */
1728 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1731 machine_mode cmp_mode;
1732 bool swap_p = false;
1736 case EQ: cmp_mode = CCVEQmode; break;
1737 case NE: cmp_mode = CCVEQmode; break;
1738 case GT: cmp_mode = CCVFHmode; break;
1739 case GE: cmp_mode = CCVFHEmode; break;
1740 case UNLE: cmp_mode = CCVFHmode; break;
1741 case UNLT: cmp_mode = CCVFHEmode; break;
1742 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1743 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1744 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1745 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1746 default: return false;
1755 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1756 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1759 gen_rtx_COMPARE (cmp_mode, cmp1,
1761 gen_rtx_CLOBBER (VOIDmode,
1762 gen_rtx_SCRATCH (V2DImode)))));
1767 /* Emit a compare instruction suitable to implement the comparison
1768 OP0 CODE OP1. Return the correct condition RTL to be placed in
1769 the IF_THEN_ELSE of the conditional branch testing the result. */
1772 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1774 machine_mode mode = s390_select_ccmode (code, op0, op1);
1778 && register_operand (op0, DFmode)
1779 && register_operand (op1, DFmode)
1780 && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
1782 /* Work has been done by s390_expand_vec_compare_scalar already. */
1784 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1786 /* Do not output a redundant compare instruction if a
1787 compare_and_swap pattern already computed the result and the
1788 machine modes are compatible. */
1789 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1795 cc = gen_rtx_REG (mode, CC_REGNUM);
1796 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1799 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
1802 /* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
1804 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1805 conditional branch testing the result. */
1808 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1809 rtx cmp, rtx new_rtx)
1811 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
1812 return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
1816 /* Emit a jump instruction to TARGET and return it. If COND is
1817 NULL_RTX, emit an unconditional jump, else a conditional jump under
1821 s390_emit_jump (rtx target, rtx cond)
1825 target = gen_rtx_LABEL_REF (VOIDmode, target);
1827 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1829 insn = gen_rtx_SET (pc_rtx, target);
1830 return emit_jump_insn (insn);
1833 /* Return branch condition mask to implement a branch
1834 specified by CODE. Return -1 for invalid comparisons. */
1837 s390_branch_condition_mask (rtx code)
1839 const int CC0 = 1 << 3;
1840 const int CC1 = 1 << 2;
1841 const int CC2 = 1 << 1;
1842 const int CC3 = 1 << 0;
1844 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1845 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1846 gcc_assert (XEXP (code, 1) == const0_rtx
1847 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1848 && CONST_INT_P (XEXP (code, 1))));
1851 switch (GET_MODE (XEXP (code, 0)))
1855 switch (GET_CODE (code))
1857 case EQ: return CC0;
1858 case NE: return CC1 | CC2 | CC3;
1864 switch (GET_CODE (code))
1866 case EQ: return CC1;
1867 case NE: return CC0 | CC2 | CC3;
1873 switch (GET_CODE (code))
1875 case EQ: return CC2;
1876 case NE: return CC0 | CC1 | CC3;
1882 switch (GET_CODE (code))
1884 case EQ: return CC3;
1885 case NE: return CC0 | CC1 | CC2;
1891 switch (GET_CODE (code))
1893 case EQ: return CC0 | CC2;
1894 case NE: return CC1 | CC3;
1900 switch (GET_CODE (code))
1902 case LTU: return CC2 | CC3; /* carry */
1903 case GEU: return CC0 | CC1; /* no carry */
1909 switch (GET_CODE (code))
1911 case GTU: return CC0 | CC1; /* borrow */
1912 case LEU: return CC2 | CC3; /* no borrow */
1918 switch (GET_CODE (code))
1920 case EQ: return CC0 | CC2;
1921 case NE: return CC1 | CC3;
1922 case LTU: return CC1;
1923 case GTU: return CC3;
1924 case LEU: return CC1 | CC2;
1925 case GEU: return CC2 | CC3;
1930 switch (GET_CODE (code))
1932 case EQ: return CC0;
1933 case NE: return CC1 | CC2 | CC3;
1934 case LTU: return CC1;
1935 case GTU: return CC2;
1936 case LEU: return CC0 | CC1;
1937 case GEU: return CC0 | CC2;
1943 switch (GET_CODE (code))
1945 case EQ: return CC0;
1946 case NE: return CC2 | CC1 | CC3;
1947 case LTU: return CC2;
1948 case GTU: return CC1;
1949 case LEU: return CC0 | CC2;
1950 case GEU: return CC0 | CC1;
1956 switch (GET_CODE (code))
1958 case EQ: return CC0;
1959 case NE: return CC1 | CC2 | CC3;
1960 case LT: return CC1 | CC3;
1961 case GT: return CC2;
1962 case LE: return CC0 | CC1 | CC3;
1963 case GE: return CC0 | CC2;
1969 switch (GET_CODE (code))
1971 case EQ: return CC0;
1972 case NE: return CC1 | CC2 | CC3;
1973 case LT: return CC1;
1974 case GT: return CC2 | CC3;
1975 case LE: return CC0 | CC1;
1976 case GE: return CC0 | CC2 | CC3;
1982 switch (GET_CODE (code))
1984 case EQ: return CC0;
1985 case NE: return CC1 | CC2 | CC3;
1986 case LT: return CC1;
1987 case GT: return CC2;
1988 case LE: return CC0 | CC1;
1989 case GE: return CC0 | CC2;
1990 case UNORDERED: return CC3;
1991 case ORDERED: return CC0 | CC1 | CC2;
1992 case UNEQ: return CC0 | CC3;
1993 case UNLT: return CC1 | CC3;
1994 case UNGT: return CC2 | CC3;
1995 case UNLE: return CC0 | CC1 | CC3;
1996 case UNGE: return CC0 | CC2 | CC3;
1997 case LTGT: return CC1 | CC2;
2003 switch (GET_CODE (code))
2005 case EQ: return CC0;
2006 case NE: return CC2 | CC1 | CC3;
2007 case LT: return CC2;
2008 case GT: return CC1;
2009 case LE: return CC0 | CC2;
2010 case GE: return CC0 | CC1;
2011 case UNORDERED: return CC3;
2012 case ORDERED: return CC0 | CC2 | CC1;
2013 case UNEQ: return CC0 | CC3;
2014 case UNLT: return CC2 | CC3;
2015 case UNGT: return CC1 | CC3;
2016 case UNLE: return CC0 | CC2 | CC3;
2017 case UNGE: return CC0 | CC1 | CC3;
2018 case LTGT: return CC2 | CC1;
2023 /* Vector comparison modes. */
2026 switch (GET_CODE (code))
2028 case EQ: return CC0;
2029 case NE: return CC3;
2034 switch (GET_CODE (code))
2036 case EQ: return CC0 | CC1;
2037 case NE: return CC3 | CC1;
2041 /* Integer vector compare modes. */
2044 switch (GET_CODE (code))
2046 case GT: return CC0;
2047 case LE: return CC3;
2052 switch (GET_CODE (code))
2054 case GT: return CC0 | CC1;
2055 case LE: return CC3 | CC1;
2060 switch (GET_CODE (code))
2062 case GTU: return CC0;
2063 case LEU: return CC3;
2068 switch (GET_CODE (code))
2070 case GTU: return CC0 | CC1;
2071 case LEU: return CC3 | CC1;
2075 /* FP vector compare modes. */
2078 switch (GET_CODE (code))
2080 case GT: return CC0;
2081 case UNLE: return CC3;
2086 switch (GET_CODE (code))
2088 case GT: return CC0 | CC1;
2089 case UNLE: return CC3 | CC1;
2094 switch (GET_CODE (code))
2096 case GE: return CC0;
2097 case UNLT: return CC3;
2102 switch (GET_CODE (code))
2104 case GE: return CC0 | CC1;
2105 case UNLT: return CC3 | CC1;
2111 switch (GET_CODE (code))
2114 return INTVAL (XEXP (code, 1));
2116 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2127 /* Return branch condition mask to implement a compare and branch
2128 specified by CODE. Return -1 for invalid comparisons. */
2131 s390_compare_and_branch_condition_mask (rtx code)
2133 const int CC0 = 1 << 3;
2134 const int CC1 = 1 << 2;
2135 const int CC2 = 1 << 1;
2137 switch (GET_CODE (code))
2161 /* If INV is false, return assembler mnemonic string to implement
2162 a branch specified by CODE. If INV is true, return mnemonic
2163 for the corresponding inverted branch. */
2166 s390_branch_condition_mnemonic (rtx code, int inv)
2170 static const char *const mnemonic[16] =
2172 NULL, "o", "h", "nle",
2173 "l", "nhe", "lh", "ne",
2174 "e", "nlh", "he", "nl",
2175 "le", "nh", "no", NULL
2178 if (GET_CODE (XEXP (code, 0)) == REG
2179 && REGNO (XEXP (code, 0)) == CC_REGNUM
2180 && (XEXP (code, 1) == const0_rtx
2181 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2182 && CONST_INT_P (XEXP (code, 1)))))
2183 mask = s390_branch_condition_mask (code);
2185 mask = s390_compare_and_branch_condition_mask (code);
2187 gcc_assert (mask >= 0);
2192 gcc_assert (mask >= 1 && mask <= 14);
2194 return mnemonic[mask];
2197 /* Return the part of op which has a value different from def.
2198 The size of the part is determined by mode.
2199 Use this function only if you already know that op really
2200 contains such a part. */
2202 unsigned HOST_WIDE_INT
2203 s390_extract_part (rtx op, machine_mode mode, int def)
2205 unsigned HOST_WIDE_INT value = 0;
2206 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2207 int part_bits = GET_MODE_BITSIZE (mode);
2208 unsigned HOST_WIDE_INT part_mask
2209 = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
2212 for (i = 0; i < max_parts; i++)
2215 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2217 value >>= part_bits;
2219 if ((value & part_mask) != (def & part_mask))
2220 return value & part_mask;
2226 /* If OP is an integer constant of mode MODE with exactly one
2227 part of mode PART_MODE unequal to DEF, return the number of that
2228 part. Otherwise, return -1. */
2231 s390_single_part (rtx op,
2233 machine_mode part_mode,
2236 unsigned HOST_WIDE_INT value = 0;
2237 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2238 unsigned HOST_WIDE_INT part_mask
2239 = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
2242 if (GET_CODE (op) != CONST_INT)
2245 for (i = 0; i < n_parts; i++)
2248 value = (unsigned HOST_WIDE_INT) INTVAL (op);
2250 value >>= GET_MODE_BITSIZE (part_mode);
2252 if ((value & part_mask) != (def & part_mask))
2260 return part == -1 ? -1 : n_parts - 1 - part;
2263 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2264 bits and no other bits are set in (the lower SIZE bits of) IN.
2266 PSTART and PEND can be used to obtain the start and end
2267 position (inclusive) of the bitfield relative to 64
2268 bits. *PSTART / *PEND gives the position of the first/last bit
2269 of the bitfield counting from the highest order bit starting
2273 s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
2274 int *pstart, int *pend)
2278 int lowbit = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - 1;
2279 int highbit = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - size;
2280 unsigned HOST_WIDE_INT bitmask = 1ULL;
2282 gcc_assert (!!pstart == !!pend);
2283 for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
2286 /* Look for the rightmost bit of a contiguous range of ones. */
2293 /* Look for the firt zero bit after the range of ones. */
2294 if (! (bitmask & in))
2298 /* We're one past the last one-bit. */
2302 /* No one bits found. */
2305 if (start > highbit)
2307 unsigned HOST_WIDE_INT mask;
2309 /* Calculate a mask for all bits beyond the contiguous bits. */
2310 mask = ((~(0ULL) >> highbit) & (~(0ULL) << (lowbit - start + 1)));
2312 /* There are more bits set beyond the first range of one bits. */
2325 /* Same as s390_contiguous_bitmask_nowrap_p but also returns true
2326 if ~IN contains a contiguous bitfield. In that case, *END is <
2329 If WRAP_P is true, a bitmask that wraps around is also tested.
2330 When a wraparoud occurs *START is greater than *END (in
2331 non-null pointers), and the uppermost (64 - SIZE) bits are thus
2332 part of the range. If WRAP_P is false, no wraparound is
2336 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
2337 int size, int *start, int *end)
2339 int bs = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT;
2342 gcc_assert (!!start == !!end);
2343 if ((in & ((~(0ULL)) >> (bs - size))) == 0)
2344 /* This cannot be expressed as a contiguous bitmask. Exit early because
2345 the second call of s390_contiguous_bitmask_nowrap_p would accept this as
2348 b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
2353 b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
2359 gcc_assert (s >= 1);
2360 *start = ((e + 1) & (bs - 1));
2361 *end = ((s - 1 + bs) & (bs - 1));
2367 /* Return true if OP contains the same contiguous bitfield in *all*
2368 its elements. START and END can be used to obtain the start and
2369 end position of the bitfield.
2371 START/STOP give the position of the first/last bit of the bitfield
2372 counting from the lowest order bit starting with zero. In order to
2373 use these values for S/390 instructions this has to be converted to
2374 "bits big endian" style. */
2377 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2379 unsigned HOST_WIDE_INT mask;
2384 gcc_assert (!!start == !!end);
2385 if (!const_vec_duplicate_p (op, &elt)
2386 || !CONST_INT_P (elt))
2389 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2391 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2395 mask = UINTVAL (elt);
2397 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2402 int bs = sizeof (HOST_WIDE_INT) * BITS_PER_UNIT;
2404 *start -= (bs - size);
2405 *end -= (bs - size);
2413 /* Return true if C consists only of byte chunks being either 0 or
2414 0xff. If MASK is !=NULL a byte mask is generated which is
2415 appropriate for the vector generate byte mask instruction. */
2418 s390_bytemask_vector_p (rtx op, unsigned *mask)
2421 unsigned tmp_mask = 0;
2422 int nunit, unit_size;
2424 if (!VECTOR_MODE_P (GET_MODE (op))
2425 || GET_CODE (op) != CONST_VECTOR
2426 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2429 nunit = GET_MODE_NUNITS (GET_MODE (op));
2430 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2432 for (i = 0; i < nunit; i++)
2434 unsigned HOST_WIDE_INT c;
2437 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2440 c = UINTVAL (XVECEXP (op, 0, i));
2441 for (j = 0; j < unit_size; j++)
2443 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2445 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2446 c = c >> BITS_PER_UNIT;
2456 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2457 equivalent to a shift followed by the AND. In particular, CONTIG
2458 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2459 for ROTL indicate a rotate to the right. */
2462 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2467 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2471 return (64 - end >= rotl);
2474 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2476 rotl = -rotl + (64 - bitsize);
2477 return (start >= rotl);
2481 /* Check whether we can (and want to) split a double-word
2482 move in mode MODE from SRC to DST into two single-word
2483 moves, moving the subword FIRST_SUBWORD first. */
2486 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2488 /* Floating point and vector registers cannot be split. */
2489 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2492 /* We don't need to split if operands are directly accessible. */
2493 if (s_operand (src, mode) || s_operand (dst, mode))
2496 /* Non-offsettable memory references cannot be split. */
2497 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2498 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2501 /* Moving the first subword must not clobber a register
2502 needed to move the second subword. */
2503 if (register_operand (dst, mode))
2505 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2506 if (reg_overlap_mentioned_p (subreg, src))
2513 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2514 and [MEM2, MEM2 + SIZE] do overlap and false
2518 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2520 rtx addr1, addr2, addr_delta;
2521 HOST_WIDE_INT delta;
2523 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2529 addr1 = XEXP (mem1, 0);
2530 addr2 = XEXP (mem2, 0);
2532 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2534 /* This overlapping check is used by peepholes merging memory block operations.
2535 Overlapping operations would otherwise be recognized by the S/390 hardware
2536 and would fall back to a slower implementation. Allowing overlapping
2537 operations would lead to slow code but not to wrong code. Therefore we are
2538 somewhat optimistic if we cannot prove that the memory blocks are
2540 That's why we return false here although this may accept operations on
2541 overlapping memory areas. */
2542 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2545 delta = INTVAL (addr_delta);
2548 || (delta > 0 && delta < size)
2549 || (delta < 0 && -delta < size))
2555 /* Check whether the address of memory reference MEM2 equals exactly
2556 the address of memory reference MEM1 plus DELTA. Return true if
2557 we can prove this to be the case, false otherwise. */
2560 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2562 rtx addr1, addr2, addr_delta;
2564 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2567 addr1 = XEXP (mem1, 0);
2568 addr2 = XEXP (mem2, 0);
2570 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2571 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2577 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2580 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2583 machine_mode wmode = mode;
2584 rtx dst = operands[0];
2585 rtx src1 = operands[1];
2586 rtx src2 = operands[2];
2589 /* If we cannot handle the operation directly, use a temp register. */
2590 if (!s390_logical_operator_ok_p (operands))
2591 dst = gen_reg_rtx (mode);
2593 /* QImode and HImode patterns make sense only if we have a destination
2594 in memory. Otherwise perform the operation in SImode. */
2595 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2598 /* Widen operands if required. */
2601 if (GET_CODE (dst) == SUBREG
2602 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2604 else if (REG_P (dst))
2605 dst = gen_rtx_SUBREG (wmode, dst, 0);
2607 dst = gen_reg_rtx (wmode);
2609 if (GET_CODE (src1) == SUBREG
2610 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2612 else if (GET_MODE (src1) != VOIDmode)
2613 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2615 if (GET_CODE (src2) == SUBREG
2616 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2618 else if (GET_MODE (src2) != VOIDmode)
2619 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2622 /* Emit the instruction. */
2623 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2624 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2625 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2627 /* Fix up the destination if needed. */
2628 if (dst != operands[0])
2629 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2632 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2635 s390_logical_operator_ok_p (rtx *operands)
2637 /* If the destination operand is in memory, it needs to coincide
2638 with one of the source operands. After reload, it has to be
2639 the first source operand. */
2640 if (GET_CODE (operands[0]) == MEM)
2641 return rtx_equal_p (operands[0], operands[1])
2642 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2647 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2648 operand IMMOP to switch from SS to SI type instructions. */
2651 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2653 int def = code == AND ? -1 : 0;
2657 gcc_assert (GET_CODE (*memop) == MEM);
2658 gcc_assert (!MEM_VOLATILE_P (*memop));
2660 mask = s390_extract_part (*immop, QImode, def);
2661 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2662 gcc_assert (part >= 0);
2664 *memop = adjust_address (*memop, QImode, part);
2665 *immop = gen_int_mode (mask, QImode);
2669 /* How to allocate a 'struct machine_function'. */
2671 static struct machine_function *
2672 s390_init_machine_status (void)
2674 return ggc_cleared_alloc<machine_function> ();
2677 /* Map for smallest class containing reg regno. */
2679 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2680 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2681 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2682 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2683 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2684 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2685 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2686 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2687 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2688 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2689 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2690 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2691 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2692 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2693 VEC_REGS, VEC_REGS /* 52 */
2696 /* Return attribute type of insn. */
2698 static enum attr_type
2699 s390_safe_attr_type (rtx_insn *insn)
2701 if (recog_memoized (insn) >= 0)
2702 return get_attr_type (insn);
2707 /* Return true if DISP is a valid short displacement. */
2710 s390_short_displacement (rtx disp)
2712 /* No displacement is OK. */
2716 /* Without the long displacement facility we don't need to
2717 distingiush between long and short displacement. */
2718 if (!TARGET_LONG_DISPLACEMENT)
2721 /* Integer displacement in range. */
2722 if (GET_CODE (disp) == CONST_INT)
2723 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2725 /* GOT offset is not OK, the GOT can be large. */
2726 if (GET_CODE (disp) == CONST
2727 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2728 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2729 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2732 /* All other symbolic constants are literal pool references,
2733 which are OK as the literal pool must be small. */
2734 if (GET_CODE (disp) == CONST)
2740 /* Decompose a RTL expression ADDR for a memory address into
2741 its components, returned in OUT.
2743 Returns false if ADDR is not a valid memory address, true
2744 otherwise. If OUT is NULL, don't return the components,
2745 but check for validity only.
2747 Note: Only addresses in canonical form are recognized.
2748 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2749 canonical form so that they will be recognized. */
2752 s390_decompose_address (rtx addr, struct s390_address *out)
2754 HOST_WIDE_INT offset = 0;
2755 rtx base = NULL_RTX;
2756 rtx indx = NULL_RTX;
2757 rtx disp = NULL_RTX;
2759 bool pointer = false;
2760 bool base_ptr = false;
2761 bool indx_ptr = false;
2762 bool literal_pool = false;
2764 /* We may need to substitute the literal pool base register into the address
2765 below. However, at this point we do not know which register is going to
2766 be used as base, so we substitute the arg pointer register. This is going
2767 to be treated as holding a pointer below -- it shouldn't be used for any
2769 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2771 /* Decompose address into base + index + displacement. */
2773 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2776 else if (GET_CODE (addr) == PLUS)
2778 rtx op0 = XEXP (addr, 0);
2779 rtx op1 = XEXP (addr, 1);
2780 enum rtx_code code0 = GET_CODE (op0);
2781 enum rtx_code code1 = GET_CODE (op1);
2783 if (code0 == REG || code0 == UNSPEC)
2785 if (code1 == REG || code1 == UNSPEC)
2787 indx = op0; /* index + base */
2793 base = op0; /* base + displacement */
2798 else if (code0 == PLUS)
2800 indx = XEXP (op0, 0); /* index + base + disp */
2801 base = XEXP (op0, 1);
2812 disp = addr; /* displacement */
2814 /* Extract integer part of displacement. */
2818 if (GET_CODE (disp) == CONST_INT)
2820 offset = INTVAL (disp);
2823 else if (GET_CODE (disp) == CONST
2824 && GET_CODE (XEXP (disp, 0)) == PLUS
2825 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2827 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2828 disp = XEXP (XEXP (disp, 0), 0);
2832 /* Strip off CONST here to avoid special case tests later. */
2833 if (disp && GET_CODE (disp) == CONST)
2834 disp = XEXP (disp, 0);
2836 /* We can convert literal pool addresses to
2837 displacements by basing them off the base register. */
2838 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2840 /* Either base or index must be free to hold the base register. */
2842 base = fake_pool_base, literal_pool = true;
2844 indx = fake_pool_base, literal_pool = true;
2848 /* Mark up the displacement. */
2849 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2850 UNSPEC_LTREL_OFFSET);
2853 /* Validate base register. */
2856 if (GET_CODE (base) == UNSPEC)
2857 switch (XINT (base, 1))
2861 disp = gen_rtx_UNSPEC (Pmode,
2862 gen_rtvec (1, XVECEXP (base, 0, 0)),
2863 UNSPEC_LTREL_OFFSET);
2867 base = XVECEXP (base, 0, 1);
2870 case UNSPEC_LTREL_BASE:
2871 if (XVECLEN (base, 0) == 1)
2872 base = fake_pool_base, literal_pool = true;
2874 base = XVECEXP (base, 0, 1);
2881 if (!REG_P (base) || GET_MODE (base) != Pmode)
2884 if (REGNO (base) == STACK_POINTER_REGNUM
2885 || REGNO (base) == FRAME_POINTER_REGNUM
2886 || ((reload_completed || reload_in_progress)
2887 && frame_pointer_needed
2888 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2889 || REGNO (base) == ARG_POINTER_REGNUM
2891 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2892 pointer = base_ptr = true;
2894 if ((reload_completed || reload_in_progress)
2895 && base == cfun->machine->base_reg)
2896 pointer = base_ptr = literal_pool = true;
2899 /* Validate index register. */
2902 if (GET_CODE (indx) == UNSPEC)
2903 switch (XINT (indx, 1))
2907 disp = gen_rtx_UNSPEC (Pmode,
2908 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2909 UNSPEC_LTREL_OFFSET);
2913 indx = XVECEXP (indx, 0, 1);
2916 case UNSPEC_LTREL_BASE:
2917 if (XVECLEN (indx, 0) == 1)
2918 indx = fake_pool_base, literal_pool = true;
2920 indx = XVECEXP (indx, 0, 1);
2927 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2930 if (REGNO (indx) == STACK_POINTER_REGNUM
2931 || REGNO (indx) == FRAME_POINTER_REGNUM
2932 || ((reload_completed || reload_in_progress)
2933 && frame_pointer_needed
2934 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2935 || REGNO (indx) == ARG_POINTER_REGNUM
2937 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2938 pointer = indx_ptr = true;
2940 if ((reload_completed || reload_in_progress)
2941 && indx == cfun->machine->base_reg)
2942 pointer = indx_ptr = literal_pool = true;
2945 /* Prefer to use pointer as base, not index. */
2946 if (base && indx && !base_ptr
2947 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2954 /* Validate displacement. */
2957 /* If virtual registers are involved, the displacement will change later
2958 anyway as the virtual registers get eliminated. This could make a
2959 valid displacement invalid, but it is more likely to make an invalid
2960 displacement valid, because we sometimes access the register save area
2961 via negative offsets to one of those registers.
2962 Thus we don't check the displacement for validity here. If after
2963 elimination the displacement turns out to be invalid after all,
2964 this is fixed up by reload in any case. */
2965 /* LRA maintains always displacements up to date and we need to
2966 know the displacement is right during all LRA not only at the
2967 final elimination. */
2969 || (base != arg_pointer_rtx
2970 && indx != arg_pointer_rtx
2971 && base != return_address_pointer_rtx
2972 && indx != return_address_pointer_rtx
2973 && base != frame_pointer_rtx
2974 && indx != frame_pointer_rtx
2975 && base != virtual_stack_vars_rtx
2976 && indx != virtual_stack_vars_rtx))
2977 if (!DISP_IN_RANGE (offset))
2982 /* All the special cases are pointers. */
2985 /* In the small-PIC case, the linker converts @GOT
2986 and @GOTNTPOFF offsets to possible displacements. */
2987 if (GET_CODE (disp) == UNSPEC
2988 && (XINT (disp, 1) == UNSPEC_GOT
2989 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2995 /* Accept pool label offsets. */
2996 else if (GET_CODE (disp) == UNSPEC
2997 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
3000 /* Accept literal pool references. */
3001 else if (GET_CODE (disp) == UNSPEC
3002 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
3004 /* In case CSE pulled a non literal pool reference out of
3005 the pool we have to reject the address. This is
3006 especially important when loading the GOT pointer on non
3007 zarch CPUs. In this case the literal pool contains an lt
3008 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
3009 will most likely exceed the displacement. */
3010 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
3011 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
3014 orig_disp = gen_rtx_CONST (Pmode, disp);
3017 /* If we have an offset, make sure it does not
3018 exceed the size of the constant pool entry. */
3019 rtx sym = XVECEXP (disp, 0, 0);
3020 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
3023 orig_disp = plus_constant (Pmode, orig_disp, offset);
3038 out->disp = orig_disp;
3039 out->pointer = pointer;
3040 out->literal_pool = literal_pool;
3046 /* Decompose a RTL expression OP for an address style operand into its
3047 components, and return the base register in BASE and the offset in
3048 OFFSET. While OP looks like an address it is never supposed to be
3051 Return true if OP is a valid address operand, false if not. */
3054 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3055 HOST_WIDE_INT *offset)
3059 /* We can have an integer constant, an address register,
3060 or a sum of the two. */
3061 if (CONST_SCALAR_INT_P (op))
3066 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3071 while (op && GET_CODE (op) == SUBREG)
3072 op = SUBREG_REG (op);
3074 if (op && GET_CODE (op) != REG)
3079 if (off == NULL_RTX)
3081 else if (CONST_INT_P (off))
3082 *offset = INTVAL (off);
3083 else if (CONST_WIDE_INT_P (off))
3084 /* The offset will anyway be cut down to 12 bits so take just
3085 the lowest order chunk of the wide int. */
3086 *offset = CONST_WIDE_INT_ELT (off, 0);
3097 /* Return true if CODE is a valid address without index. */
3100 s390_legitimate_address_without_index_p (rtx op)
3102 struct s390_address addr;
3104 if (!s390_decompose_address (XEXP (op, 0), &addr))
3113 /* Return TRUE if ADDR is an operand valid for a load/store relative
3114 instruction. Be aware that the alignment of the operand needs to
3115 be checked separately.
3116 Valid addresses are single references or a sum of a reference and a
3117 constant integer. Return these parts in SYMREF and ADDEND. You can
3118 pass NULL in REF and/or ADDEND if you are not interested in these
3119 values. Literal pool references are *not* considered symbol
3123 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3125 HOST_WIDE_INT tmpaddend = 0;
3127 if (GET_CODE (addr) == CONST)
3128 addr = XEXP (addr, 0);
3130 if (GET_CODE (addr) == PLUS)
3132 if (!CONST_INT_P (XEXP (addr, 1)))
3135 tmpaddend = INTVAL (XEXP (addr, 1));
3136 addr = XEXP (addr, 0);
3139 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3140 || (GET_CODE (addr) == UNSPEC
3141 && (XINT (addr, 1) == UNSPEC_GOTENT
3142 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3147 *addend = tmpaddend;
3154 /* Return true if the address in OP is valid for constraint letter C
3155 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if it literal
3156 pool MEMs should be accepted. Only the Q, R, S, T constraint
3157 letters are allowed for C. */
3160 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3162 struct s390_address addr;
3163 bool decomposed = false;
3165 /* This check makes sure that no symbolic address (except literal
3166 pool references) are accepted by the R or T constraints. */
3167 if (s390_loadrelative_operand_p (op, NULL, NULL))
3170 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3173 if (!s390_decompose_address (op, &addr))
3175 if (addr.literal_pool)
3180 /* With reload, we sometimes get intermediate address forms that are
3181 actually invalid as-is, but we need to accept them in the most
3182 generic cases below ('R' or 'T'), since reload will in fact fix
3183 them up. LRA behaves differently here; we never see such forms,
3184 but on the other hand, we need to strictly reject every invalid
3185 address form. Perform this check right up front. */
3186 if (lra_in_progress)
3188 if (!decomposed && !s390_decompose_address (op, &addr))
3195 case 'Q': /* no index short displacement */
3196 if (!decomposed && !s390_decompose_address (op, &addr))
3200 if (!s390_short_displacement (addr.disp))
3204 case 'R': /* with index short displacement */
3205 if (TARGET_LONG_DISPLACEMENT)
3207 if (!decomposed && !s390_decompose_address (op, &addr))
3209 if (!s390_short_displacement (addr.disp))
3212 /* Any invalid address here will be fixed up by reload,
3213 so accept it for the most generic constraint. */
3216 case 'S': /* no index long displacement */
3217 if (!decomposed && !s390_decompose_address (op, &addr))
3223 case 'T': /* with index long displacement */
3224 /* Any invalid address here will be fixed up by reload,
3225 so accept it for the most generic constraint. */
3235 /* Evaluates constraint strings described by the regular expression
3236 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3237 the constraint given in STR, or 0 else. */
3240 s390_mem_constraint (const char *str, rtx op)
3247 /* Check for offsettable variants of memory constraints. */
3248 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3250 if ((reload_completed || reload_in_progress)
3251 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3253 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3255 /* Check for non-literal-pool variants of memory constraints. */
3258 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3263 if (GET_CODE (op) != MEM)
3265 return s390_check_qrst_address (c, XEXP (op, 0), true);
3267 /* Simply check for the basic form of a shift count. Reload will
3268 take care of making sure we have a proper base register. */
3269 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3273 return s390_check_qrst_address (str[1], op, true);
3281 /* Evaluates constraint strings starting with letter O. Input
3282 parameter C is the second letter following the "O" in the constraint
3283 string. Returns 1 if VALUE meets the respective constraint and 0
3287 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3295 return trunc_int_for_mode (value, SImode) == value;
3299 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3302 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3310 /* Evaluates constraint strings starting with letter N. Parameter STR
3311 contains the letters following letter "N" in the constraint string.
3312 Returns true if VALUE matches the constraint. */
3315 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3317 machine_mode mode, part_mode;
3319 int part, part_goal;
3325 part_goal = str[0] - '0';
3369 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3372 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3375 if (part_goal != -1 && part_goal != part)
3382 /* Returns true if the input parameter VALUE is a float zero. */
3385 s390_float_const_zero_p (rtx value)
3387 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3388 && value == CONST0_RTX (GET_MODE (value)));
3391 /* Implement TARGET_REGISTER_MOVE_COST. */
3394 s390_register_move_cost (machine_mode mode,
3395 reg_class_t from, reg_class_t to)
3397 /* On s390, copy between fprs and gprs is expensive. */
3399 /* It becomes somewhat faster having ldgr/lgdr. */
3400 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3402 /* ldgr is single cycle. */
3403 if (reg_classes_intersect_p (from, GENERAL_REGS)
3404 && reg_classes_intersect_p (to, FP_REGS))
3406 /* lgdr needs 3 cycles. */
3407 if (reg_classes_intersect_p (to, GENERAL_REGS)
3408 && reg_classes_intersect_p (from, FP_REGS))
3412 /* Otherwise copying is done via memory. */
3413 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3414 && reg_classes_intersect_p (to, FP_REGS))
3415 || (reg_classes_intersect_p (from, FP_REGS)
3416 && reg_classes_intersect_p (to, GENERAL_REGS)))
3422 /* Implement TARGET_MEMORY_MOVE_COST. */
3425 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3426 reg_class_t rclass ATTRIBUTE_UNUSED,
3427 bool in ATTRIBUTE_UNUSED)
3432 /* Compute a (partial) cost for rtx X. Return true if the complete
3433 cost has been computed, and false if subexpressions should be
3434 scanned. In either case, *TOTAL contains the cost result. The
3435 initial value of *TOTAL is the default value computed by
3436 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3437 code of the superexpression of x. */
3440 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3441 int opno ATTRIBUTE_UNUSED,
3442 int *total, bool speed ATTRIBUTE_UNUSED)
3444 int code = GET_CODE (x);
3452 case CONST_WIDE_INT:
3459 if (GET_CODE (XEXP (x, 0)) == AND
3460 && GET_CODE (XEXP (x, 1)) == ASHIFT
3461 && REG_P (XEXP (XEXP (x, 0), 0))
3462 && REG_P (XEXP (XEXP (x, 1), 0))
3463 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3464 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3465 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3466 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3468 *total = COSTS_N_INSNS (2);
3481 *total = COSTS_N_INSNS (1);
3486 *total = COSTS_N_INSNS (1);
3494 rtx left = XEXP (x, 0);
3495 rtx right = XEXP (x, 1);
3496 if (GET_CODE (right) == CONST_INT
3497 && CONST_OK_FOR_K (INTVAL (right)))
3498 *total = s390_cost->mhi;
3499 else if (GET_CODE (left) == SIGN_EXTEND)
3500 *total = s390_cost->mh;
3502 *total = s390_cost->ms; /* msr, ms, msy */
3507 rtx left = XEXP (x, 0);
3508 rtx right = XEXP (x, 1);
3511 if (GET_CODE (right) == CONST_INT
3512 && CONST_OK_FOR_K (INTVAL (right)))
3513 *total = s390_cost->mghi;
3514 else if (GET_CODE (left) == SIGN_EXTEND)
3515 *total = s390_cost->msgf;
3517 *total = s390_cost->msg; /* msgr, msg */
3519 else /* TARGET_31BIT */
3521 if (GET_CODE (left) == SIGN_EXTEND
3522 && GET_CODE (right) == SIGN_EXTEND)
3523 /* mulsidi case: mr, m */
3524 *total = s390_cost->m;
3525 else if (GET_CODE (left) == ZERO_EXTEND
3526 && GET_CODE (right) == ZERO_EXTEND
3527 && TARGET_CPU_ZARCH)
3528 /* umulsidi case: ml, mlr */
3529 *total = s390_cost->ml;
3531 /* Complex calculation is required. */
3532 *total = COSTS_N_INSNS (40);
3538 *total = s390_cost->mult_df;
3541 *total = s390_cost->mxbr;
3552 *total = s390_cost->madbr;
3555 *total = s390_cost->maebr;
3560 /* Negate in the third argument is free: FMSUB. */
3561 if (GET_CODE (XEXP (x, 2)) == NEG)
3563 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3564 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3565 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3572 if (mode == TImode) /* 128 bit division */
3573 *total = s390_cost->dlgr;
3574 else if (mode == DImode)
3576 rtx right = XEXP (x, 1);
3577 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3578 *total = s390_cost->dlr;
3579 else /* 64 by 64 bit division */
3580 *total = s390_cost->dlgr;
3582 else if (mode == SImode) /* 32 bit division */
3583 *total = s390_cost->dlr;
3590 rtx right = XEXP (x, 1);
3591 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3593 *total = s390_cost->dsgfr;
3595 *total = s390_cost->dr;
3596 else /* 64 by 64 bit division */
3597 *total = s390_cost->dsgr;
3599 else if (mode == SImode) /* 32 bit division */
3600 *total = s390_cost->dlr;
3601 else if (mode == SFmode)
3603 *total = s390_cost->debr;
3605 else if (mode == DFmode)
3607 *total = s390_cost->ddbr;
3609 else if (mode == TFmode)
3611 *total = s390_cost->dxbr;
3617 *total = s390_cost->sqebr;
3618 else if (mode == DFmode)
3619 *total = s390_cost->sqdbr;
3621 *total = s390_cost->sqxbr;
3626 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3627 || outer_code == PLUS || outer_code == MINUS
3628 || outer_code == COMPARE)
3633 *total = COSTS_N_INSNS (1);
3634 if (GET_CODE (XEXP (x, 0)) == AND
3635 && GET_CODE (XEXP (x, 1)) == CONST_INT
3636 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3638 rtx op0 = XEXP (XEXP (x, 0), 0);
3639 rtx op1 = XEXP (XEXP (x, 0), 1);
3640 rtx op2 = XEXP (x, 1);
3642 if (memory_operand (op0, GET_MODE (op0))
3643 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3645 if (register_operand (op0, GET_MODE (op0))
3646 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3656 /* Return the cost of an address rtx ADDR. */
3659 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3660 addr_space_t as ATTRIBUTE_UNUSED,
3661 bool speed ATTRIBUTE_UNUSED)
3663 struct s390_address ad;
3664 if (!s390_decompose_address (addr, &ad))
3667 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3670 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3671 otherwise return 0. */
3674 tls_symbolic_operand (rtx op)
3676 if (GET_CODE (op) != SYMBOL_REF)
3678 return SYMBOL_REF_TLS_MODEL (op);
3681 /* Split DImode access register reference REG (on 64-bit) into its constituent
3682 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3683 gen_highpart cannot be used as they assume all registers are word-sized,
3684 while our access registers have only half that size. */
3687 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3689 gcc_assert (TARGET_64BIT);
3690 gcc_assert (ACCESS_REG_P (reg));
3691 gcc_assert (GET_MODE (reg) == DImode);
3692 gcc_assert (!(REGNO (reg) & 1));
3694 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3695 *hi = gen_rtx_REG (SImode, REGNO (reg));
3698 /* Return true if OP contains a symbol reference */
3701 symbolic_reference_mentioned_p (rtx op)
3706 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3709 fmt = GET_RTX_FORMAT (GET_CODE (op));
3710 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3716 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3717 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3721 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3728 /* Return true if OP contains a reference to a thread-local symbol. */
3731 tls_symbolic_reference_mentioned_p (rtx op)
3736 if (GET_CODE (op) == SYMBOL_REF)
3737 return tls_symbolic_operand (op);
3739 fmt = GET_RTX_FORMAT (GET_CODE (op));
3740 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3746 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3747 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3751 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3759 /* Return true if OP is a legitimate general operand when
3760 generating PIC code. It is given that flag_pic is on
3761 and that OP satisfies CONSTANT_P. */
3764 legitimate_pic_operand_p (rtx op)
3766 /* Accept all non-symbolic constants. */
3767 if (!SYMBOLIC_CONST (op))
3770 /* Reject everything else; must be handled
3771 via emit_symbolic_move. */
3775 /* Returns true if the constant value OP is a legitimate general operand.
3776 It is given that OP satisfies CONSTANT_P. */
3779 s390_legitimate_constant_p (machine_mode mode, rtx op)
3781 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3783 if (GET_MODE_SIZE (mode) != 16)
3786 if (!satisfies_constraint_j00 (op)
3787 && !satisfies_constraint_jm1 (op)
3788 && !satisfies_constraint_jKK (op)
3789 && !satisfies_constraint_jxx (op)
3790 && !satisfies_constraint_jyy (op))
3794 /* Accept all non-symbolic constants. */
3795 if (!SYMBOLIC_CONST (op))
3798 /* Accept immediate LARL operands. */
3799 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3802 /* Thread-local symbols are never legal constants. This is
3803 so that emit_call knows that computing such addresses
3804 might require a function call. */
3805 if (TLS_SYMBOLIC_CONST (op))
3808 /* In the PIC case, symbolic constants must *not* be
3809 forced into the literal pool. We accept them here,
3810 so that they will be handled by emit_symbolic_move. */
3814 /* All remaining non-PIC symbolic constants are
3815 forced into the literal pool. */
3819 /* Determine if it's legal to put X into the constant pool. This
3820 is not possible if X contains the address of a symbol that is
3821 not constant (TLS) or not known at final link time (PIC). */
3824 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3826 switch (GET_CODE (x))
3830 case CONST_WIDE_INT:
3832 /* Accept all non-symbolic constants. */
3836 /* Labels are OK iff we are non-PIC. */
3837 return flag_pic != 0;
3840 /* 'Naked' TLS symbol references are never OK,
3841 non-TLS symbols are OK iff we are non-PIC. */
3842 if (tls_symbolic_operand (x))
3845 return flag_pic != 0;
3848 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3851 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3852 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3855 switch (XINT (x, 1))
3857 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3858 case UNSPEC_LTREL_OFFSET:
3866 case UNSPEC_GOTNTPOFF:
3867 case UNSPEC_INDNTPOFF:
3870 /* If the literal pool shares the code section, be put
3871 execute template placeholders into the pool as well. */
3873 return TARGET_CPU_ZARCH;
3885 /* Returns true if the constant value OP is a legitimate general
3886 operand during and after reload. The difference to
3887 legitimate_constant_p is that this function will not accept
3888 a constant that would need to be forced to the literal pool
3889 before it can be used as operand.
3890 This function accepts all constants which can be loaded directly
3894 legitimate_reload_constant_p (rtx op)
3896 /* Accept la(y) operands. */
3897 if (GET_CODE (op) == CONST_INT
3898 && DISP_IN_RANGE (INTVAL (op)))
3901 /* Accept l(g)hi/l(g)fi operands. */
3902 if (GET_CODE (op) == CONST_INT
3903 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3906 /* Accept lliXX operands. */
3908 && GET_CODE (op) == CONST_INT
3909 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3910 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3914 && GET_CODE (op) == CONST_INT
3915 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3916 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3919 /* Accept larl operands. */
3920 if (TARGET_CPU_ZARCH
3921 && larl_operand (op, VOIDmode))
3924 /* Accept floating-point zero operands that fit into a single GPR. */
3925 if (GET_CODE (op) == CONST_DOUBLE
3926 && s390_float_const_zero_p (op)
3927 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3930 /* Accept double-word operands that can be split. */
3931 if (GET_CODE (op) == CONST_WIDE_INT
3932 || (GET_CODE (op) == CONST_INT
3933 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3935 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3936 rtx hi = operand_subword (op, 0, 0, dword_mode);
3937 rtx lo = operand_subword (op, 1, 0, dword_mode);
3938 return legitimate_reload_constant_p (hi)
3939 && legitimate_reload_constant_p (lo);
3942 /* Everything else cannot be handled without reload. */
3946 /* Returns true if the constant value OP is a legitimate fp operand
3947 during and after reload.
3948 This function accepts all constants which can be loaded directly
3952 legitimate_reload_fp_constant_p (rtx op)
3954 /* Accept floating-point zero operands if the load zero instruction
3955 can be used. Prior to z196 the load fp zero instruction caused a
3956 performance penalty if the result is used as BFP number. */
3958 && GET_CODE (op) == CONST_DOUBLE
3959 && s390_float_const_zero_p (op))
3965 /* Returns true if the constant value OP is a legitimate vector operand
3966 during and after reload.
3967 This function accepts all constants which can be loaded directly
3971 legitimate_reload_vector_constant_p (rtx op)
3973 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3974 && (satisfies_constraint_j00 (op)
3975 || satisfies_constraint_jm1 (op)
3976 || satisfies_constraint_jKK (op)
3977 || satisfies_constraint_jxx (op)
3978 || satisfies_constraint_jyy (op)))
3984 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3985 return the class of reg to actually use. */
3988 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3990 switch (GET_CODE (op))
3992 /* Constants we cannot reload into general registers
3993 must be forced into the literal pool. */
3997 case CONST_WIDE_INT:
3998 if (reg_class_subset_p (GENERAL_REGS, rclass)
3999 && legitimate_reload_constant_p (op))
4000 return GENERAL_REGS;
4001 else if (reg_class_subset_p (ADDR_REGS, rclass)
4002 && legitimate_reload_constant_p (op))
4004 else if (reg_class_subset_p (FP_REGS, rclass)
4005 && legitimate_reload_fp_constant_p (op))
4007 else if (reg_class_subset_p (VEC_REGS, rclass)
4008 && legitimate_reload_vector_constant_p (op))
4013 /* If a symbolic constant or a PLUS is reloaded,
4014 it is most likely being used as an address, so
4015 prefer ADDR_REGS. If 'class' is not a superset
4016 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4018 /* Symrefs cannot be pushed into the literal pool with -fPIC
4019 so we *MUST NOT* return NO_REGS for these cases
4020 (s390_cannot_force_const_mem will return true).
4022 On the other hand we MUST return NO_REGS for symrefs with
4023 invalid addend which might have been pushed to the literal
4024 pool (no -fPIC). Usually we would expect them to be
4025 handled via secondary reload but this does not happen if
4026 they are used as literal pool slot replacement in reload
4027 inheritance (see emit_input_reload_insns). */
4028 if (TARGET_CPU_ZARCH
4029 && GET_CODE (XEXP (op, 0)) == PLUS
4030 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4031 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4033 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4041 if (!legitimate_reload_constant_p (op))
4045 /* load address will be used. */
4046 if (reg_class_subset_p (ADDR_REGS, rclass))
4058 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4059 multiple of ALIGNMENT and the SYMBOL_REF being naturally
4063 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4065 HOST_WIDE_INT addend;
4068 /* The "required alignment" might be 0 (e.g. for certain structs
4069 accessed via BLKmode). Early abort in this case, as well as when
4070 an alignment > 8 is required. */
4071 if (alignment < 2 || alignment > 8)
4074 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4077 if (addend & (alignment - 1))
4080 if (GET_CODE (symref) == SYMBOL_REF)
4082 /* We have load-relative instructions for 2-byte, 4-byte, and
4083 8-byte alignment so allow only these. */
4086 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4087 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4088 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4089 default: return false;
4093 if (GET_CODE (symref) == UNSPEC
4094 && alignment <= UNITS_PER_LONG)
4100 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4101 operand SCRATCH is used to reload the even part of the address and
4105 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4107 HOST_WIDE_INT addend;
4110 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4114 /* Easy case. The addend is even so larl will do fine. */
4115 emit_move_insn (reg, addr);
4118 /* We can leave the scratch register untouched if the target
4119 register is a valid base register. */
4120 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4121 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4124 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4125 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4128 emit_move_insn (scratch,
4129 gen_rtx_CONST (Pmode,
4130 gen_rtx_PLUS (Pmode, symref,
4131 GEN_INT (addend - 1))));
4133 emit_move_insn (scratch, symref);
4135 /* Increment the address using la in order to avoid clobbering cc. */
4136 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4140 /* Generate what is necessary to move between REG and MEM using
4141 SCRATCH. The direction is given by TOMEM. */
4144 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4146 /* Reload might have pulled a constant out of the literal pool.
4147 Force it back in. */
4148 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4149 || GET_CODE (mem) == CONST_WIDE_INT
4150 || GET_CODE (mem) == CONST_VECTOR
4151 || GET_CODE (mem) == CONST)
4152 mem = force_const_mem (GET_MODE (reg), mem);
4154 gcc_assert (MEM_P (mem));
4156 /* For a load from memory we can leave the scratch register
4157 untouched if the target register is a valid base register. */
4159 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4160 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4161 && GET_MODE (reg) == GET_MODE (scratch))
4164 /* Load address into scratch register. Since we can't have a
4165 secondary reload for a secondary reload we have to cover the case
4166 where larl would need a secondary reload here as well. */
4167 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4169 /* Now we can use a standard load/store to do the move. */
4171 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4173 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4176 /* Inform reload about cases where moving X with a mode MODE to a register in
4177 RCLASS requires an extra scratch or immediate register. Return the class
4178 needed for the immediate register. */
4181 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4182 machine_mode mode, secondary_reload_info *sri)
4184 enum reg_class rclass = (enum reg_class) rclass_i;
4186 /* Intermediate register needed. */
4187 if (reg_classes_intersect_p (CC_REGS, rclass))
4188 return GENERAL_REGS;
4192 /* The vst/vl vector move instructions allow only for short
4195 && GET_CODE (XEXP (x, 0)) == PLUS
4196 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4197 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4198 && reg_class_subset_p (rclass, VEC_REGS)
4199 && (!reg_class_subset_p (rclass, FP_REGS)
4200 || (GET_MODE_SIZE (mode) > 8
4201 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4204 sri->icode = (TARGET_64BIT ?
4205 CODE_FOR_reloaddi_la_in :
4206 CODE_FOR_reloadsi_la_in);
4208 sri->icode = (TARGET_64BIT ?
4209 CODE_FOR_reloaddi_la_out :
4210 CODE_FOR_reloadsi_la_out);
4216 HOST_WIDE_INT offset;
4219 /* On z10 several optimizer steps may generate larl operands with
4222 && s390_loadrelative_operand_p (x, &symref, &offset)
4224 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4225 && (offset & 1) == 1)
4226 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4227 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4229 /* Handle all the (mem (symref)) accesses we cannot use the z10
4230 instructions for. */
4232 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4234 || !reg_class_subset_p (rclass, GENERAL_REGS)
4235 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4236 || !s390_check_symref_alignment (XEXP (x, 0),
4237 GET_MODE_SIZE (mode))))
4239 #define __SECONDARY_RELOAD_CASE(M,m) \
4242 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4243 CODE_FOR_reload##m##di_tomem_z10; \
4245 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4246 CODE_FOR_reload##m##si_tomem_z10; \
4249 switch (GET_MODE (x))
4251 __SECONDARY_RELOAD_CASE (QI, qi);
4252 __SECONDARY_RELOAD_CASE (HI, hi);
4253 __SECONDARY_RELOAD_CASE (SI, si);
4254 __SECONDARY_RELOAD_CASE (DI, di);
4255 __SECONDARY_RELOAD_CASE (TI, ti);
4256 __SECONDARY_RELOAD_CASE (SF, sf);
4257 __SECONDARY_RELOAD_CASE (DF, df);
4258 __SECONDARY_RELOAD_CASE (TF, tf);
4259 __SECONDARY_RELOAD_CASE (SD, sd);
4260 __SECONDARY_RELOAD_CASE (DD, dd);
4261 __SECONDARY_RELOAD_CASE (TD, td);
4262 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4263 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4264 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4265 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4266 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4267 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4268 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4269 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4270 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4271 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4272 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4273 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4274 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4275 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4276 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4277 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4278 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4279 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4280 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4281 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4282 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4286 #undef __SECONDARY_RELOAD_CASE
4290 /* We need a scratch register when loading a PLUS expression which
4291 is not a legitimate operand of the LOAD ADDRESS instruction. */
4292 /* LRA can deal with transformation of plus op very well -- so we
4293 don't need to prompt LRA in this case. */
4294 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4295 sri->icode = (TARGET_64BIT ?
4296 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4298 /* Performing a multiword move from or to memory we have to make sure the
4299 second chunk in memory is addressable without causing a displacement
4300 overflow. If that would be the case we calculate the address in
4301 a scratch register. */
4303 && GET_CODE (XEXP (x, 0)) == PLUS
4304 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4305 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4306 + GET_MODE_SIZE (mode) - 1))
4308 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4309 in a s_operand address since we may fallback to lm/stm. So we only
4310 have to care about overflows in the b+i+d case. */
4311 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4312 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4313 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4314 /* For FP_REGS no lm/stm is available so this check is triggered
4315 for displacement overflows in b+i+d and b+d like addresses. */
4316 || (reg_classes_intersect_p (FP_REGS, rclass)
4317 && s390_class_max_nregs (FP_REGS, mode) > 1))
4320 sri->icode = (TARGET_64BIT ?
4321 CODE_FOR_reloaddi_la_in :
4322 CODE_FOR_reloadsi_la_in);
4324 sri->icode = (TARGET_64BIT ?
4325 CODE_FOR_reloaddi_la_out :
4326 CODE_FOR_reloadsi_la_out);
4330 /* A scratch address register is needed when a symbolic constant is
4331 copied to r0 compiling with -fPIC. In other cases the target
4332 register might be used as temporary (see legitimize_pic_address). */
4333 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4334 sri->icode = (TARGET_64BIT ?
4335 CODE_FOR_reloaddi_PIC_addr :
4336 CODE_FOR_reloadsi_PIC_addr);
4338 /* Either scratch or no register needed. */
4342 /* Generate code to load SRC, which is PLUS that is not a
4343 legitimate operand for the LA instruction, into TARGET.
4344 SCRATCH may be used as scratch register. */
4347 s390_expand_plus_operand (rtx target, rtx src,
4351 struct s390_address ad;
4353 /* src must be a PLUS; get its two operands. */
4354 gcc_assert (GET_CODE (src) == PLUS);
4355 gcc_assert (GET_MODE (src) == Pmode);
4357 /* Check if any of the two operands is already scheduled
4358 for replacement by reload. This can happen e.g. when
4359 float registers occur in an address. */
4360 sum1 = find_replacement (&XEXP (src, 0));
4361 sum2 = find_replacement (&XEXP (src, 1));
4362 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4364 /* If the address is already strictly valid, there's nothing to do. */
4365 if (!s390_decompose_address (src, &ad)
4366 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4367 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4369 /* Otherwise, one of the operands cannot be an address register;
4370 we reload its value into the scratch register. */
4371 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4373 emit_move_insn (scratch, sum1);
4376 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4378 emit_move_insn (scratch, sum2);
4382 /* According to the way these invalid addresses are generated
4383 in reload.c, it should never happen (at least on s390) that
4384 *neither* of the PLUS components, after find_replacements
4385 was applied, is an address register. */
4386 if (sum1 == scratch && sum2 == scratch)
4392 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4395 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4396 is only ever performed on addresses, so we can mark the
4397 sum as legitimate for LA in any case. */
4398 s390_load_address (target, src);
4402 /* Return true if ADDR is a valid memory address.
4403 STRICT specifies whether strict register checking applies. */
4406 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4408 struct s390_address ad;
4411 && larl_operand (addr, VOIDmode)
4412 && (mode == VOIDmode
4413 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4416 if (!s390_decompose_address (addr, &ad))
4421 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4424 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4430 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4431 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4435 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4436 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4442 /* Return true if OP is a valid operand for the LA instruction.
4443 In 31-bit, we need to prove that the result is used as an
4444 address, as LA performs only a 31-bit addition. */
4447 legitimate_la_operand_p (rtx op)
4449 struct s390_address addr;
4450 if (!s390_decompose_address (op, &addr))
4453 return (TARGET_64BIT || addr.pointer);
4456 /* Return true if it is valid *and* preferable to use LA to
4457 compute the sum of OP1 and OP2. */
4460 preferred_la_operand_p (rtx op1, rtx op2)
4462 struct s390_address addr;
4464 if (op2 != const0_rtx)
4465 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4467 if (!s390_decompose_address (op1, &addr))
4469 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4471 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4474 /* Avoid LA instructions with index register on z196; it is
4475 preferable to use regular add instructions when possible.
4476 Starting with zEC12 the la with index register is "uncracked"
4478 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4481 if (!TARGET_64BIT && !addr.pointer)
4487 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4488 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4494 /* Emit a forced load-address operation to load SRC into DST.
4495 This will use the LOAD ADDRESS instruction even in situations
4496 where legitimate_la_operand_p (SRC) returns false. */
4499 s390_load_address (rtx dst, rtx src)
4502 emit_move_insn (dst, src);
4504 emit_insn (gen_force_la_31 (dst, src));
4507 /* Return a legitimate reference for ORIG (an address) using the
4508 register REG. If REG is 0, a new pseudo is generated.
4510 There are two types of references that must be handled:
4512 1. Global data references must load the address from the GOT, via
4513 the PIC reg. An insn is emitted to do this load, and the reg is
4516 2. Static data references, constant pool addresses, and code labels
4517 compute the address as an offset from the GOT, whose base is in
4518 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4519 differentiate them from global data objects. The returned
4520 address is the PIC reg + an unspec constant.
4522 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4523 reg also appears in the address. */
4526 legitimize_pic_address (rtx orig, rtx reg)
4529 rtx addend = const0_rtx;
4532 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4534 if (GET_CODE (addr) == CONST)
4535 addr = XEXP (addr, 0);
4537 if (GET_CODE (addr) == PLUS)
4539 addend = XEXP (addr, 1);
4540 addr = XEXP (addr, 0);
4543 if ((GET_CODE (addr) == LABEL_REF
4544 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4545 || (GET_CODE (addr) == UNSPEC &&
4546 (XINT (addr, 1) == UNSPEC_GOTENT
4547 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4548 && GET_CODE (addend) == CONST_INT)
4550 /* This can be locally addressed. */
4552 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4553 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4554 gen_rtx_CONST (Pmode, addr) : addr);
4556 if (TARGET_CPU_ZARCH
4557 && larl_operand (const_addr, VOIDmode)
4558 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4559 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4561 if (INTVAL (addend) & 1)
4563 /* LARL can't handle odd offsets, so emit a pair of LARL
4565 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4567 if (!DISP_IN_RANGE (INTVAL (addend)))
4569 HOST_WIDE_INT even = INTVAL (addend) - 1;
4570 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4571 addr = gen_rtx_CONST (Pmode, addr);
4572 addend = const1_rtx;
4575 emit_move_insn (temp, addr);
4576 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4580 s390_load_address (reg, new_rtx);
4586 /* If the offset is even, we can just use LARL. This
4587 will happen automatically. */
4592 /* No larl - Access local symbols relative to the GOT. */
4594 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4596 if (reload_in_progress || reload_completed)
4597 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4599 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4600 if (addend != const0_rtx)
4601 addr = gen_rtx_PLUS (Pmode, addr, addend);
4602 addr = gen_rtx_CONST (Pmode, addr);
4603 addr = force_const_mem (Pmode, addr);
4604 emit_move_insn (temp, addr);
4606 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4609 s390_load_address (reg, new_rtx);
4614 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4616 /* A non-local symbol reference without addend.
4618 The symbol ref is wrapped into an UNSPEC to make sure the
4619 proper operand modifier (@GOT or @GOTENT) will be emitted.
4620 This will tell the linker to put the symbol into the GOT.
4622 Additionally the code dereferencing the GOT slot is emitted here.
4624 An addend to the symref needs to be added afterwards.
4625 legitimize_pic_address calls itself recursively to handle
4626 that case. So no need to do it here. */
4629 reg = gen_reg_rtx (Pmode);
4633 /* Use load relative if possible.
4634 lgrl <target>, sym@GOTENT */
4635 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4636 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4637 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4639 emit_move_insn (reg, new_rtx);
4642 else if (flag_pic == 1)
4644 /* Assume GOT offset is a valid displacement operand (< 4k
4645 or < 512k with z990). This is handled the same way in
4646 both 31- and 64-bit code (@GOT).
4647 lg <target>, sym@GOT(r12) */
4649 if (reload_in_progress || reload_completed)
4650 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4652 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4653 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4654 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4655 new_rtx = gen_const_mem (Pmode, new_rtx);
4656 emit_move_insn (reg, new_rtx);
4659 else if (TARGET_CPU_ZARCH)
4661 /* If the GOT offset might be >= 4k, we determine the position
4662 of the GOT entry via a PC-relative LARL (@GOTENT).
4663 larl temp, sym@GOTENT
4664 lg <target>, 0(temp) */
4666 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4668 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4669 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4671 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4672 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4673 emit_move_insn (temp, new_rtx);
4675 new_rtx = gen_const_mem (Pmode, temp);
4676 emit_move_insn (reg, new_rtx);
4682 /* If the GOT offset might be >= 4k, we have to load it
4683 from the literal pool (@GOT).
4685 lg temp, lit-litbase(r13)
4686 lg <target>, 0(temp)
4687 lit: .long sym@GOT */
4689 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4691 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4692 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4694 if (reload_in_progress || reload_completed)
4695 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4697 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4698 addr = gen_rtx_CONST (Pmode, addr);
4699 addr = force_const_mem (Pmode, addr);
4700 emit_move_insn (temp, addr);
4702 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4703 new_rtx = gen_const_mem (Pmode, new_rtx);
4704 emit_move_insn (reg, new_rtx);
4708 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4710 gcc_assert (XVECLEN (addr, 0) == 1);
4711 switch (XINT (addr, 1))
4713 /* These address symbols (or PLT slots) relative to the GOT
4714 (not GOT slots!). In general this will exceed the
4715 displacement range so these value belong into the literal
4719 new_rtx = force_const_mem (Pmode, orig);
4722 /* For -fPIC the GOT size might exceed the displacement
4723 range so make sure the value is in the literal pool. */
4726 new_rtx = force_const_mem (Pmode, orig);
4729 /* For @GOTENT larl is used. This is handled like local
4735 /* @PLT is OK as is on 64-bit, must be converted to
4736 GOT-relative @PLTOFF on 31-bit. */
4738 if (!TARGET_CPU_ZARCH)
4740 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4742 if (reload_in_progress || reload_completed)
4743 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4745 addr = XVECEXP (addr, 0, 0);
4746 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4748 if (addend != const0_rtx)
4749 addr = gen_rtx_PLUS (Pmode, addr, addend);
4750 addr = gen_rtx_CONST (Pmode, addr);
4751 addr = force_const_mem (Pmode, addr);
4752 emit_move_insn (temp, addr);
4754 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4757 s390_load_address (reg, new_rtx);
4762 /* On 64 bit larl can be used. This case is handled like
4763 local symbol refs. */
4767 /* Everything else cannot happen. */
4772 else if (addend != const0_rtx)
4774 /* Otherwise, compute the sum. */
4776 rtx base = legitimize_pic_address (addr, reg);
4777 new_rtx = legitimize_pic_address (addend,
4778 base == reg ? NULL_RTX : reg);
4779 if (GET_CODE (new_rtx) == CONST_INT)
4780 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4783 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4785 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4786 new_rtx = XEXP (new_rtx, 1);
4788 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4791 if (GET_CODE (new_rtx) == CONST)
4792 new_rtx = XEXP (new_rtx, 0);
4793 new_rtx = force_operand (new_rtx, 0);
4799 /* Load the thread pointer into a register. */
4802 s390_get_thread_pointer (void)
4804 rtx tp = gen_reg_rtx (Pmode);
4806 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4807 mark_reg_pointer (tp, BITS_PER_WORD);
4812 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4813 in s390_tls_symbol which always refers to __tls_get_offset.
4814 The returned offset is written to RESULT_REG and an USE rtx is
4815 generated for TLS_CALL. */
4817 static GTY(()) rtx s390_tls_symbol;
4820 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4825 emit_insn (s390_load_got ());
4827 if (!s390_tls_symbol)
4828 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4830 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4831 gen_rtx_REG (Pmode, RETURN_REGNUM));
4833 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4834 RTL_CONST_CALL_P (insn) = 1;
4837 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4838 this (thread-local) address. REG may be used as temporary. */
4841 legitimize_tls_address (rtx addr, rtx reg)
4843 rtx new_rtx, tls_call, temp, base, r2;
4846 if (GET_CODE (addr) == SYMBOL_REF)
4847 switch (tls_symbolic_operand (addr))
4849 case TLS_MODEL_GLOBAL_DYNAMIC:
4851 r2 = gen_rtx_REG (Pmode, 2);
4852 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4853 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4854 new_rtx = force_const_mem (Pmode, new_rtx);
4855 emit_move_insn (r2, new_rtx);
4856 s390_emit_tls_call_insn (r2, tls_call);
4857 insn = get_insns ();
4860 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4861 temp = gen_reg_rtx (Pmode);
4862 emit_libcall_block (insn, temp, r2, new_rtx);
4864 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4867 s390_load_address (reg, new_rtx);
4872 case TLS_MODEL_LOCAL_DYNAMIC:
4874 r2 = gen_rtx_REG (Pmode, 2);
4875 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4876 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4877 new_rtx = force_const_mem (Pmode, new_rtx);
4878 emit_move_insn (r2, new_rtx);
4879 s390_emit_tls_call_insn (r2, tls_call);
4880 insn = get_insns ();
4883 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4884 temp = gen_reg_rtx (Pmode);
4885 emit_libcall_block (insn, temp, r2, new_rtx);
4887 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4888 base = gen_reg_rtx (Pmode);
4889 s390_load_address (base, new_rtx);
4891 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4892 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4893 new_rtx = force_const_mem (Pmode, new_rtx);
4894 temp = gen_reg_rtx (Pmode);
4895 emit_move_insn (temp, new_rtx);
4897 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4900 s390_load_address (reg, new_rtx);
4905 case TLS_MODEL_INITIAL_EXEC:
4908 /* Assume GOT offset < 4k. This is handled the same way
4909 in both 31- and 64-bit code. */
4911 if (reload_in_progress || reload_completed)
4912 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4914 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4915 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4916 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4917 new_rtx = gen_const_mem (Pmode, new_rtx);
4918 temp = gen_reg_rtx (Pmode);
4919 emit_move_insn (temp, new_rtx);
4921 else if (TARGET_CPU_ZARCH)
4923 /* If the GOT offset might be >= 4k, we determine the position
4924 of the GOT entry via a PC-relative LARL. */
4926 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4927 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4928 temp = gen_reg_rtx (Pmode);
4929 emit_move_insn (temp, new_rtx);
4931 new_rtx = gen_const_mem (Pmode, temp);
4932 temp = gen_reg_rtx (Pmode);
4933 emit_move_insn (temp, new_rtx);
4937 /* If the GOT offset might be >= 4k, we have to load it
4938 from the literal pool. */
4940 if (reload_in_progress || reload_completed)
4941 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4943 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4944 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4945 new_rtx = force_const_mem (Pmode, new_rtx);
4946 temp = gen_reg_rtx (Pmode);
4947 emit_move_insn (temp, new_rtx);
4949 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4950 new_rtx = gen_const_mem (Pmode, new_rtx);
4952 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4953 temp = gen_reg_rtx (Pmode);
4954 emit_insn (gen_rtx_SET (temp, new_rtx));
4958 /* In position-dependent code, load the absolute address of
4959 the GOT entry from the literal pool. */
4961 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4962 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4963 new_rtx = force_const_mem (Pmode, new_rtx);
4964 temp = gen_reg_rtx (Pmode);
4965 emit_move_insn (temp, new_rtx);
4968 new_rtx = gen_const_mem (Pmode, new_rtx);
4969 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4970 temp = gen_reg_rtx (Pmode);
4971 emit_insn (gen_rtx_SET (temp, new_rtx));
4974 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4977 s390_load_address (reg, new_rtx);
4982 case TLS_MODEL_LOCAL_EXEC:
4983 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4984 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4985 new_rtx = force_const_mem (Pmode, new_rtx);
4986 temp = gen_reg_rtx (Pmode);
4987 emit_move_insn (temp, new_rtx);
4989 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4992 s390_load_address (reg, new_rtx);
5001 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5003 switch (XINT (XEXP (addr, 0), 1))
5005 case UNSPEC_INDNTPOFF:
5006 gcc_assert (TARGET_CPU_ZARCH);
5015 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5016 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5018 new_rtx = XEXP (XEXP (addr, 0), 0);
5019 if (GET_CODE (new_rtx) != SYMBOL_REF)
5020 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5022 new_rtx = legitimize_tls_address (new_rtx, reg);
5023 new_rtx = plus_constant (Pmode, new_rtx,
5024 INTVAL (XEXP (XEXP (addr, 0), 1)));
5025 new_rtx = force_operand (new_rtx, 0);
5029 gcc_unreachable (); /* for now ... */
5034 /* Emit insns making the address in operands[1] valid for a standard
5035 move to operands[0]. operands[1] is replaced by an address which
5036 should be used instead of the former RTX to emit the move
5040 emit_symbolic_move (rtx *operands)
5042 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5044 if (GET_CODE (operands[0]) == MEM)
5045 operands[1] = force_reg (Pmode, operands[1]);
5046 else if (TLS_SYMBOLIC_CONST (operands[1]))
5047 operands[1] = legitimize_tls_address (operands[1], temp);
5049 operands[1] = legitimize_pic_address (operands[1], temp);
5052 /* Try machine-dependent ways of modifying an illegitimate address X
5053 to be legitimate. If we find one, return the new, valid address.
5055 OLDX is the address as it was before break_out_memory_refs was called.
5056 In some cases it is useful to look at this to decide what needs to be done.
5058 MODE is the mode of the operand pointed to by X.
5060 When -fpic is used, special handling is needed for symbolic references.
5061 See comments by legitimize_pic_address for details. */
5064 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5065 machine_mode mode ATTRIBUTE_UNUSED)
5067 rtx constant_term = const0_rtx;
5069 if (TLS_SYMBOLIC_CONST (x))
5071 x = legitimize_tls_address (x, 0);
5073 if (s390_legitimate_address_p (mode, x, FALSE))
5076 else if (GET_CODE (x) == PLUS
5077 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5078 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5084 if (SYMBOLIC_CONST (x)
5085 || (GET_CODE (x) == PLUS
5086 && (SYMBOLIC_CONST (XEXP (x, 0))
5087 || SYMBOLIC_CONST (XEXP (x, 1)))))
5088 x = legitimize_pic_address (x, 0);
5090 if (s390_legitimate_address_p (mode, x, FALSE))
5094 x = eliminate_constant_term (x, &constant_term);
5096 /* Optimize loading of large displacements by splitting them
5097 into the multiple of 4K and the rest; this allows the
5098 former to be CSE'd if possible.
5100 Don't do this if the displacement is added to a register
5101 pointing into the stack frame, as the offsets will
5102 change later anyway. */
5104 if (GET_CODE (constant_term) == CONST_INT
5105 && !TARGET_LONG_DISPLACEMENT
5106 && !DISP_IN_RANGE (INTVAL (constant_term))
5107 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5109 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5110 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5112 rtx temp = gen_reg_rtx (Pmode);
5113 rtx val = force_operand (GEN_INT (upper), temp);
5115 emit_move_insn (temp, val);
5117 x = gen_rtx_PLUS (Pmode, x, temp);
5118 constant_term = GEN_INT (lower);
5121 if (GET_CODE (x) == PLUS)
5123 if (GET_CODE (XEXP (x, 0)) == REG)
5125 rtx temp = gen_reg_rtx (Pmode);
5126 rtx val = force_operand (XEXP (x, 1), temp);
5128 emit_move_insn (temp, val);
5130 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5133 else if (GET_CODE (XEXP (x, 1)) == REG)
5135 rtx temp = gen_reg_rtx (Pmode);
5136 rtx val = force_operand (XEXP (x, 0), temp);
5138 emit_move_insn (temp, val);
5140 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5144 if (constant_term != const0_rtx)
5145 x = gen_rtx_PLUS (Pmode, x, constant_term);
5150 /* Try a machine-dependent way of reloading an illegitimate address AD
5151 operand. If we find one, push the reload and return the new address.
5153 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5154 and TYPE is the reload type of the current reload. */
5157 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5158 int opnum, int type)
5160 if (!optimize || TARGET_LONG_DISPLACEMENT)
5163 if (GET_CODE (ad) == PLUS)
5165 rtx tem = simplify_binary_operation (PLUS, Pmode,
5166 XEXP (ad, 0), XEXP (ad, 1));
5171 if (GET_CODE (ad) == PLUS
5172 && GET_CODE (XEXP (ad, 0)) == REG
5173 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5174 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5176 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5177 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5178 rtx cst, tem, new_rtx;
5180 cst = GEN_INT (upper);
5181 if (!legitimate_reload_constant_p (cst))
5182 cst = force_const_mem (Pmode, cst);
5184 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5185 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5187 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5188 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5189 opnum, (enum reload_type) type);
5196 /* Emit code to move LEN bytes from DST to SRC. */
5199 s390_expand_movmem (rtx dst, rtx src, rtx len)
5201 /* When tuning for z10 or higher we rely on the Glibc functions to
5202 do the right thing. Only for constant lengths below 64k we will
5203 generate inline code. */
5204 if (s390_tune >= PROCESSOR_2097_Z10
5205 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5208 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5210 if (INTVAL (len) > 0)
5211 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5214 else if (TARGET_MVCLE)
5216 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5221 rtx dst_addr, src_addr, count, blocks, temp;
5222 rtx_code_label *loop_start_label = gen_label_rtx ();
5223 rtx_code_label *loop_end_label = gen_label_rtx ();
5224 rtx_code_label *end_label = gen_label_rtx ();
5227 mode = GET_MODE (len);
5228 if (mode == VOIDmode)
5231 dst_addr = gen_reg_rtx (Pmode);
5232 src_addr = gen_reg_rtx (Pmode);
5233 count = gen_reg_rtx (mode);
5234 blocks = gen_reg_rtx (mode);
5236 convert_move (count, len, 1);
5237 emit_cmp_and_jump_insns (count, const0_rtx,
5238 EQ, NULL_RTX, mode, 1, end_label);
5240 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5241 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5242 dst = change_address (dst, VOIDmode, dst_addr);
5243 src = change_address (src, VOIDmode, src_addr);
5245 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5248 emit_move_insn (count, temp);
5250 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5253 emit_move_insn (blocks, temp);
5255 emit_cmp_and_jump_insns (blocks, const0_rtx,
5256 EQ, NULL_RTX, mode, 1, loop_end_label);
5258 emit_label (loop_start_label);
5261 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5265 /* Issue a read prefetch for the +3 cache line. */
5266 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5267 const0_rtx, const0_rtx);
5268 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5269 emit_insn (prefetch);
5271 /* Issue a write prefetch for the +3 cache line. */
5272 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5273 const1_rtx, const0_rtx);
5274 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5275 emit_insn (prefetch);
5278 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5279 s390_load_address (dst_addr,
5280 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5281 s390_load_address (src_addr,
5282 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5284 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5287 emit_move_insn (blocks, temp);
5289 emit_cmp_and_jump_insns (blocks, const0_rtx,
5290 EQ, NULL_RTX, mode, 1, loop_end_label);
5292 emit_jump (loop_start_label);
5293 emit_label (loop_end_label);
5295 emit_insn (gen_movmem_short (dst, src,
5296 convert_to_mode (Pmode, count, 1)));
5297 emit_label (end_label);
5302 /* Emit code to set LEN bytes at DST to VAL.
5303 Make use of clrmem if VAL is zero. */
5306 s390_expand_setmem (rtx dst, rtx len, rtx val)
5308 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5311 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5313 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5315 if (val == const0_rtx && INTVAL (len) <= 256)
5316 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5319 /* Initialize memory by storing the first byte. */
5320 emit_move_insn (adjust_address (dst, QImode, 0), val);
5322 if (INTVAL (len) > 1)
5324 /* Initiate 1 byte overlap move.
5325 The first byte of DST is propagated through DSTP1.
5326 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5327 DST is set to size 1 so the rest of the memory location
5328 does not count as source operand. */
5329 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5330 set_mem_size (dst, 1);
5332 emit_insn (gen_movmem_short (dstp1, dst,
5333 GEN_INT (INTVAL (len) - 2)));
5338 else if (TARGET_MVCLE)
5340 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5342 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5345 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5351 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5352 rtx_code_label *loop_start_label = gen_label_rtx ();
5353 rtx_code_label *loop_end_label = gen_label_rtx ();
5354 rtx_code_label *end_label = gen_label_rtx ();
5357 mode = GET_MODE (len);
5358 if (mode == VOIDmode)
5361 dst_addr = gen_reg_rtx (Pmode);
5362 count = gen_reg_rtx (mode);
5363 blocks = gen_reg_rtx (mode);
5365 convert_move (count, len, 1);
5366 emit_cmp_and_jump_insns (count, const0_rtx,
5367 EQ, NULL_RTX, mode, 1, end_label);
5369 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5370 dst = change_address (dst, VOIDmode, dst_addr);
5372 if (val == const0_rtx)
5373 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5377 dstp1 = adjust_address (dst, VOIDmode, 1);
5378 set_mem_size (dst, 1);
5380 /* Initialize memory by storing the first byte. */
5381 emit_move_insn (adjust_address (dst, QImode, 0), val);
5383 /* If count is 1 we are done. */
5384 emit_cmp_and_jump_insns (count, const1_rtx,
5385 EQ, NULL_RTX, mode, 1, end_label);
5387 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5391 emit_move_insn (count, temp);
5393 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5396 emit_move_insn (blocks, temp);
5398 emit_cmp_and_jump_insns (blocks, const0_rtx,
5399 EQ, NULL_RTX, mode, 1, loop_end_label);
5401 emit_label (loop_start_label);
5404 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5406 /* Issue a write prefetch for the +4 cache line. */
5407 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5409 const1_rtx, const0_rtx);
5410 emit_insn (prefetch);
5411 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5414 if (val == const0_rtx)
5415 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5417 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5418 s390_load_address (dst_addr,
5419 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5421 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5424 emit_move_insn (blocks, temp);
5426 emit_cmp_and_jump_insns (blocks, const0_rtx,
5427 EQ, NULL_RTX, mode, 1, loop_end_label);
5429 emit_jump (loop_start_label);
5430 emit_label (loop_end_label);
5432 if (val == const0_rtx)
5433 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5435 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5436 emit_label (end_label);
5440 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5441 and return the result in TARGET. */
5444 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5446 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5449 /* When tuning for z10 or higher we rely on the Glibc functions to
5450 do the right thing. Only for constant lengths below 64k we will
5451 generate inline code. */
5452 if (s390_tune >= PROCESSOR_2097_Z10
5453 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5456 /* As the result of CMPINT is inverted compared to what we need,
5457 we have to swap the operands. */
5458 tmp = op0; op0 = op1; op1 = tmp;
5460 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5462 if (INTVAL (len) > 0)
5464 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5465 emit_insn (gen_cmpint (target, ccreg));
5468 emit_move_insn (target, const0_rtx);
5470 else if (TARGET_MVCLE)
5472 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5473 emit_insn (gen_cmpint (target, ccreg));
5477 rtx addr0, addr1, count, blocks, temp;
5478 rtx_code_label *loop_start_label = gen_label_rtx ();
5479 rtx_code_label *loop_end_label = gen_label_rtx ();
5480 rtx_code_label *end_label = gen_label_rtx ();
5483 mode = GET_MODE (len);
5484 if (mode == VOIDmode)
5487 addr0 = gen_reg_rtx (Pmode);
5488 addr1 = gen_reg_rtx (Pmode);
5489 count = gen_reg_rtx (mode);
5490 blocks = gen_reg_rtx (mode);
5492 convert_move (count, len, 1);
5493 emit_cmp_and_jump_insns (count, const0_rtx,
5494 EQ, NULL_RTX, mode, 1, end_label);
5496 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5497 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5498 op0 = change_address (op0, VOIDmode, addr0);
5499 op1 = change_address (op1, VOIDmode, addr1);
5501 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5504 emit_move_insn (count, temp);
5506 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5509 emit_move_insn (blocks, temp);
5511 emit_cmp_and_jump_insns (blocks, const0_rtx,
5512 EQ, NULL_RTX, mode, 1, loop_end_label);
5514 emit_label (loop_start_label);
5517 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5521 /* Issue a read prefetch for the +2 cache line of operand 1. */
5522 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5523 const0_rtx, const0_rtx);
5524 emit_insn (prefetch);
5525 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5527 /* Issue a read prefetch for the +2 cache line of operand 2. */
5528 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5529 const0_rtx, const0_rtx);
5530 emit_insn (prefetch);
5531 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5534 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5535 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5536 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5537 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5538 temp = gen_rtx_SET (pc_rtx, temp);
5539 emit_jump_insn (temp);
5541 s390_load_address (addr0,
5542 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5543 s390_load_address (addr1,
5544 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5546 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5549 emit_move_insn (blocks, temp);
5551 emit_cmp_and_jump_insns (blocks, const0_rtx,
5552 EQ, NULL_RTX, mode, 1, loop_end_label);
5554 emit_jump (loop_start_label);
5555 emit_label (loop_end_label);
5557 emit_insn (gen_cmpmem_short (op0, op1,
5558 convert_to_mode (Pmode, count, 1)));
5559 emit_label (end_label);
5561 emit_insn (gen_cmpint (target, ccreg));
5566 /* Emit a conditional jump to LABEL for condition code mask MASK using
5567 comparsion operator COMPARISON. Return the emitted jump insn. */
5570 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5574 gcc_assert (comparison == EQ || comparison == NE);
5575 gcc_assert (mask > 0 && mask < 15);
5577 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5578 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5579 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5580 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5581 temp = gen_rtx_SET (pc_rtx, temp);
5582 return emit_jump_insn (temp);
5585 /* Emit the instructions to implement strlen of STRING and store the
5586 result in TARGET. The string has the known ALIGNMENT. This
5587 version uses vector instructions and is therefore not appropriate
5588 for targets prior to z13. */
5591 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5593 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5594 int very_likely = REG_BR_PROB_BASE - 1;
5595 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5596 rtx str_reg = gen_reg_rtx (V16QImode);
5597 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5598 rtx str_idx_reg = gen_reg_rtx (Pmode);
5599 rtx result_reg = gen_reg_rtx (V16QImode);
5600 rtx is_aligned_label = gen_label_rtx ();
5601 rtx into_loop_label = NULL_RTX;
5602 rtx loop_start_label = gen_label_rtx ();
5604 rtx len = gen_reg_rtx (QImode);
5607 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5608 emit_move_insn (str_idx_reg, const0_rtx);
5610 if (INTVAL (alignment) < 16)
5612 /* Check whether the address happens to be aligned properly so
5613 jump directly to the aligned loop. */
5614 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5615 str_addr_base_reg, GEN_INT (15)),
5616 const0_rtx, EQ, NULL_RTX,
5617 Pmode, 1, is_aligned_label);
5619 temp = gen_reg_rtx (Pmode);
5620 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5621 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5622 gcc_assert (REG_P (temp));
5623 highest_index_to_load_reg =
5624 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5625 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5626 gcc_assert (REG_P (highest_index_to_load_reg));
5627 emit_insn (gen_vllv16qi (str_reg,
5628 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5629 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5631 into_loop_label = gen_label_rtx ();
5632 s390_emit_jump (into_loop_label, NULL_RTX);
5636 emit_label (is_aligned_label);
5637 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5639 /* Reaching this point we are only performing 16 bytes aligned
5641 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5643 emit_label (loop_start_label);
5644 LABEL_NUSES (loop_start_label) = 1;
5646 /* Load 16 bytes of the string into VR. */
5647 emit_move_insn (str_reg,
5648 gen_rtx_MEM (V16QImode,
5649 gen_rtx_PLUS (Pmode, str_idx_reg,
5650 str_addr_base_reg)));
5651 if (into_loop_label != NULL_RTX)
5653 emit_label (into_loop_label);
5654 LABEL_NUSES (into_loop_label) = 1;
5657 /* Increment string index by 16 bytes. */
5658 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5659 str_idx_reg, 1, OPTAB_DIRECT);
5661 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5662 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5664 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5665 REG_BR_PROB, very_likely);
5666 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5668 /* If the string pointer wasn't aligned we have loaded less then 16
5669 bytes and the remaining bytes got filled with zeros (by vll).
5670 Now we have to check whether the resulting index lies within the
5671 bytes actually part of the string. */
5673 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5674 highest_index_to_load_reg);
5675 s390_load_address (highest_index_to_load_reg,
5676 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5679 emit_insn (gen_movdicc (str_idx_reg, cond,
5680 highest_index_to_load_reg, str_idx_reg));
5682 emit_insn (gen_movsicc (str_idx_reg, cond,
5683 highest_index_to_load_reg, str_idx_reg));
5685 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5688 expand_binop (Pmode, add_optab, str_idx_reg,
5689 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5690 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5692 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5693 convert_to_mode (Pmode, len, 1),
5694 target, 1, OPTAB_DIRECT);
5696 emit_move_insn (target, temp);
5700 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5702 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5703 rtx temp = gen_reg_rtx (Pmode);
5704 rtx src_addr = XEXP (src, 0);
5705 rtx dst_addr = XEXP (dst, 0);
5706 rtx src_addr_reg = gen_reg_rtx (Pmode);
5707 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5708 rtx offset = gen_reg_rtx (Pmode);
5709 rtx vsrc = gen_reg_rtx (V16QImode);
5710 rtx vpos = gen_reg_rtx (V16QImode);
5711 rtx loadlen = gen_reg_rtx (SImode);
5712 rtx gpos_qi = gen_reg_rtx(QImode);
5713 rtx gpos = gen_reg_rtx (SImode);
5714 rtx done_label = gen_label_rtx ();
5715 rtx loop_label = gen_label_rtx ();
5716 rtx exit_label = gen_label_rtx ();
5717 rtx full_label = gen_label_rtx ();
5719 /* Perform a quick check for string ending on the first up to 16
5720 bytes and exit early if successful. */
5722 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5723 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5724 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5725 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5726 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5727 /* gpos is the byte index if a zero was found and 16 otherwise.
5728 So if it is lower than the loaded bytes we have a hit. */
5729 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5731 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5733 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5735 emit_jump (exit_label);
5738 emit_label (full_label);
5739 LABEL_NUSES (full_label) = 1;
5741 /* Calculate `offset' so that src + offset points to the last byte
5742 before 16 byte alignment. */
5744 /* temp = src_addr & 0xf */
5745 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5748 /* offset = 0xf - temp */
5749 emit_move_insn (offset, GEN_INT (15));
5750 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5753 /* Store `offset' bytes in the dstination string. The quick check
5754 has loaded at least `offset' bytes into vsrc. */
5756 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5758 /* Advance to the next byte to be loaded. */
5759 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5762 /* Make sure the addresses are single regs which can be used as a
5764 emit_move_insn (src_addr_reg, src_addr);
5765 emit_move_insn (dst_addr_reg, dst_addr);
5769 emit_label (loop_label);
5770 LABEL_NUSES (loop_label) = 1;
5772 emit_move_insn (vsrc,
5773 gen_rtx_MEM (V16QImode,
5774 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5776 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5777 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5778 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5779 REG_BR_PROB, very_unlikely);
5781 emit_move_insn (gen_rtx_MEM (V16QImode,
5782 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5785 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5786 offset, 1, OPTAB_DIRECT);
5788 emit_jump (loop_label);
5793 /* We are done. Add the offset of the zero character to the dst_addr
5794 pointer to get the result. */
5796 emit_label (done_label);
5797 LABEL_NUSES (done_label) = 1;
5799 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5802 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5803 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5805 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5807 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5812 emit_label (exit_label);
5813 LABEL_NUSES (exit_label) = 1;
5817 /* Expand conditional increment or decrement using alc/slb instructions.
5818 Should generate code setting DST to either SRC or SRC + INCREMENT,
5819 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5820 Returns true if successful, false otherwise.
5822 That makes it possible to implement some if-constructs without jumps e.g.:
5823 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5824 unsigned int a, b, c;
5825 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5826 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5827 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5828 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5830 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5831 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5832 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5833 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5834 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
5837 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5838 rtx dst, rtx src, rtx increment)
5840 machine_mode cmp_mode;
5841 machine_mode cc_mode;
5847 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5848 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5850 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5851 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5856 /* Try ADD LOGICAL WITH CARRY. */
5857 if (increment == const1_rtx)
5859 /* Determine CC mode to use. */
5860 if (cmp_code == EQ || cmp_code == NE)
5862 if (cmp_op1 != const0_rtx)
5864 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5865 NULL_RTX, 0, OPTAB_WIDEN);
5866 cmp_op1 = const0_rtx;
5869 cmp_code = cmp_code == EQ ? LEU : GTU;
5872 if (cmp_code == LTU || cmp_code == LEU)
5877 cmp_code = swap_condition (cmp_code);
5894 /* Emit comparison instruction pattern. */
5895 if (!register_operand (cmp_op0, cmp_mode))
5896 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5898 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5899 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5900 /* We use insn_invalid_p here to add clobbers if required. */
5901 ret = insn_invalid_p (emit_insn (insn), false);
5904 /* Emit ALC instruction pattern. */
5905 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5906 gen_rtx_REG (cc_mode, CC_REGNUM),
5909 if (src != const0_rtx)
5911 if (!register_operand (src, GET_MODE (dst)))
5912 src = force_reg (GET_MODE (dst), src);
5914 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5915 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5918 p = rtvec_alloc (2);
5920 gen_rtx_SET (dst, op_res);
5922 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5923 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5928 /* Try SUBTRACT LOGICAL WITH BORROW. */
5929 if (increment == constm1_rtx)
5931 /* Determine CC mode to use. */
5932 if (cmp_code == EQ || cmp_code == NE)
5934 if (cmp_op1 != const0_rtx)
5936 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5937 NULL_RTX, 0, OPTAB_WIDEN);
5938 cmp_op1 = const0_rtx;
5941 cmp_code = cmp_code == EQ ? LEU : GTU;
5944 if (cmp_code == GTU || cmp_code == GEU)
5949 cmp_code = swap_condition (cmp_code);
5966 /* Emit comparison instruction pattern. */
5967 if (!register_operand (cmp_op0, cmp_mode))
5968 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5970 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5971 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5972 /* We use insn_invalid_p here to add clobbers if required. */
5973 ret = insn_invalid_p (emit_insn (insn), false);
5976 /* Emit SLB instruction pattern. */
5977 if (!register_operand (src, GET_MODE (dst)))
5978 src = force_reg (GET_MODE (dst), src);
5980 op_res = gen_rtx_MINUS (GET_MODE (dst),
5981 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5982 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5983 gen_rtx_REG (cc_mode, CC_REGNUM),
5985 p = rtvec_alloc (2);
5987 gen_rtx_SET (dst, op_res);
5989 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5990 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5998 /* Expand code for the insv template. Return true if successful. */
6001 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6003 int bitsize = INTVAL (op1);
6004 int bitpos = INTVAL (op2);
6005 machine_mode mode = GET_MODE (dest);
6007 int smode_bsize, mode_bsize;
6010 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6013 /* Generate INSERT IMMEDIATE (IILL et al). */
6014 /* (set (ze (reg)) (const_int)). */
6016 && register_operand (dest, word_mode)
6017 && (bitpos % 16) == 0
6018 && (bitsize % 16) == 0
6019 && const_int_operand (src, VOIDmode))
6021 HOST_WIDE_INT val = INTVAL (src);
6022 int regpos = bitpos + bitsize;
6024 while (regpos > bitpos)
6026 machine_mode putmode;
6029 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6034 putsize = GET_MODE_BITSIZE (putmode);
6036 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6039 gen_int_mode (val, putmode));
6042 gcc_assert (regpos == bitpos);
6046 smode = smallest_mode_for_size (bitsize, MODE_INT);
6047 smode_bsize = GET_MODE_BITSIZE (smode);
6048 mode_bsize = GET_MODE_BITSIZE (mode);
6050 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6052 && (bitsize % BITS_PER_UNIT) == 0
6054 && (register_operand (src, word_mode)
6055 || const_int_operand (src, VOIDmode)))
6057 /* Emit standard pattern if possible. */
6058 if (smode_bsize == bitsize)
6060 emit_move_insn (adjust_address (dest, smode, 0),
6061 gen_lowpart (smode, src));
6065 /* (set (ze (mem)) (const_int)). */
6066 else if (const_int_operand (src, VOIDmode))
6068 int size = bitsize / BITS_PER_UNIT;
6069 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6071 UNITS_PER_WORD - size);
6073 dest = adjust_address (dest, BLKmode, 0);
6074 set_mem_size (dest, size);
6075 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6079 /* (set (ze (mem)) (reg)). */
6080 else if (register_operand (src, word_mode))
6083 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6087 /* Emit st,stcmh sequence. */
6088 int stcmh_width = bitsize - 32;
6089 int size = stcmh_width / BITS_PER_UNIT;
6091 emit_move_insn (adjust_address (dest, SImode, size),
6092 gen_lowpart (SImode, src));
6093 set_mem_size (dest, size);
6094 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6095 GEN_INT (stcmh_width),
6097 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6103 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6104 if ((bitpos % BITS_PER_UNIT) == 0
6105 && (bitsize % BITS_PER_UNIT) == 0
6106 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6108 && (mode == DImode || mode == SImode)
6109 && register_operand (dest, mode))
6111 /* Emit a strict_low_part pattern if possible. */
6112 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6114 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6115 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6116 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6117 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6121 /* ??? There are more powerful versions of ICM that are not
6122 completely represented in the md file. */
6125 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6126 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6128 machine_mode mode_s = GET_MODE (src);
6130 if (CONSTANT_P (src))
6132 /* For constant zero values the representation with AND
6133 appears to be folded in more situations than the (set
6134 (zero_extract) ...).
6135 We only do this when the start and end of the bitfield
6136 remain in the same SImode chunk. That way nihf or nilf
6138 The AND patterns might still generate a risbg for this. */
6139 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6142 src = force_reg (mode, src);
6144 else if (mode_s != mode)
6146 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6147 src = force_reg (mode_s, src);
6148 src = gen_lowpart (mode, src);
6151 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6152 op = gen_rtx_SET (op, src);
6156 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6157 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6167 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6168 register that holds VAL of mode MODE shifted by COUNT bits. */
6171 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6173 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6174 NULL_RTX, 1, OPTAB_DIRECT);
6175 return expand_simple_binop (SImode, ASHIFT, val, count,
6176 NULL_RTX, 1, OPTAB_DIRECT);
6179 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6180 the result in TARGET. */
6183 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6184 rtx cmp_op1, rtx cmp_op2)
6186 machine_mode mode = GET_MODE (target);
6187 bool neg_p = false, swap_p = false;
6190 if (GET_MODE (cmp_op1) == V2DFmode)
6194 /* NE a != b -> !(a == b) */
6195 case NE: cond = EQ; neg_p = true; break;
6196 /* UNGT a u> b -> !(b >= a) */
6197 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6198 /* UNGE a u>= b -> !(b > a) */
6199 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6200 /* LE: a <= b -> b >= a */
6201 case LE: cond = GE; swap_p = true; break;
6202 /* UNLE: a u<= b -> !(a > b) */
6203 case UNLE: cond = GT; neg_p = true; break;
6204 /* LT: a < b -> b > a */
6205 case LT: cond = GT; swap_p = true; break;
6206 /* UNLT: a u< b -> !(a >= b) */
6207 case UNLT: cond = GE; neg_p = true; break;
6209 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6212 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6215 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6218 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6227 /* NE: a != b -> !(a == b) */
6228 case NE: cond = EQ; neg_p = true; break;
6229 /* GE: a >= b -> !(b > a) */
6230 case GE: cond = GT; neg_p = true; swap_p = true; break;
6231 /* GEU: a >= b -> !(b > a) */
6232 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6233 /* LE: a <= b -> !(a > b) */
6234 case LE: cond = GT; neg_p = true; break;
6235 /* LEU: a <= b -> !(a > b) */
6236 case LEU: cond = GTU; neg_p = true; break;
6237 /* LT: a < b -> b > a */
6238 case LT: cond = GT; swap_p = true; break;
6239 /* LTU: a < b -> b > a */
6240 case LTU: cond = GTU; swap_p = true; break;
6247 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6250 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6252 cmp_op1, cmp_op2)));
6254 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6257 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6258 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6259 elements in CMP1 and CMP2 fulfill the comparison. */
6261 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6262 rtx cmp1, rtx cmp2, bool all_p)
6264 enum rtx_code new_code = code;
6265 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
6266 rtx tmp_reg = gen_reg_rtx (SImode);
6267 bool swap_p = false;
6269 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6273 case EQ: cmp_mode = CCVEQmode; break;
6274 case NE: cmp_mode = CCVEQmode; break;
6275 case GT: cmp_mode = CCVHmode; break;
6276 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
6277 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
6278 case LE: cmp_mode = CCVHmode; new_code = LE; break;
6279 case GTU: cmp_mode = CCVHUmode; break;
6280 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
6281 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
6282 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
6283 default: gcc_unreachable ();
6285 scratch_mode = GET_MODE (cmp1);
6287 else if (GET_MODE (cmp1) == V2DFmode)
6291 case EQ: cmp_mode = CCVEQmode; break;
6292 case NE: cmp_mode = CCVEQmode; break;
6293 case GT: cmp_mode = CCVFHmode; break;
6294 case GE: cmp_mode = CCVFHEmode; break;
6295 case UNLE: cmp_mode = CCVFHmode; break;
6296 case UNLT: cmp_mode = CCVFHEmode; break;
6297 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
6298 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
6299 default: gcc_unreachable ();
6301 scratch_mode = V2DImode;
6309 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
6310 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
6311 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
6312 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
6313 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
6314 default: gcc_unreachable ();
6317 /* The modes without ANY match the ALL modes. */
6318 full_cmp_mode = cmp_mode;
6327 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6328 gen_rtvec (2, gen_rtx_SET (
6329 gen_rtx_REG (cmp_mode, CC_REGNUM),
6330 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6331 gen_rtx_CLOBBER (VOIDmode,
6332 gen_rtx_SCRATCH (scratch_mode)))));
6333 emit_move_insn (target, const0_rtx);
6334 emit_move_insn (tmp_reg, const1_rtx);
6336 emit_move_insn (target,
6337 gen_rtx_IF_THEN_ELSE (SImode,
6338 gen_rtx_fmt_ee (new_code, VOIDmode,
6339 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6344 /* Generate a vector comparison expression loading either elements of
6345 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6349 s390_expand_vcond (rtx target, rtx then, rtx els,
6350 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6353 machine_mode result_mode;
6356 machine_mode target_mode = GET_MODE (target);
6357 machine_mode cmp_mode = GET_MODE (cmp_op1);
6358 rtx op = (cond == LT) ? els : then;
6360 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6361 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6362 for short and byte (x >> 15 and x >> 7 respectively). */
6363 if ((cond == LT || cond == GE)
6364 && target_mode == cmp_mode
6365 && cmp_op2 == CONST0_RTX (cmp_mode)
6366 && op == CONST0_RTX (target_mode)
6367 && s390_vector_mode_supported_p (target_mode)
6368 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6370 rtx negop = (cond == LT) ? then : els;
6372 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6374 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6375 if (negop == CONST1_RTX (target_mode))
6377 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6378 GEN_INT (shift), target,
6381 emit_move_insn (target, res);
6385 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6386 else if (all_ones_operand (negop, target_mode))
6388 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6389 GEN_INT (shift), target,
6392 emit_move_insn (target, res);
6397 /* We always use an integral type vector to hold the comparison
6399 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6400 result_target = gen_reg_rtx (result_mode);
6402 /* We allow vector immediates as comparison operands that
6403 can be handled by the optimization above but not by the
6404 following code. Hence, force them into registers here. */
6405 if (!REG_P (cmp_op1))
6406 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6408 if (!REG_P (cmp_op2))
6409 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6411 s390_expand_vec_compare (result_target, cond,
6414 /* If the results are supposed to be either -1 or 0 we are done
6415 since this is what our compare instructions generate anyway. */
6416 if (all_ones_operand (then, GET_MODE (then))
6417 && const0_operand (els, GET_MODE (els)))
6419 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6424 /* Otherwise we will do a vsel afterwards. */
6425 /* This gets triggered e.g.
6426 with gcc.c-torture/compile/pr53410-1.c */
6428 then = force_reg (target_mode, then);
6431 els = force_reg (target_mode, els);
6433 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6435 CONST0_RTX (result_mode));
6437 /* We compared the result against zero above so we have to swap then
6439 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6441 gcc_assert (target_mode == GET_MODE (then));
6442 emit_insn (gen_rtx_SET (target, tmp));
6445 /* Emit the RTX necessary to initialize the vector TARGET with values
6448 s390_expand_vec_init (rtx target, rtx vals)
6450 machine_mode mode = GET_MODE (target);
6451 machine_mode inner_mode = GET_MODE_INNER (mode);
6452 int n_elts = GET_MODE_NUNITS (mode);
6453 bool all_same = true, all_regs = true, all_const_int = true;
6457 for (i = 0; i < n_elts; ++i)
6459 x = XVECEXP (vals, 0, i);
6461 if (!CONST_INT_P (x))
6462 all_const_int = false;
6464 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6471 /* Use vector gen mask or vector gen byte mask if possible. */
6472 if (all_same && all_const_int
6473 && (XVECEXP (vals, 0, 0) == const0_rtx
6474 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6476 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6478 emit_insn (gen_rtx_SET (target,
6479 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6485 emit_insn (gen_rtx_SET (target,
6486 gen_rtx_VEC_DUPLICATE (mode,
6487 XVECEXP (vals, 0, 0))));
6491 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6493 /* Use vector load pair. */
6494 emit_insn (gen_rtx_SET (target,
6495 gen_rtx_VEC_CONCAT (mode,
6496 XVECEXP (vals, 0, 0),
6497 XVECEXP (vals, 0, 1))));
6501 /* We are about to set the vector elements one by one. Zero out the
6502 full register first in order to help the data flow framework to
6503 detect it as full VR set. */
6504 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6506 /* Unfortunately the vec_init expander is not allowed to fail. So
6507 we have to implement the fallback ourselves. */
6508 for (i = 0; i < n_elts; i++)
6510 rtx elem = XVECEXP (vals, 0, i);
6511 if (!general_operand (elem, GET_MODE (elem)))
6512 elem = force_reg (inner_mode, elem);
6514 emit_insn (gen_rtx_SET (target,
6515 gen_rtx_UNSPEC (mode,
6517 GEN_INT (i), target),
6522 /* Structure to hold the initial parameters for a compare_and_swap operation
6523 in HImode and QImode. */
6525 struct alignment_context
6527 rtx memsi; /* SI aligned memory location. */
6528 rtx shift; /* Bit offset with regard to lsb. */
6529 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6530 rtx modemaski; /* ~modemask */
6531 bool aligned; /* True if memory is aligned, false else. */
6534 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6535 structure AC for transparent simplifying, if the memory alignment is known
6536 to be at least 32bit. MEM is the memory location for the actual operation
6537 and MODE its mode. */
6540 init_alignment_context (struct alignment_context *ac, rtx mem,
6543 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6544 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6547 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6550 /* Alignment is unknown. */
6551 rtx byteoffset, addr, align;
6553 /* Force the address into a register. */
6554 addr = force_reg (Pmode, XEXP (mem, 0));
6556 /* Align it to SImode. */
6557 align = expand_simple_binop (Pmode, AND, addr,
6558 GEN_INT (-GET_MODE_SIZE (SImode)),
6559 NULL_RTX, 1, OPTAB_DIRECT);
6561 ac->memsi = gen_rtx_MEM (SImode, align);
6562 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6563 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6564 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6566 /* Calculate shiftcount. */
6567 byteoffset = expand_simple_binop (Pmode, AND, addr,
6568 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6569 NULL_RTX, 1, OPTAB_DIRECT);
6570 /* As we already have some offset, evaluate the remaining distance. */
6571 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6572 NULL_RTX, 1, OPTAB_DIRECT);
6575 /* Shift is the byte count, but we need the bitcount. */
6576 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6577 NULL_RTX, 1, OPTAB_DIRECT);
6579 /* Calculate masks. */
6580 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6581 GEN_INT (GET_MODE_MASK (mode)),
6582 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6583 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6587 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6588 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6589 perform the merge in SEQ2. */
6592 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6593 machine_mode mode, rtx val, rtx ins)
6600 tmp = copy_to_mode_reg (SImode, val);
6601 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6605 *seq2 = get_insns ();
6612 /* Failed to use insv. Generate a two part shift and mask. */
6614 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6615 *seq1 = get_insns ();
6619 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6620 *seq2 = get_insns ();
6626 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6627 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6628 value to set if CMP == MEM. */
6631 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6632 rtx cmp, rtx new_rtx, bool is_weak)
6634 struct alignment_context ac;
6635 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6636 rtx res = gen_reg_rtx (SImode);
6637 rtx_code_label *csloop = NULL, *csend = NULL;
6639 gcc_assert (MEM_P (mem));
6641 init_alignment_context (&ac, mem, mode);
6643 /* Load full word. Subsequent loads are performed by CS. */
6644 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6645 NULL_RTX, 1, OPTAB_DIRECT);
6647 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6648 possible, we try to use insv to make this happen efficiently. If
6649 that fails we'll generate code both inside and outside the loop. */
6650 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6651 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6658 /* Start CS loop. */
6661 /* Begin assuming success. */
6662 emit_move_insn (btarget, const1_rtx);
6664 csloop = gen_label_rtx ();
6665 csend = gen_label_rtx ();
6666 emit_label (csloop);
6669 /* val = "<mem>00..0<mem>"
6670 * cmp = "00..0<cmp>00..0"
6671 * new = "00..0<new>00..0"
6677 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6679 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6684 /* Jump to end if we're done (likely?). */
6685 s390_emit_jump (csend, cc);
6687 /* Check for changes outside mode, and loop internal if so.
6688 Arrange the moves so that the compare is adjacent to the
6689 branch so that we can generate CRJ. */
6690 tmp = copy_to_reg (val);
6691 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6693 cc = s390_emit_compare (NE, val, tmp);
6694 s390_emit_jump (csloop, cc);
6697 emit_move_insn (btarget, const0_rtx);
6701 /* Return the correct part of the bitfield. */
6702 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6703 NULL_RTX, 1, OPTAB_DIRECT), 1);
6706 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6707 and VAL the value to play with. If AFTER is true then store the value
6708 MEM holds after the operation, if AFTER is false then store the value MEM
6709 holds before the operation. If TARGET is zero then discard that value, else
6710 store it to TARGET. */
6713 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6714 rtx target, rtx mem, rtx val, bool after)
6716 struct alignment_context ac;
6718 rtx new_rtx = gen_reg_rtx (SImode);
6719 rtx orig = gen_reg_rtx (SImode);
6720 rtx_code_label *csloop = gen_label_rtx ();
6722 gcc_assert (!target || register_operand (target, VOIDmode));
6723 gcc_assert (MEM_P (mem));
6725 init_alignment_context (&ac, mem, mode);
6727 /* Shift val to the correct bit positions.
6728 Preserve "icm", but prevent "ex icm". */
6729 if (!(ac.aligned && code == SET && MEM_P (val)))
6730 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6732 /* Further preparation insns. */
6733 if (code == PLUS || code == MINUS)
6734 emit_move_insn (orig, val);
6735 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6736 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6737 NULL_RTX, 1, OPTAB_DIRECT);
6739 /* Load full word. Subsequent loads are performed by CS. */
6740 cmp = force_reg (SImode, ac.memsi);
6742 /* Start CS loop. */
6743 emit_label (csloop);
6744 emit_move_insn (new_rtx, cmp);
6746 /* Patch new with val at correct position. */
6751 val = expand_simple_binop (SImode, code, new_rtx, orig,
6752 NULL_RTX, 1, OPTAB_DIRECT);
6753 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6754 NULL_RTX, 1, OPTAB_DIRECT);
6757 if (ac.aligned && MEM_P (val))
6758 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6759 0, 0, SImode, val, false);
6762 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6763 NULL_RTX, 1, OPTAB_DIRECT);
6764 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6765 NULL_RTX, 1, OPTAB_DIRECT);
6771 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6772 NULL_RTX, 1, OPTAB_DIRECT);
6774 case MULT: /* NAND */
6775 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6776 NULL_RTX, 1, OPTAB_DIRECT);
6777 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6778 NULL_RTX, 1, OPTAB_DIRECT);
6784 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6785 ac.memsi, cmp, new_rtx));
6787 /* Return the correct part of the bitfield. */
6789 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6790 after ? new_rtx : cmp, ac.shift,
6791 NULL_RTX, 1, OPTAB_DIRECT), 1);
6794 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6795 We need to emit DTP-relative relocations. */
6797 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6800 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6805 fputs ("\t.long\t", file);
6808 fputs ("\t.quad\t", file);
6813 output_addr_const (file, x);
6814 fputs ("@DTPOFF", file);
6817 /* Return the proper mode for REGNO being represented in the dwarf
6820 s390_dwarf_frame_reg_mode (int regno)
6822 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6824 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6825 if (GENERAL_REGNO_P (regno))
6828 /* The rightmost 64 bits of vector registers are call-clobbered. */
6829 if (GET_MODE_SIZE (save_mode) > 8)
6835 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6836 /* Implement TARGET_MANGLE_TYPE. */
6839 s390_mangle_type (const_tree type)
6841 type = TYPE_MAIN_VARIANT (type);
6843 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6844 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6847 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6848 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6849 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6850 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6852 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6853 && TARGET_LONG_DOUBLE_128)
6856 /* For all other types, use normal C++ mangling. */
6861 /* In the name of slightly smaller debug output, and to cater to
6862 general assembler lossage, recognize various UNSPEC sequences
6863 and turn them back into a direct symbol reference. */
6866 s390_delegitimize_address (rtx orig_x)
6870 orig_x = delegitimize_mem_from_attrs (orig_x);
6873 /* Extract the symbol ref from:
6874 (plus:SI (reg:SI 12 %r12)
6875 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6876 UNSPEC_GOTOFF/PLTOFF)))
6878 (plus:SI (reg:SI 12 %r12)
6879 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6880 UNSPEC_GOTOFF/PLTOFF)
6881 (const_int 4 [0x4])))) */
6882 if (GET_CODE (x) == PLUS
6883 && REG_P (XEXP (x, 0))
6884 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6885 && GET_CODE (XEXP (x, 1)) == CONST)
6887 HOST_WIDE_INT offset = 0;
6889 /* The const operand. */
6890 y = XEXP (XEXP (x, 1), 0);
6892 if (GET_CODE (y) == PLUS
6893 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6895 offset = INTVAL (XEXP (y, 1));
6899 if (GET_CODE (y) == UNSPEC
6900 && (XINT (y, 1) == UNSPEC_GOTOFF
6901 || XINT (y, 1) == UNSPEC_PLTOFF))
6902 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6905 if (GET_CODE (x) != MEM)
6909 if (GET_CODE (x) == PLUS
6910 && GET_CODE (XEXP (x, 1)) == CONST
6911 && GET_CODE (XEXP (x, 0)) == REG
6912 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6914 y = XEXP (XEXP (x, 1), 0);
6915 if (GET_CODE (y) == UNSPEC
6916 && XINT (y, 1) == UNSPEC_GOT)
6917 y = XVECEXP (y, 0, 0);
6921 else if (GET_CODE (x) == CONST)
6923 /* Extract the symbol ref from:
6924 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6925 UNSPEC_PLT/GOTENT))) */
6928 if (GET_CODE (y) == UNSPEC
6929 && (XINT (y, 1) == UNSPEC_GOTENT
6930 || XINT (y, 1) == UNSPEC_PLT))
6931 y = XVECEXP (y, 0, 0);
6938 if (GET_MODE (orig_x) != Pmode)
6940 if (GET_MODE (orig_x) == BLKmode)
6942 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6949 /* Output operand OP to stdio stream FILE.
6950 OP is an address (register + offset) which is not used to address data;
6951 instead the rightmost bits are interpreted as the value. */
6954 print_addrstyle_operand (FILE *file, rtx op)
6956 HOST_WIDE_INT offset;
6959 /* Extract base register and offset. */
6960 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
6966 gcc_assert (GET_CODE (base) == REG);
6967 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6968 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6971 /* Offsets are constricted to twelve bits. */
6972 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6974 fprintf (file, "(%s)", reg_names[REGNO (base)]);
6977 /* Assigns the number of NOP halfwords to be emitted before and after the
6978 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
6979 If hotpatching is disabled for the function, the values are set to zero.
6983 s390_function_num_hotpatch_hw (tree decl,
6989 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6991 /* Handle the arguments of the hotpatch attribute. The values
6992 specified via attribute might override the cmdline argument
6996 tree args = TREE_VALUE (attr);
6998 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6999 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7003 /* Use the values specified by the cmdline arguments. */
7004 *hw_before = s390_hotpatch_hw_before_label;
7005 *hw_after = s390_hotpatch_hw_after_label;
7009 /* Write the current .machine and .machinemode specification to the assembler
7012 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7014 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7016 fprintf (asm_out_file, "\t.machinemode %s\n",
7017 (TARGET_ZARCH) ? "zarch" : "esa");
7018 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
7019 if (S390_USE_ARCHITECTURE_MODIFIERS)
7023 cpu_flags = processor_flags_table[(int) s390_arch];
7024 if (TARGET_HTM && !(cpu_flags & PF_TX))
7025 fprintf (asm_out_file, "+htm");
7026 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7027 fprintf (asm_out_file, "+nohtm");
7028 if (TARGET_VX && !(cpu_flags & PF_VX))
7029 fprintf (asm_out_file, "+vx");
7030 else if (!TARGET_VX && (cpu_flags & PF_VX))
7031 fprintf (asm_out_file, "+novx");
7033 fprintf (asm_out_file, "\"\n");
7036 /* Write an extra function header before the very start of the function. */
7039 s390_asm_output_function_prefix (FILE *asm_out_file,
7040 const char *fnname ATTRIBUTE_UNUSED)
7042 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7044 /* Since only the function specific options are saved but not the indications
7045 which options are set, it's too much work here to figure out which options
7046 have actually changed. Thus, generate .machine and .machinemode whenever a
7047 function has the target attribute or pragma. */
7048 fprintf (asm_out_file, "\t.machinemode push\n");
7049 fprintf (asm_out_file, "\t.machine push\n");
7050 s390_asm_output_machine_for_arch (asm_out_file);
7053 /* Write an extra function footer after the very end of the function. */
7056 s390_asm_declare_function_size (FILE *asm_out_file,
7057 const char *fnname, tree decl)
7059 if (!flag_inhibit_size_directive)
7060 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7061 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7063 fprintf (asm_out_file, "\t.machine pop\n");
7064 fprintf (asm_out_file, "\t.machinemode pop\n");
7068 /* Write the extra assembler code needed to declare a function properly. */
7071 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7074 int hw_before, hw_after;
7076 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7079 unsigned int function_alignment;
7082 /* Add a trampoline code area before the function label and initialize it
7083 with two-byte nop instructions. This area can be overwritten with code
7084 that jumps to a patched version of the function. */
7085 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
7086 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7088 for (i = 1; i < hw_before; i++)
7089 fputs ("\tnopr\t%r7\n", asm_out_file);
7091 /* Note: The function label must be aligned so that (a) the bytes of the
7092 following nop do not cross a cacheline boundary, and (b) a jump address
7093 (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
7094 stored directly before the label without crossing a cacheline
7095 boundary. All this is necessary to make sure the trampoline code can
7096 be changed atomically.
7097 This alignment is done automatically using the FOUNCTION_BOUNDARY, but
7098 if there are NOPs before the function label, the alignment is placed
7099 before them. So it is necessary to duplicate the alignment after the
7101 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7102 if (! DECL_USER_ALIGN (decl))
7103 function_alignment = MAX (function_alignment,
7104 (unsigned int) align_functions);
7105 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7106 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7109 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7111 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7112 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7113 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7114 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7115 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7116 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7117 s390_warn_framesize);
7118 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7119 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7120 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7121 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7122 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7123 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7124 TARGET_PACKED_STACK);
7125 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7126 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7127 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7128 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7129 s390_warn_dynamicstack_p);
7131 ASM_OUTPUT_LABEL (asm_out_file, fname);
7133 asm_fprintf (asm_out_file,
7134 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7138 /* Output machine-dependent UNSPECs occurring in address constant X
7139 in assembler syntax to stdio stream FILE. Returns true if the
7140 constant X could be recognized, false otherwise. */
7143 s390_output_addr_const_extra (FILE *file, rtx x)
7145 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7146 switch (XINT (x, 1))
7149 output_addr_const (file, XVECEXP (x, 0, 0));
7150 fprintf (file, "@GOTENT");
7153 output_addr_const (file, XVECEXP (x, 0, 0));
7154 fprintf (file, "@GOT");
7157 output_addr_const (file, XVECEXP (x, 0, 0));
7158 fprintf (file, "@GOTOFF");
7161 output_addr_const (file, XVECEXP (x, 0, 0));
7162 fprintf (file, "@PLT");
7165 output_addr_const (file, XVECEXP (x, 0, 0));
7166 fprintf (file, "@PLTOFF");
7169 output_addr_const (file, XVECEXP (x, 0, 0));
7170 fprintf (file, "@TLSGD");
7173 assemble_name (file, get_some_local_dynamic_name ());
7174 fprintf (file, "@TLSLDM");
7177 output_addr_const (file, XVECEXP (x, 0, 0));
7178 fprintf (file, "@DTPOFF");
7181 output_addr_const (file, XVECEXP (x, 0, 0));
7182 fprintf (file, "@NTPOFF");
7184 case UNSPEC_GOTNTPOFF:
7185 output_addr_const (file, XVECEXP (x, 0, 0));
7186 fprintf (file, "@GOTNTPOFF");
7188 case UNSPEC_INDNTPOFF:
7189 output_addr_const (file, XVECEXP (x, 0, 0));
7190 fprintf (file, "@INDNTPOFF");
7194 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7195 switch (XINT (x, 1))
7197 case UNSPEC_POOL_OFFSET:
7198 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7199 output_addr_const (file, x);
7205 /* Output address operand ADDR in assembler syntax to
7206 stdio stream FILE. */
7209 print_operand_address (FILE *file, rtx addr)
7211 struct s390_address ad;
7213 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7217 output_operand_lossage ("symbolic memory references are "
7218 "only supported on z10 or later");
7221 output_addr_const (file, addr);
7225 if (!s390_decompose_address (addr, &ad)
7226 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7227 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7228 output_operand_lossage ("cannot decompose address");
7231 output_addr_const (file, ad.disp);
7233 fprintf (file, "0");
7235 if (ad.base && ad.indx)
7236 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7237 reg_names[REGNO (ad.base)]);
7239 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7242 /* Output operand X in assembler syntax to stdio stream FILE.
7243 CODE specified the format flag. The following format flags
7246 'C': print opcode suffix for branch condition.
7247 'D': print opcode suffix for inverse branch condition.
7248 'E': print opcode suffix for branch on index instruction.
7249 'G': print the size of the operand in bytes.
7250 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7251 'M': print the second word of a TImode operand.
7252 'N': print the second word of a DImode operand.
7253 'O': print only the displacement of a memory reference or address.
7254 'R': print only the base register of a memory reference or address.
7255 'S': print S-type memory reference (base+displacement).
7256 'Y': print address style operand without index (e.g. shift count or setmem
7259 'b': print integer X as if it's an unsigned byte.
7260 'c': print integer X as if it's an signed byte.
7261 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7262 'f': "end" contiguous bitmask X in SImode.
7263 'h': print integer X as if it's a signed halfword.
7264 'i': print the first nonzero HImode part of X.
7265 'j': print the first HImode part unequal to -1 of X.
7266 'k': print the first nonzero SImode part of X.
7267 'm': print the first SImode part unequal to -1 of X.
7268 'o': print integer X as if it's an unsigned 32bit word.
7269 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7270 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7271 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7272 'x': print integer X as if it's an unsigned halfword.
7273 'v': print register number as vector register (v1 instead of f1).
7277 print_operand (FILE *file, rtx x, int code)
7284 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7288 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7292 if (GET_CODE (x) == LE)
7293 fprintf (file, "l");
7294 else if (GET_CODE (x) == GT)
7295 fprintf (file, "h");
7297 output_operand_lossage ("invalid comparison operator "
7298 "for 'E' output modifier");
7302 if (GET_CODE (x) == SYMBOL_REF)
7304 fprintf (file, "%s", ":tls_load:");
7305 output_addr_const (file, x);
7307 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7309 fprintf (file, "%s", ":tls_gdcall:");
7310 output_addr_const (file, XVECEXP (x, 0, 0));
7312 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7314 fprintf (file, "%s", ":tls_ldcall:");
7315 const char *name = get_some_local_dynamic_name ();
7317 assemble_name (file, name);
7320 output_operand_lossage ("invalid reference for 'J' output modifier");
7324 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7329 struct s390_address ad;
7332 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7335 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7338 output_operand_lossage ("invalid address for 'O' output modifier");
7343 output_addr_const (file, ad.disp);
7345 fprintf (file, "0");
7351 struct s390_address ad;
7354 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7357 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7360 output_operand_lossage ("invalid address for 'R' output modifier");
7365 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7367 fprintf (file, "0");
7373 struct s390_address ad;
7378 output_operand_lossage ("memory reference expected for "
7379 "'S' output modifier");
7382 ret = s390_decompose_address (XEXP (x, 0), &ad);
7385 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7388 output_operand_lossage ("invalid address for 'S' output modifier");
7393 output_addr_const (file, ad.disp);
7395 fprintf (file, "0");
7398 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7403 if (GET_CODE (x) == REG)
7404 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7405 else if (GET_CODE (x) == MEM)
7406 x = change_address (x, VOIDmode,
7407 plus_constant (Pmode, XEXP (x, 0), 4));
7409 output_operand_lossage ("register or memory expression expected "
7410 "for 'N' output modifier");
7414 if (GET_CODE (x) == REG)
7415 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7416 else if (GET_CODE (x) == MEM)
7417 x = change_address (x, VOIDmode,
7418 plus_constant (Pmode, XEXP (x, 0), 8));
7420 output_operand_lossage ("register or memory expression expected "
7421 "for 'M' output modifier");
7425 print_addrstyle_operand (file, x);
7429 switch (GET_CODE (x))
7432 /* Print FP regs as fx instead of vx when they are accessed
7433 through non-vector mode. */
7435 || VECTOR_NOFP_REG_P (x)
7436 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7437 || (VECTOR_REG_P (x)
7438 && (GET_MODE_SIZE (GET_MODE (x)) /
7439 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7440 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7442 fprintf (file, "%s", reg_names[REGNO (x)]);
7446 output_address (GET_MODE (x), XEXP (x, 0));
7453 output_addr_const (file, x);
7466 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7472 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7475 ival = s390_extract_part (x, HImode, 0);
7478 ival = s390_extract_part (x, HImode, -1);
7481 ival = s390_extract_part (x, SImode, 0);
7484 ival = s390_extract_part (x, SImode, -1);
7496 len = (code == 's' || code == 'e' ? 64 : 32);
7497 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7499 if (code == 's' || code == 't')
7506 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7508 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7511 case CONST_WIDE_INT:
7513 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7514 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7515 else if (code == 'x')
7516 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7517 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7518 else if (code == 'h')
7519 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7520 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7524 output_operand_lossage ("invalid constant - try using "
7525 "an output modifier");
7527 output_operand_lossage ("invalid constant for output modifier '%c'",
7535 gcc_assert (const_vec_duplicate_p (x));
7536 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7537 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7545 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7547 ival = (code == 's') ? start : end;
7548 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7554 bool ok = s390_bytemask_vector_p (x, &mask);
7556 fprintf (file, "%u", mask);
7561 output_operand_lossage ("invalid constant vector for output "
7562 "modifier '%c'", code);
7568 output_operand_lossage ("invalid expression - try using "
7569 "an output modifier");
7571 output_operand_lossage ("invalid expression for output "
7572 "modifier '%c'", code);
7577 /* Target hook for assembling integer objects. We need to define it
7578 here to work a round a bug in some versions of GAS, which couldn't
7579 handle values smaller than INT_MIN when printed in decimal. */
7582 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7584 if (size == 8 && aligned_p
7585 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7587 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7591 return default_assemble_integer (x, size, aligned_p);
7594 /* Returns true if register REGNO is used for forming
7595 a memory address in expression X. */
7598 reg_used_in_mem_p (int regno, rtx x)
7600 enum rtx_code code = GET_CODE (x);
7606 if (refers_to_regno_p (regno, XEXP (x, 0)))
7609 else if (code == SET
7610 && GET_CODE (SET_DEST (x)) == PC)
7612 if (refers_to_regno_p (regno, SET_SRC (x)))
7616 fmt = GET_RTX_FORMAT (code);
7617 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7620 && reg_used_in_mem_p (regno, XEXP (x, i)))
7623 else if (fmt[i] == 'E')
7624 for (j = 0; j < XVECLEN (x, i); j++)
7625 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7631 /* Returns true if expression DEP_RTX sets an address register
7632 used by instruction INSN to address memory. */
7635 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7639 if (NONJUMP_INSN_P (dep_rtx))
7640 dep_rtx = PATTERN (dep_rtx);
7642 if (GET_CODE (dep_rtx) == SET)
7644 target = SET_DEST (dep_rtx);
7645 if (GET_CODE (target) == STRICT_LOW_PART)
7646 target = XEXP (target, 0);
7647 while (GET_CODE (target) == SUBREG)
7648 target = SUBREG_REG (target);
7650 if (GET_CODE (target) == REG)
7652 int regno = REGNO (target);
7654 if (s390_safe_attr_type (insn) == TYPE_LA)
7656 pat = PATTERN (insn);
7657 if (GET_CODE (pat) == PARALLEL)
7659 gcc_assert (XVECLEN (pat, 0) == 2);
7660 pat = XVECEXP (pat, 0, 0);
7662 gcc_assert (GET_CODE (pat) == SET);
7663 return refers_to_regno_p (regno, SET_SRC (pat));
7665 else if (get_attr_atype (insn) == ATYPE_AGEN)
7666 return reg_used_in_mem_p (regno, PATTERN (insn));
7672 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
7675 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7677 rtx dep_rtx = PATTERN (dep_insn);
7680 if (GET_CODE (dep_rtx) == SET
7681 && addr_generation_dependency_p (dep_rtx, insn))
7683 else if (GET_CODE (dep_rtx) == PARALLEL)
7685 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7687 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7695 /* A C statement (sans semicolon) to update the integer scheduling priority
7696 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7697 reduce the priority to execute INSN later. Do not define this macro if
7698 you do not need to adjust the scheduling priorities of insns.
7700 A STD instruction should be scheduled earlier,
7701 in order to use the bypass. */
7703 s390_adjust_priority (rtx_insn *insn, int priority)
7705 if (! INSN_P (insn))
7708 if (s390_tune <= PROCESSOR_2064_Z900)
7711 switch (s390_safe_attr_type (insn))
7715 priority = priority << 3;
7719 priority = priority << 1;
7728 /* The number of instructions that can be issued per cycle. */
7731 s390_issue_rate (void)
7735 case PROCESSOR_2084_Z990:
7736 case PROCESSOR_2094_Z9_109:
7737 case PROCESSOR_2094_Z9_EC:
7738 case PROCESSOR_2817_Z196:
7740 case PROCESSOR_2097_Z10:
7742 case PROCESSOR_9672_G5:
7743 case PROCESSOR_9672_G6:
7744 case PROCESSOR_2064_Z900:
7745 /* Starting with EC12 we use the sched_reorder hook to take care
7746 of instruction dispatch constraints. The algorithm only
7747 picks the best instruction and assumes only a single
7748 instruction gets issued per cycle. */
7749 case PROCESSOR_2827_ZEC12:
7750 case PROCESSOR_2964_Z13:
7757 s390_first_cycle_multipass_dfa_lookahead (void)
7762 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7763 Fix up MEMs as required. */
7766 annotate_constant_pool_refs (rtx *x)
7771 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7772 || !CONSTANT_POOL_ADDRESS_P (*x));
7774 /* Literal pool references can only occur inside a MEM ... */
7775 if (GET_CODE (*x) == MEM)
7777 rtx memref = XEXP (*x, 0);
7779 if (GET_CODE (memref) == SYMBOL_REF
7780 && CONSTANT_POOL_ADDRESS_P (memref))
7782 rtx base = cfun->machine->base_reg;
7783 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7786 *x = replace_equiv_address (*x, addr);
7790 if (GET_CODE (memref) == CONST
7791 && GET_CODE (XEXP (memref, 0)) == PLUS
7792 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7793 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7794 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7796 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7797 rtx sym = XEXP (XEXP (memref, 0), 0);
7798 rtx base = cfun->machine->base_reg;
7799 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7802 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7807 /* ... or a load-address type pattern. */
7808 if (GET_CODE (*x) == SET)
7810 rtx addrref = SET_SRC (*x);
7812 if (GET_CODE (addrref) == SYMBOL_REF
7813 && CONSTANT_POOL_ADDRESS_P (addrref))
7815 rtx base = cfun->machine->base_reg;
7816 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7819 SET_SRC (*x) = addr;
7823 if (GET_CODE (addrref) == CONST
7824 && GET_CODE (XEXP (addrref, 0)) == PLUS
7825 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7826 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7827 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7829 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7830 rtx sym = XEXP (XEXP (addrref, 0), 0);
7831 rtx base = cfun->machine->base_reg;
7832 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7835 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7840 /* Annotate LTREL_BASE as well. */
7841 if (GET_CODE (*x) == UNSPEC
7842 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7844 rtx base = cfun->machine->base_reg;
7845 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7850 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7851 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7855 annotate_constant_pool_refs (&XEXP (*x, i));
7857 else if (fmt[i] == 'E')
7859 for (j = 0; j < XVECLEN (*x, i); j++)
7860 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
7865 /* Split all branches that exceed the maximum distance.
7866 Returns true if this created a new literal pool entry. */
7869 s390_split_branches (void)
7871 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7872 int new_literal = 0, ret;
7877 /* We need correct insn addresses. */
7879 shorten_branches (get_insns ());
7881 /* Find all branches that exceed 64KB, and split them. */
7883 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7885 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7888 pat = PATTERN (insn);
7889 if (GET_CODE (pat) == PARALLEL)
7890 pat = XVECEXP (pat, 0, 0);
7891 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7894 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7896 label = &SET_SRC (pat);
7898 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7900 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7901 label = &XEXP (SET_SRC (pat), 1);
7902 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7903 label = &XEXP (SET_SRC (pat), 2);
7910 if (get_attr_length (insn) <= 4)
7913 /* We are going to use the return register as scratch register,
7914 make sure it will be saved/restored by the prologue/epilogue. */
7915 cfun_frame_layout.save_return_addr_p = 1;
7920 rtx mem = force_const_mem (Pmode, *label);
7921 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7923 INSN_ADDRESSES_NEW (set_insn, -1);
7924 annotate_constant_pool_refs (&PATTERN (set_insn));
7931 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7932 UNSPEC_LTREL_OFFSET);
7933 target = gen_rtx_CONST (Pmode, target);
7934 target = force_const_mem (Pmode, target);
7935 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7937 INSN_ADDRESSES_NEW (set_insn, -1);
7938 annotate_constant_pool_refs (&PATTERN (set_insn));
7940 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7941 cfun->machine->base_reg),
7943 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7946 ret = validate_change (insn, label, target, 0);
7954 /* Find an annotated literal pool symbol referenced in RTX X,
7955 and store it at REF. Will abort if X contains references to
7956 more than one such pool symbol; multiple references to the same
7957 symbol are allowed, however.
7959 The rtx pointed to by REF must be initialized to NULL_RTX
7960 by the caller before calling this routine. */
7963 find_constant_pool_ref (rtx x, rtx *ref)
7968 /* Ignore LTREL_BASE references. */
7969 if (GET_CODE (x) == UNSPEC
7970 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7972 /* Likewise POOL_ENTRY insns. */
7973 if (GET_CODE (x) == UNSPEC_VOLATILE
7974 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7977 gcc_assert (GET_CODE (x) != SYMBOL_REF
7978 || !CONSTANT_POOL_ADDRESS_P (x));
7980 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7982 rtx sym = XVECEXP (x, 0, 0);
7983 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7984 && CONSTANT_POOL_ADDRESS_P (sym));
7986 if (*ref == NULL_RTX)
7989 gcc_assert (*ref == sym);
7994 fmt = GET_RTX_FORMAT (GET_CODE (x));
7995 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7999 find_constant_pool_ref (XEXP (x, i), ref);
8001 else if (fmt[i] == 'E')
8003 for (j = 0; j < XVECLEN (x, i); j++)
8004 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8009 /* Replace every reference to the annotated literal pool
8010 symbol REF in X by its base plus OFFSET. */
8013 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8018 gcc_assert (*x != ref);
8020 if (GET_CODE (*x) == UNSPEC
8021 && XINT (*x, 1) == UNSPEC_LTREF
8022 && XVECEXP (*x, 0, 0) == ref)
8024 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8028 if (GET_CODE (*x) == PLUS
8029 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8030 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8031 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8032 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8034 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8035 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8039 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8040 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8044 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8046 else if (fmt[i] == 'E')
8048 for (j = 0; j < XVECLEN (*x, i); j++)
8049 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8054 /* Check whether X contains an UNSPEC_LTREL_BASE.
8055 Return its constant pool symbol if found, NULL_RTX otherwise. */
8058 find_ltrel_base (rtx x)
8063 if (GET_CODE (x) == UNSPEC
8064 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8065 return XVECEXP (x, 0, 0);
8067 fmt = GET_RTX_FORMAT (GET_CODE (x));
8068 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8072 rtx fnd = find_ltrel_base (XEXP (x, i));
8076 else if (fmt[i] == 'E')
8078 for (j = 0; j < XVECLEN (x, i); j++)
8080 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8090 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8093 replace_ltrel_base (rtx *x)
8098 if (GET_CODE (*x) == UNSPEC
8099 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8101 *x = XVECEXP (*x, 0, 1);
8105 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8106 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8110 replace_ltrel_base (&XEXP (*x, i));
8112 else if (fmt[i] == 'E')
8114 for (j = 0; j < XVECLEN (*x, i); j++)
8115 replace_ltrel_base (&XVECEXP (*x, i, j));
8121 /* We keep a list of constants which we have to add to internal
8122 constant tables in the middle of large functions. */
8124 #define NR_C_MODES 32
8125 machine_mode constant_modes[NR_C_MODES] =
8127 TFmode, TImode, TDmode,
8128 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8129 V4SFmode, V2DFmode, V1TFmode,
8130 DFmode, DImode, DDmode,
8131 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8132 SFmode, SImode, SDmode,
8133 V4QImode, V2HImode, V1SImode, V1SFmode,
8142 struct constant *next;
8144 rtx_code_label *label;
8147 struct constant_pool
8149 struct constant_pool *next;
8150 rtx_insn *first_insn;
8151 rtx_insn *pool_insn;
8153 rtx_insn *emit_pool_after;
8155 struct constant *constants[NR_C_MODES];
8156 struct constant *execute;
8157 rtx_code_label *label;
8161 /* Allocate new constant_pool structure. */
8163 static struct constant_pool *
8164 s390_alloc_pool (void)
8166 struct constant_pool *pool;
8169 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8171 for (i = 0; i < NR_C_MODES; i++)
8172 pool->constants[i] = NULL;
8174 pool->execute = NULL;
8175 pool->label = gen_label_rtx ();
8176 pool->first_insn = NULL;
8177 pool->pool_insn = NULL;
8178 pool->insns = BITMAP_ALLOC (NULL);
8180 pool->emit_pool_after = NULL;
8185 /* Create new constant pool covering instructions starting at INSN
8186 and chain it to the end of POOL_LIST. */
8188 static struct constant_pool *
8189 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8191 struct constant_pool *pool, **prev;
8193 pool = s390_alloc_pool ();
8194 pool->first_insn = insn;
8196 for (prev = pool_list; *prev; prev = &(*prev)->next)
8203 /* End range of instructions covered by POOL at INSN and emit
8204 placeholder insn representing the pool. */
8207 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8209 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8212 insn = get_last_insn ();
8214 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8215 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8218 /* Add INSN to the list of insns covered by POOL. */
8221 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8223 bitmap_set_bit (pool->insns, INSN_UID (insn));
8226 /* Return pool out of POOL_LIST that covers INSN. */
8228 static struct constant_pool *
8229 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8231 struct constant_pool *pool;
8233 for (pool = pool_list; pool; pool = pool->next)
8234 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8240 /* Add constant VAL of mode MODE to the constant pool POOL. */
8243 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8248 for (i = 0; i < NR_C_MODES; i++)
8249 if (constant_modes[i] == mode)
8251 gcc_assert (i != NR_C_MODES);
8253 for (c = pool->constants[i]; c != NULL; c = c->next)
8254 if (rtx_equal_p (val, c->value))
8259 c = (struct constant *) xmalloc (sizeof *c);
8261 c->label = gen_label_rtx ();
8262 c->next = pool->constants[i];
8263 pool->constants[i] = c;
8264 pool->size += GET_MODE_SIZE (mode);
8268 /* Return an rtx that represents the offset of X from the start of
8272 s390_pool_offset (struct constant_pool *pool, rtx x)
8276 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8277 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8278 UNSPEC_POOL_OFFSET);
8279 return gen_rtx_CONST (GET_MODE (x), x);
8282 /* Find constant VAL of mode MODE in the constant pool POOL.
8283 Return an RTX describing the distance from the start of
8284 the pool to the location of the new constant. */
8287 s390_find_constant (struct constant_pool *pool, rtx val,
8293 for (i = 0; i < NR_C_MODES; i++)
8294 if (constant_modes[i] == mode)
8296 gcc_assert (i != NR_C_MODES);
8298 for (c = pool->constants[i]; c != NULL; c = c->next)
8299 if (rtx_equal_p (val, c->value))
8304 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8307 /* Check whether INSN is an execute. Return the label_ref to its
8308 execute target template if so, NULL_RTX otherwise. */
8311 s390_execute_label (rtx insn)
8313 if (NONJUMP_INSN_P (insn)
8314 && GET_CODE (PATTERN (insn)) == PARALLEL
8315 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8316 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8317 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8322 /* Add execute target for INSN to the constant pool POOL. */
8325 s390_add_execute (struct constant_pool *pool, rtx insn)
8329 for (c = pool->execute; c != NULL; c = c->next)
8330 if (INSN_UID (insn) == INSN_UID (c->value))
8335 c = (struct constant *) xmalloc (sizeof *c);
8337 c->label = gen_label_rtx ();
8338 c->next = pool->execute;
8344 /* Find execute target for INSN in the constant pool POOL.
8345 Return an RTX describing the distance from the start of
8346 the pool to the location of the execute target. */
8349 s390_find_execute (struct constant_pool *pool, rtx insn)
8353 for (c = pool->execute; c != NULL; c = c->next)
8354 if (INSN_UID (insn) == INSN_UID (c->value))
8359 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8362 /* For an execute INSN, extract the execute target template. */
8365 s390_execute_target (rtx insn)
8367 rtx pattern = PATTERN (insn);
8368 gcc_assert (s390_execute_label (insn));
8370 if (XVECLEN (pattern, 0) == 2)
8372 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8376 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8379 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8380 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8382 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8388 /* Indicate that INSN cannot be duplicated. This is the case for
8389 execute insns that carry a unique label. */
8392 s390_cannot_copy_insn_p (rtx_insn *insn)
8394 rtx label = s390_execute_label (insn);
8395 return label && label != const0_rtx;
8398 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8399 do not emit the pool base label. */
8402 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8405 rtx_insn *insn = pool->pool_insn;
8408 /* Switch to rodata section. */
8409 if (TARGET_CPU_ZARCH)
8411 insn = emit_insn_after (gen_pool_section_start (), insn);
8412 INSN_ADDRESSES_NEW (insn, -1);
8415 /* Ensure minimum pool alignment. */
8416 if (TARGET_CPU_ZARCH)
8417 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8419 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8420 INSN_ADDRESSES_NEW (insn, -1);
8422 /* Emit pool base label. */
8425 insn = emit_label_after (pool->label, insn);
8426 INSN_ADDRESSES_NEW (insn, -1);
8429 /* Dump constants in descending alignment requirement order,
8430 ensuring proper alignment for every constant. */
8431 for (i = 0; i < NR_C_MODES; i++)
8432 for (c = pool->constants[i]; c; c = c->next)
8434 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8435 rtx value = copy_rtx (c->value);
8436 if (GET_CODE (value) == CONST
8437 && GET_CODE (XEXP (value, 0)) == UNSPEC
8438 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8439 && XVECLEN (XEXP (value, 0), 0) == 1)
8440 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8442 insn = emit_label_after (c->label, insn);
8443 INSN_ADDRESSES_NEW (insn, -1);
8445 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8446 gen_rtvec (1, value),
8447 UNSPECV_POOL_ENTRY);
8448 insn = emit_insn_after (value, insn);
8449 INSN_ADDRESSES_NEW (insn, -1);
8452 /* Ensure minimum alignment for instructions. */
8453 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8454 INSN_ADDRESSES_NEW (insn, -1);
8456 /* Output in-pool execute template insns. */
8457 for (c = pool->execute; c; c = c->next)
8459 insn = emit_label_after (c->label, insn);
8460 INSN_ADDRESSES_NEW (insn, -1);
8462 insn = emit_insn_after (s390_execute_target (c->value), insn);
8463 INSN_ADDRESSES_NEW (insn, -1);
8466 /* Switch back to previous section. */
8467 if (TARGET_CPU_ZARCH)
8469 insn = emit_insn_after (gen_pool_section_end (), insn);
8470 INSN_ADDRESSES_NEW (insn, -1);
8473 insn = emit_barrier_after (insn);
8474 INSN_ADDRESSES_NEW (insn, -1);
8476 /* Remove placeholder insn. */
8477 remove_insn (pool->pool_insn);
8480 /* Free all memory used by POOL. */
8483 s390_free_pool (struct constant_pool *pool)
8485 struct constant *c, *next;
8488 for (i = 0; i < NR_C_MODES; i++)
8489 for (c = pool->constants[i]; c; c = next)
8495 for (c = pool->execute; c; c = next)
8501 BITMAP_FREE (pool->insns);
8506 /* Collect main literal pool. Return NULL on overflow. */
8508 static struct constant_pool *
8509 s390_mainpool_start (void)
8511 struct constant_pool *pool;
8514 pool = s390_alloc_pool ();
8516 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8518 if (NONJUMP_INSN_P (insn)
8519 && GET_CODE (PATTERN (insn)) == SET
8520 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8521 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8523 /* There might be two main_pool instructions if base_reg
8524 is call-clobbered; one for shrink-wrapped code and one
8525 for the rest. We want to keep the first. */
8526 if (pool->pool_insn)
8528 insn = PREV_INSN (insn);
8529 delete_insn (NEXT_INSN (insn));
8532 pool->pool_insn = insn;
8535 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8537 s390_add_execute (pool, insn);
8539 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8541 rtx pool_ref = NULL_RTX;
8542 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8545 rtx constant = get_pool_constant (pool_ref);
8546 machine_mode mode = get_pool_mode (pool_ref);
8547 s390_add_constant (pool, constant, mode);
8551 /* If hot/cold partitioning is enabled we have to make sure that
8552 the literal pool is emitted in the same section where the
8553 initialization of the literal pool base pointer takes place.
8554 emit_pool_after is only used in the non-overflow case on non
8555 Z cpus where we can emit the literal pool at the end of the
8556 function body within the text section. */
8558 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8559 && !pool->emit_pool_after)
8560 pool->emit_pool_after = PREV_INSN (insn);
8563 gcc_assert (pool->pool_insn || pool->size == 0);
8565 if (pool->size >= 4096)
8567 /* We're going to chunkify the pool, so remove the main
8568 pool placeholder insn. */
8569 remove_insn (pool->pool_insn);
8571 s390_free_pool (pool);
8575 /* If the functions ends with the section where the literal pool
8576 should be emitted set the marker to its end. */
8577 if (pool && !pool->emit_pool_after)
8578 pool->emit_pool_after = get_last_insn ();
8583 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8584 Modify the current function to output the pool constants as well as
8585 the pool register setup instruction. */
8588 s390_mainpool_finish (struct constant_pool *pool)
8590 rtx base_reg = cfun->machine->base_reg;
8592 /* If the pool is empty, we're done. */
8593 if (pool->size == 0)
8595 /* We don't actually need a base register after all. */
8596 cfun->machine->base_reg = NULL_RTX;
8598 if (pool->pool_insn)
8599 remove_insn (pool->pool_insn);
8600 s390_free_pool (pool);
8604 /* We need correct insn addresses. */
8605 shorten_branches (get_insns ());
8607 /* On zSeries, we use a LARL to load the pool register. The pool is
8608 located in the .rodata section, so we emit it after the function. */
8609 if (TARGET_CPU_ZARCH)
8611 rtx set = gen_main_base_64 (base_reg, pool->label);
8612 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8613 INSN_ADDRESSES_NEW (insn, -1);
8614 remove_insn (pool->pool_insn);
8616 insn = get_last_insn ();
8617 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8618 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8620 s390_dump_pool (pool, 0);
8623 /* On S/390, if the total size of the function's code plus literal pool
8624 does not exceed 4096 bytes, we use BASR to set up a function base
8625 pointer, and emit the literal pool at the end of the function. */
8626 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8627 + pool->size + 8 /* alignment slop */ < 4096)
8629 rtx set = gen_main_base_31_small (base_reg, pool->label);
8630 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8631 INSN_ADDRESSES_NEW (insn, -1);
8632 remove_insn (pool->pool_insn);
8634 insn = emit_label_after (pool->label, insn);
8635 INSN_ADDRESSES_NEW (insn, -1);
8637 /* emit_pool_after will be set by s390_mainpool_start to the
8638 last insn of the section where the literal pool should be
8640 insn = pool->emit_pool_after;
8642 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8643 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8645 s390_dump_pool (pool, 1);
8648 /* Otherwise, we emit an inline literal pool and use BASR to branch
8649 over it, setting up the pool register at the same time. */
8652 rtx_code_label *pool_end = gen_label_rtx ();
8654 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8655 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8656 JUMP_LABEL (insn) = pool_end;
8657 INSN_ADDRESSES_NEW (insn, -1);
8658 remove_insn (pool->pool_insn);
8660 insn = emit_label_after (pool->label, insn);
8661 INSN_ADDRESSES_NEW (insn, -1);
8663 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8664 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8666 insn = emit_label_after (pool_end, pool->pool_insn);
8667 INSN_ADDRESSES_NEW (insn, -1);
8669 s390_dump_pool (pool, 1);
8673 /* Replace all literal pool references. */
8675 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8678 replace_ltrel_base (&PATTERN (insn));
8680 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8682 rtx addr, pool_ref = NULL_RTX;
8683 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8686 if (s390_execute_label (insn))
8687 addr = s390_find_execute (pool, insn);
8689 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8690 get_pool_mode (pool_ref));
8692 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8693 INSN_CODE (insn) = -1;
8699 /* Free the pool. */
8700 s390_free_pool (pool);
8703 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8704 We have decided we cannot use this pool, so revert all changes
8705 to the current function that were done by s390_mainpool_start. */
8707 s390_mainpool_cancel (struct constant_pool *pool)
8709 /* We didn't actually change the instruction stream, so simply
8710 free the pool memory. */
8711 s390_free_pool (pool);
8715 /* Chunkify the literal pool. */
8717 #define S390_POOL_CHUNK_MIN 0xc00
8718 #define S390_POOL_CHUNK_MAX 0xe00
8720 static struct constant_pool *
8721 s390_chunkify_start (void)
8723 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8726 rtx pending_ltrel = NULL_RTX;
8729 rtx (*gen_reload_base) (rtx, rtx) =
8730 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8733 /* We need correct insn addresses. */
8735 shorten_branches (get_insns ());
8737 /* Scan all insns and move literals to pool chunks. */
8739 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8741 bool section_switch_p = false;
8743 /* Check for pending LTREL_BASE. */
8746 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8749 gcc_assert (ltrel_base == pending_ltrel);
8750 pending_ltrel = NULL_RTX;
8754 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8757 curr_pool = s390_start_pool (&pool_list, insn);
8759 s390_add_execute (curr_pool, insn);
8760 s390_add_pool_insn (curr_pool, insn);
8762 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8764 rtx pool_ref = NULL_RTX;
8765 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8768 rtx constant = get_pool_constant (pool_ref);
8769 machine_mode mode = get_pool_mode (pool_ref);
8772 curr_pool = s390_start_pool (&pool_list, insn);
8774 s390_add_constant (curr_pool, constant, mode);
8775 s390_add_pool_insn (curr_pool, insn);
8777 /* Don't split the pool chunk between a LTREL_OFFSET load
8778 and the corresponding LTREL_BASE. */
8779 if (GET_CODE (constant) == CONST
8780 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8781 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8783 gcc_assert (!pending_ltrel);
8784 pending_ltrel = pool_ref;
8789 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8792 s390_add_pool_insn (curr_pool, insn);
8793 /* An LTREL_BASE must follow within the same basic block. */
8794 gcc_assert (!pending_ltrel);
8798 switch (NOTE_KIND (insn))
8800 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8801 section_switch_p = true;
8803 case NOTE_INSN_VAR_LOCATION:
8804 case NOTE_INSN_CALL_ARG_LOCATION:
8811 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8812 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8815 if (TARGET_CPU_ZARCH)
8817 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8820 s390_end_pool (curr_pool, NULL);
8825 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8826 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8829 /* We will later have to insert base register reload insns.
8830 Those will have an effect on code size, which we need to
8831 consider here. This calculation makes rather pessimistic
8832 worst-case assumptions. */
8836 if (chunk_size < S390_POOL_CHUNK_MIN
8837 && curr_pool->size < S390_POOL_CHUNK_MIN
8838 && !section_switch_p)
8841 /* Pool chunks can only be inserted after BARRIERs ... */
8842 if (BARRIER_P (insn))
8844 s390_end_pool (curr_pool, insn);
8849 /* ... so if we don't find one in time, create one. */
8850 else if (chunk_size > S390_POOL_CHUNK_MAX
8851 || curr_pool->size > S390_POOL_CHUNK_MAX
8852 || section_switch_p)
8854 rtx_insn *label, *jump, *barrier, *next, *prev;
8856 if (!section_switch_p)
8858 /* We can insert the barrier only after a 'real' insn. */
8859 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8861 if (get_attr_length (insn) == 0)
8863 /* Don't separate LTREL_BASE from the corresponding
8864 LTREL_OFFSET load. */
8871 next = NEXT_INSN (insn);
8875 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8876 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8880 gcc_assert (!pending_ltrel);
8882 /* The old pool has to end before the section switch
8883 note in order to make it part of the current
8885 insn = PREV_INSN (insn);
8888 label = gen_label_rtx ();
8890 if (prev && NOTE_P (prev))
8891 prev = prev_nonnote_insn (prev);
8893 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8894 INSN_LOCATION (prev));
8896 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8897 barrier = emit_barrier_after (jump);
8898 insn = emit_label_after (label, barrier);
8899 JUMP_LABEL (jump) = label;
8900 LABEL_NUSES (label) = 1;
8902 INSN_ADDRESSES_NEW (jump, -1);
8903 INSN_ADDRESSES_NEW (barrier, -1);
8904 INSN_ADDRESSES_NEW (insn, -1);
8906 s390_end_pool (curr_pool, barrier);
8914 s390_end_pool (curr_pool, NULL);
8915 gcc_assert (!pending_ltrel);
8917 /* Find all labels that are branched into
8918 from an insn belonging to a different chunk. */
8920 far_labels = BITMAP_ALLOC (NULL);
8922 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8924 rtx_jump_table_data *table;
8926 /* Labels marked with LABEL_PRESERVE_P can be target
8927 of non-local jumps, so we have to mark them.
8928 The same holds for named labels.
8930 Don't do that, however, if it is the label before
8934 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8936 rtx_insn *vec_insn = NEXT_INSN (insn);
8937 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8938 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8940 /* Check potential targets in a table jump (casesi_jump). */
8941 else if (tablejump_p (insn, NULL, &table))
8943 rtx vec_pat = PATTERN (table);
8944 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8946 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8948 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8950 if (s390_find_pool (pool_list, label)
8951 != s390_find_pool (pool_list, insn))
8952 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8955 /* If we have a direct jump (conditional or unconditional),
8956 check all potential targets. */
8957 else if (JUMP_P (insn))
8959 rtx pat = PATTERN (insn);
8961 if (GET_CODE (pat) == PARALLEL)
8962 pat = XVECEXP (pat, 0, 0);
8964 if (GET_CODE (pat) == SET)
8966 rtx label = JUMP_LABEL (insn);
8967 if (label && !ANY_RETURN_P (label))
8969 if (s390_find_pool (pool_list, label)
8970 != s390_find_pool (pool_list, insn))
8971 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8977 /* Insert base register reload insns before every pool. */
8979 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8981 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8983 rtx_insn *insn = curr_pool->first_insn;
8984 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8987 /* Insert base register reload insns at every far label. */
8989 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8991 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8993 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8996 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8998 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9003 BITMAP_FREE (far_labels);
9006 /* Recompute insn addresses. */
9008 init_insn_lengths ();
9009 shorten_branches (get_insns ());
9014 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9015 After we have decided to use this list, finish implementing
9016 all changes to the current function as required. */
9019 s390_chunkify_finish (struct constant_pool *pool_list)
9021 struct constant_pool *curr_pool = NULL;
9025 /* Replace all literal pool references. */
9027 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9030 replace_ltrel_base (&PATTERN (insn));
9032 curr_pool = s390_find_pool (pool_list, insn);
9036 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9038 rtx addr, pool_ref = NULL_RTX;
9039 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9042 if (s390_execute_label (insn))
9043 addr = s390_find_execute (curr_pool, insn);
9045 addr = s390_find_constant (curr_pool,
9046 get_pool_constant (pool_ref),
9047 get_pool_mode (pool_ref));
9049 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9050 INSN_CODE (insn) = -1;
9055 /* Dump out all literal pools. */
9057 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9058 s390_dump_pool (curr_pool, 0);
9060 /* Free pool list. */
9064 struct constant_pool *next = pool_list->next;
9065 s390_free_pool (pool_list);
9070 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9071 We have decided we cannot use this list, so revert all changes
9072 to the current function that were done by s390_chunkify_start. */
9075 s390_chunkify_cancel (struct constant_pool *pool_list)
9077 struct constant_pool *curr_pool = NULL;
9080 /* Remove all pool placeholder insns. */
9082 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9084 /* Did we insert an extra barrier? Remove it. */
9085 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9086 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9087 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9089 if (jump && JUMP_P (jump)
9090 && barrier && BARRIER_P (barrier)
9091 && label && LABEL_P (label)
9092 && GET_CODE (PATTERN (jump)) == SET
9093 && SET_DEST (PATTERN (jump)) == pc_rtx
9094 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9095 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9098 remove_insn (barrier);
9099 remove_insn (label);
9102 remove_insn (curr_pool->pool_insn);
9105 /* Remove all base register reload insns. */
9107 for (insn = get_insns (); insn; )
9109 rtx_insn *next_insn = NEXT_INSN (insn);
9111 if (NONJUMP_INSN_P (insn)
9112 && GET_CODE (PATTERN (insn)) == SET
9113 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9114 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9120 /* Free pool list. */
9124 struct constant_pool *next = pool_list->next;
9125 s390_free_pool (pool_list);
9130 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9133 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9135 switch (GET_MODE_CLASS (mode))
9138 case MODE_DECIMAL_FLOAT:
9139 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9141 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
9145 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9146 mark_symbol_refs_as_used (exp);
9149 case MODE_VECTOR_INT:
9150 case MODE_VECTOR_FLOAT:
9153 machine_mode inner_mode;
9154 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9156 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9157 for (i = 0; i < XVECLEN (exp, 0); i++)
9158 s390_output_pool_entry (XVECEXP (exp, 0, i),
9162 : GET_MODE_BITSIZE (inner_mode));
9172 /* Return an RTL expression representing the value of the return address
9173 for the frame COUNT steps up from the current frame. FRAME is the
9174 frame pointer of that frame. */
9177 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9182 /* Without backchain, we fail for all but the current frame. */
9184 if (!TARGET_BACKCHAIN && count > 0)
9187 /* For the current frame, we need to make sure the initial
9188 value of RETURN_REGNUM is actually saved. */
9192 /* On non-z architectures branch splitting could overwrite r14. */
9193 if (TARGET_CPU_ZARCH)
9194 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9197 cfun_frame_layout.save_return_addr_p = true;
9198 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9202 if (TARGET_PACKED_STACK)
9203 offset = -2 * UNITS_PER_LONG;
9205 offset = RETURN_REGNUM * UNITS_PER_LONG;
9207 addr = plus_constant (Pmode, frame, offset);
9208 addr = memory_address (Pmode, addr);
9209 return gen_rtx_MEM (Pmode, addr);
9212 /* Return an RTL expression representing the back chain stored in
9213 the current stack frame. */
9216 s390_back_chain_rtx (void)
9220 gcc_assert (TARGET_BACKCHAIN);
9222 if (TARGET_PACKED_STACK)
9223 chain = plus_constant (Pmode, stack_pointer_rtx,
9224 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9226 chain = stack_pointer_rtx;
9228 chain = gen_rtx_MEM (Pmode, chain);
9232 /* Find first call clobbered register unused in a function.
9233 This could be used as base register in a leaf function
9234 or for holding the return address before epilogue. */
9237 find_unused_clobbered_reg (void)
9240 for (i = 0; i < 6; i++)
9241 if (!df_regs_ever_live_p (i))
9247 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9248 clobbered hard regs in SETREG. */
9251 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9253 char *regs_ever_clobbered = (char *)data;
9254 unsigned int i, regno;
9255 machine_mode mode = GET_MODE (setreg);
9257 if (GET_CODE (setreg) == SUBREG)
9259 rtx inner = SUBREG_REG (setreg);
9260 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9262 regno = subreg_regno (setreg);
9264 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9265 regno = REGNO (setreg);
9270 i < regno + HARD_REGNO_NREGS (regno, mode);
9272 regs_ever_clobbered[i] = 1;
9275 /* Walks through all basic blocks of the current function looking
9276 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9277 of the passed integer array REGS_EVER_CLOBBERED are set to one for
9278 each of those regs. */
9281 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9287 memset (regs_ever_clobbered, 0, 32);
9289 /* For non-leaf functions we have to consider all call clobbered regs to be
9293 for (i = 0; i < 32; i++)
9294 regs_ever_clobbered[i] = call_really_used_regs[i];
9297 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9298 this work is done by liveness analysis (mark_regs_live_at_end).
9299 Special care is needed for functions containing landing pads. Landing pads
9300 may use the eh registers, but the code which sets these registers is not
9301 contained in that function. Hence s390_regs_ever_clobbered is not able to
9302 deal with this automatically. */
9303 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9304 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9305 if (crtl->calls_eh_return
9306 || (cfun->machine->has_landing_pad_p
9307 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9308 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9310 /* For nonlocal gotos all call-saved registers have to be saved.
9311 This flag is also set for the unwinding code in libgcc.
9312 See expand_builtin_unwind_init. For regs_ever_live this is done by
9314 if (crtl->saves_all_registers)
9315 for (i = 0; i < 32; i++)
9316 if (!call_really_used_regs[i])
9317 regs_ever_clobbered[i] = 1;
9319 FOR_EACH_BB_FN (cur_bb, cfun)
9321 FOR_BB_INSNS (cur_bb, cur_insn)
9325 if (!INSN_P (cur_insn))
9328 pat = PATTERN (cur_insn);
9330 /* Ignore GPR restore insns. */
9331 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9333 if (GET_CODE (pat) == SET
9334 && GENERAL_REG_P (SET_DEST (pat)))
9337 if (GET_MODE (SET_SRC (pat)) == DImode
9338 && FP_REG_P (SET_SRC (pat)))
9342 if (GET_CODE (SET_SRC (pat)) == MEM)
9347 if (GET_CODE (pat) == PARALLEL
9348 && load_multiple_operation (pat, VOIDmode))
9353 s390_reg_clobbered_rtx,
9354 regs_ever_clobbered);
9359 /* Determine the frame area which actually has to be accessed
9360 in the function epilogue. The values are stored at the
9361 given pointers AREA_BOTTOM (address of the lowest used stack
9362 address) and AREA_TOP (address of the first item which does
9363 not belong to the stack frame). */
9366 s390_frame_area (int *area_bottom, int *area_top)
9373 if (cfun_frame_layout.first_restore_gpr != -1)
9375 b = (cfun_frame_layout.gprs_offset
9376 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9377 t = b + (cfun_frame_layout.last_restore_gpr
9378 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9381 if (TARGET_64BIT && cfun_save_high_fprs_p)
9383 b = MIN (b, cfun_frame_layout.f8_offset);
9384 t = MAX (t, (cfun_frame_layout.f8_offset
9385 + cfun_frame_layout.high_fprs * 8));
9390 if (cfun_fpr_save_p (FPR4_REGNUM))
9392 b = MIN (b, cfun_frame_layout.f4_offset);
9393 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9395 if (cfun_fpr_save_p (FPR6_REGNUM))
9397 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9398 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9404 /* Update gpr_save_slots in the frame layout trying to make use of
9405 FPRs as GPR save slots.
9406 This is a helper routine of s390_register_info. */
9409 s390_register_info_gprtofpr ()
9411 int save_reg_slot = FPR0_REGNUM;
9414 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9417 for (i = 15; i >= 6; i--)
9419 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9422 /* Advance to the next FP register which can be used as a
9424 while ((!call_really_used_regs[save_reg_slot]
9425 || df_regs_ever_live_p (save_reg_slot)
9426 || cfun_fpr_save_p (save_reg_slot))
9427 && FP_REGNO_P (save_reg_slot))
9429 if (!FP_REGNO_P (save_reg_slot))
9431 /* We only want to use ldgr/lgdr if we can get rid of
9432 stm/lm entirely. So undo the gpr slot allocation in
9433 case we ran out of FPR save slots. */
9434 for (j = 6; j <= 15; j++)
9435 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9436 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9439 cfun_gpr_save_slot (i) = save_reg_slot++;
9443 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9445 This is a helper routine for s390_register_info. */
9448 s390_register_info_stdarg_fpr ()
9454 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9455 f0-f4 for 64 bit. */
9457 || !TARGET_HARD_FLOAT
9458 || !cfun->va_list_fpr_size
9459 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9462 min_fpr = crtl->args.info.fprs;
9463 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9464 if (max_fpr >= FP_ARG_NUM_REG)
9465 max_fpr = FP_ARG_NUM_REG - 1;
9467 /* FPR argument regs start at f0. */
9468 min_fpr += FPR0_REGNUM;
9469 max_fpr += FPR0_REGNUM;
9471 for (i = min_fpr; i <= max_fpr; i++)
9472 cfun_set_fpr_save (i);
9475 /* Reserve the GPR save slots for GPRs which need to be saved due to
9477 This is a helper routine for s390_register_info. */
9480 s390_register_info_stdarg_gpr ()
9487 || !cfun->va_list_gpr_size
9488 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9491 min_gpr = crtl->args.info.gprs;
9492 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9493 if (max_gpr >= GP_ARG_NUM_REG)
9494 max_gpr = GP_ARG_NUM_REG - 1;
9496 /* GPR argument regs start at r2. */
9497 min_gpr += GPR2_REGNUM;
9498 max_gpr += GPR2_REGNUM;
9500 /* If r6 was supposed to be saved into an FPR and now needs to go to
9501 the stack for vararg we have to adjust the restore range to make
9502 sure that the restore is done from stack as well. */
9503 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9504 && min_gpr <= GPR6_REGNUM
9505 && max_gpr >= GPR6_REGNUM)
9507 if (cfun_frame_layout.first_restore_gpr == -1
9508 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9509 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9510 if (cfun_frame_layout.last_restore_gpr == -1
9511 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9512 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9515 if (cfun_frame_layout.first_save_gpr == -1
9516 || cfun_frame_layout.first_save_gpr > min_gpr)
9517 cfun_frame_layout.first_save_gpr = min_gpr;
9519 if (cfun_frame_layout.last_save_gpr == -1
9520 || cfun_frame_layout.last_save_gpr < max_gpr)
9521 cfun_frame_layout.last_save_gpr = max_gpr;
9523 for (i = min_gpr; i <= max_gpr; i++)
9524 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9527 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9528 prologue and epilogue. */
9531 s390_register_info_set_ranges ()
9535 /* Find the first and the last save slot supposed to use the stack
9536 to set the restore range.
9537 Vararg regs might be marked as save to stack but only the
9538 call-saved regs really need restoring (i.e. r6). This code
9539 assumes that the vararg regs have not yet been recorded in
9540 cfun_gpr_save_slot. */
9541 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9542 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9543 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9544 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9545 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9546 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9549 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9550 for registers which need to be saved in function prologue.
9551 This function can be used until the insns emitted for save/restore
9552 of the regs are visible in the RTL stream. */
9555 s390_register_info ()
9558 char clobbered_regs[32];
9560 gcc_assert (!epilogue_completed);
9562 if (reload_completed)
9563 /* After reload we rely on our own routine to determine which
9564 registers need saving. */
9565 s390_regs_ever_clobbered (clobbered_regs);
9567 /* During reload we use regs_ever_live as a base since reload
9568 does changes in there which we otherwise would not be aware
9570 for (i = 0; i < 32; i++)
9571 clobbered_regs[i] = df_regs_ever_live_p (i);
9573 for (i = 0; i < 32; i++)
9574 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9576 /* Mark the call-saved FPRs which need to be saved.
9577 This needs to be done before checking the special GPRs since the
9578 stack pointer usage depends on whether high FPRs have to be saved
9580 cfun_frame_layout.fpr_bitmap = 0;
9581 cfun_frame_layout.high_fprs = 0;
9582 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9583 if (clobbered_regs[i] && !call_really_used_regs[i])
9585 cfun_set_fpr_save (i);
9586 if (i >= FPR8_REGNUM)
9587 cfun_frame_layout.high_fprs++;
9590 /* Register 12 is used for GOT address, but also as temp in prologue
9591 for split-stack stdarg functions (unless r14 is available). */
9593 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9594 || (flag_split_stack && cfun->stdarg
9595 && (crtl->is_leaf || TARGET_TPF_PROFILING
9596 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9598 clobbered_regs[BASE_REGNUM]
9599 |= (cfun->machine->base_reg
9600 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9602 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9603 |= !!frame_pointer_needed;
9605 /* On pre z900 machines this might take until machine dependent
9607 save_return_addr_p will only be set on non-zarch machines so
9608 there is no risk that r14 goes into an FPR instead of a stack
9610 clobbered_regs[RETURN_REGNUM]
9612 || TARGET_TPF_PROFILING
9613 || cfun->machine->split_branches_pending_p
9614 || cfun_frame_layout.save_return_addr_p
9615 || crtl->calls_eh_return);
9617 clobbered_regs[STACK_POINTER_REGNUM]
9619 || TARGET_TPF_PROFILING
9620 || cfun_save_high_fprs_p
9621 || get_frame_size () > 0
9622 || (reload_completed && cfun_frame_layout.frame_size > 0)
9623 || cfun->calls_alloca);
9625 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9627 for (i = 6; i < 16; i++)
9628 if (clobbered_regs[i])
9629 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9631 s390_register_info_stdarg_fpr ();
9632 s390_register_info_gprtofpr ();
9633 s390_register_info_set_ranges ();
9634 /* stdarg functions might need to save GPRs 2 to 6. This might
9635 override the GPR->FPR save decision made by
9636 s390_register_info_gprtofpr for r6 since vararg regs must go to
9638 s390_register_info_stdarg_gpr ();
9641 /* This function is called by s390_optimize_prologue in order to get
9642 rid of unnecessary GPR save/restore instructions. The register info
9643 for the GPRs is re-computed and the ranges are re-calculated. */
9646 s390_optimize_register_info ()
9648 char clobbered_regs[32];
9651 gcc_assert (epilogue_completed);
9652 gcc_assert (!cfun->machine->split_branches_pending_p);
9654 s390_regs_ever_clobbered (clobbered_regs);
9656 for (i = 0; i < 32; i++)
9657 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9659 /* There is still special treatment needed for cases invisible to
9660 s390_regs_ever_clobbered. */
9661 clobbered_regs[RETURN_REGNUM]
9662 |= (TARGET_TPF_PROFILING
9663 /* When expanding builtin_return_addr in ESA mode we do not
9664 know whether r14 will later be needed as scratch reg when
9665 doing branch splitting. So the builtin always accesses the
9666 r14 save slot and we need to stick to the save/restore
9667 decision for r14 even if it turns out that it didn't get
9669 || cfun_frame_layout.save_return_addr_p
9670 || crtl->calls_eh_return);
9672 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
9674 for (i = 6; i < 16; i++)
9675 if (!clobbered_regs[i])
9676 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
9678 s390_register_info_set_ranges ();
9679 s390_register_info_stdarg_gpr ();
9682 /* Fill cfun->machine with info about frame of current function. */
9685 s390_frame_info (void)
9687 HOST_WIDE_INT lowest_offset;
9689 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9690 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9692 /* The va_arg builtin uses a constant distance of 16 *
9693 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9694 pointer. So even if we are going to save the stack pointer in an
9695 FPR we need the stack space in order to keep the offsets
9697 if (cfun->stdarg && cfun_save_arg_fprs_p)
9699 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9701 if (cfun_frame_layout.first_save_gpr_slot == -1)
9702 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9705 cfun_frame_layout.frame_size = get_frame_size ();
9706 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9707 fatal_error (input_location,
9708 "total size of local variables exceeds architecture limit");
9710 if (!TARGET_PACKED_STACK)
9712 /* Fixed stack layout. */
9713 cfun_frame_layout.backchain_offset = 0;
9714 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9715 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9716 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9717 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9720 else if (TARGET_BACKCHAIN)
9722 /* Kernel stack layout - packed stack, backchain, no float */
9723 gcc_assert (TARGET_SOFT_FLOAT);
9724 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9727 /* The distance between the backchain and the return address
9728 save slot must not change. So we always need a slot for the
9729 stack pointer which resides in between. */
9730 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9732 cfun_frame_layout.gprs_offset
9733 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9735 /* FPRs will not be saved. Nevertheless pick sane values to
9736 keep area calculations valid. */
9737 cfun_frame_layout.f0_offset =
9738 cfun_frame_layout.f4_offset =
9739 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9745 /* Packed stack layout without backchain. */
9747 /* With stdarg FPRs need their dedicated slots. */
9748 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9749 : (cfun_fpr_save_p (FPR4_REGNUM) +
9750 cfun_fpr_save_p (FPR6_REGNUM)));
9751 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9753 num_fprs = (cfun->stdarg ? 2
9754 : (cfun_fpr_save_p (FPR0_REGNUM)
9755 + cfun_fpr_save_p (FPR2_REGNUM)));
9756 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9758 cfun_frame_layout.gprs_offset
9759 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9761 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9762 - cfun_frame_layout.high_fprs * 8);
9765 if (cfun_save_high_fprs_p)
9766 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9769 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9771 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9772 sized area at the bottom of the stack. This is required also for
9773 leaf functions. When GCC generates a local stack reference it
9774 will always add STACK_POINTER_OFFSET to all these references. */
9776 && !TARGET_TPF_PROFILING
9777 && cfun_frame_layout.frame_size == 0
9778 && !cfun->calls_alloca)
9781 /* Calculate the number of bytes we have used in our own register
9782 save area. With the packed stack layout we can re-use the
9783 remaining bytes for normal stack elements. */
9785 if (TARGET_PACKED_STACK)
9786 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9787 cfun_frame_layout.f4_offset),
9788 cfun_frame_layout.gprs_offset);
9792 if (TARGET_BACKCHAIN)
9793 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9795 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9797 /* If under 31 bit an odd number of gprs has to be saved we have to
9798 adjust the frame size to sustain 8 byte alignment of stack
9800 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9801 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9802 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
9805 /* Generate frame layout. Fills in register and frame data for the current
9806 function in cfun->machine. This routine can be called multiple times;
9807 it will re-do the complete frame layout every time. */
9810 s390_init_frame_layout (void)
9812 HOST_WIDE_INT frame_size;
9815 /* After LRA the frame layout is supposed to be read-only and should
9816 not be re-computed. */
9817 if (reload_completed)
9820 /* On S/390 machines, we may need to perform branch splitting, which
9821 will require both base and return address register. We have no
9822 choice but to assume we're going to need them until right at the
9823 end of the machine dependent reorg phase. */
9824 if (!TARGET_CPU_ZARCH)
9825 cfun->machine->split_branches_pending_p = true;
9829 frame_size = cfun_frame_layout.frame_size;
9831 /* Try to predict whether we'll need the base register. */
9832 base_used = cfun->machine->split_branches_pending_p
9833 || crtl->uses_const_pool
9834 || (!DISP_IN_RANGE (frame_size)
9835 && !CONST_OK_FOR_K (frame_size));
9837 /* Decide which register to use as literal pool base. In small
9838 leaf functions, try to use an unused call-clobbered register
9839 as base register to avoid save/restore overhead. */
9841 cfun->machine->base_reg = NULL_RTX;
9847 /* Prefer r5 (most likely to be free). */
9848 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9850 cfun->machine->base_reg =
9851 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9854 s390_register_info ();
9857 while (frame_size != cfun_frame_layout.frame_size);
9860 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9861 the TX is nonescaping. A transaction is considered escaping if
9862 there is at least one path from tbegin returning CC0 to the
9863 function exit block without an tend.
9865 The check so far has some limitations:
9866 - only single tbegin/tend BBs are supported
9867 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9868 - when CC is copied to a GPR and the CC0 check is done with the GPR
9869 this is not supported
9873 s390_optimize_nonescaping_tx (void)
9875 const unsigned int CC0 = 1 << 3;
9876 basic_block tbegin_bb = NULL;
9877 basic_block tend_bb = NULL;
9882 rtx_insn *tbegin_insn = NULL;
9884 if (!cfun->machine->tbegin_p)
9887 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9889 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9894 FOR_BB_INSNS (bb, insn)
9896 rtx ite, cc, pat, target;
9897 unsigned HOST_WIDE_INT mask;
9899 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9902 pat = PATTERN (insn);
9904 if (GET_CODE (pat) == PARALLEL)
9905 pat = XVECEXP (pat, 0, 0);
9907 if (GET_CODE (pat) != SET
9908 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9911 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9917 /* Just return if the tbegin doesn't have clobbers. */
9918 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9921 if (tbegin_bb != NULL)
9924 /* Find the next conditional jump. */
9925 for (tmp = NEXT_INSN (insn);
9927 tmp = NEXT_INSN (tmp))
9929 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9934 ite = SET_SRC (PATTERN (tmp));
9935 if (GET_CODE (ite) != IF_THEN_ELSE)
9938 cc = XEXP (XEXP (ite, 0), 0);
9939 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9940 || GET_MODE (cc) != CCRAWmode
9941 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9944 if (bb->succs->length () != 2)
9947 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9948 if (GET_CODE (XEXP (ite, 0)) == NE)
9952 target = XEXP (ite, 1);
9953 else if (mask == (CC0 ^ 0xf))
9954 target = XEXP (ite, 2);
9962 ei = ei_start (bb->succs);
9963 e1 = ei_safe_edge (ei);
9965 e2 = ei_safe_edge (ei);
9967 if (e2->flags & EDGE_FALLTHRU)
9970 e1 = ei_safe_edge (ei);
9973 if (!(e1->flags & EDGE_FALLTHRU))
9976 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9978 if (tmp == BB_END (bb))
9983 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9985 if (tend_bb != NULL)
9992 /* Either we successfully remove the FPR clobbers here or we are not
9993 able to do anything for this TX. Both cases don't qualify for
9995 cfun->machine->tbegin_p = false;
9997 if (tbegin_bb == NULL || tend_bb == NULL)
10000 calculate_dominance_info (CDI_POST_DOMINATORS);
10001 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10002 free_dominance_info (CDI_POST_DOMINATORS);
10007 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10009 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10010 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10011 INSN_CODE (tbegin_insn) = -1;
10012 df_insn_rescan (tbegin_insn);
10017 /* Return true if it is legal to put a value with MODE into REGNO. */
10020 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10022 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10025 switch (REGNO_REG_CLASS (regno))
10028 return ((GET_MODE_CLASS (mode) == MODE_INT
10029 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10031 || s390_vector_mode_supported_p (mode));
10035 && ((GET_MODE_CLASS (mode) == MODE_INT
10036 && s390_class_max_nregs (FP_REGS, mode) == 1)
10038 || s390_vector_mode_supported_p (mode)))
10041 if (REGNO_PAIR_OK (regno, mode))
10043 if (mode == SImode || mode == DImode)
10046 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10051 if (FRAME_REGNO_P (regno) && mode == Pmode)
10056 if (REGNO_PAIR_OK (regno, mode))
10059 || (mode != TFmode && mode != TCmode && mode != TDmode))
10064 if (GET_MODE_CLASS (mode) == MODE_CC)
10068 if (REGNO_PAIR_OK (regno, mode))
10070 if (mode == SImode || mode == Pmode)
10081 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10084 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10086 /* Once we've decided upon a register to use as base register, it must
10087 no longer be used for any other purpose. */
10088 if (cfun->machine->base_reg)
10089 if (REGNO (cfun->machine->base_reg) == old_reg
10090 || REGNO (cfun->machine->base_reg) == new_reg)
10093 /* Prevent regrename from using call-saved regs which haven't
10094 actually been saved. This is necessary since regrename assumes
10095 the backend save/restore decisions are based on
10096 df_regs_ever_live. Since we have our own routine we have to tell
10097 regrename manually about it. */
10098 if (GENERAL_REGNO_P (new_reg)
10099 && !call_really_used_regs[new_reg]
10100 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10106 /* Return nonzero if register REGNO can be used as a scratch register
10110 s390_hard_regno_scratch_ok (unsigned int regno)
10112 /* See s390_hard_regno_rename_ok. */
10113 if (GENERAL_REGNO_P (regno)
10114 && !call_really_used_regs[regno]
10115 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10121 /* Maximum number of registers to represent a value of mode MODE
10122 in a register of class RCLASS. */
10125 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10128 bool reg_pair_required_p = false;
10134 reg_size = TARGET_VX ? 16 : 8;
10136 /* TF and TD modes would fit into a VR but we put them into a
10137 register pair since we do not have 128bit FP instructions on
10140 && SCALAR_FLOAT_MODE_P (mode)
10141 && GET_MODE_SIZE (mode) >= 16)
10142 reg_pair_required_p = true;
10144 /* Even if complex types would fit into a single FPR/VR we force
10145 them into a register pair to deal with the parts more easily.
10146 (FIXME: What about complex ints?) */
10147 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10148 reg_pair_required_p = true;
10154 reg_size = UNITS_PER_WORD;
10158 if (reg_pair_required_p)
10159 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10161 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
10164 /* Return TRUE if changing mode from FROM to TO should not be allowed
10165 for register class CLASS. */
10168 s390_cannot_change_mode_class (machine_mode from_mode,
10169 machine_mode to_mode,
10170 enum reg_class rclass)
10172 machine_mode small_mode;
10173 machine_mode big_mode;
10175 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10178 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10180 small_mode = from_mode;
10181 big_mode = to_mode;
10185 small_mode = to_mode;
10186 big_mode = from_mode;
10189 /* Values residing in VRs are little-endian style. All modes are
10190 placed left-aligned in an VR. This means that we cannot allow
10191 switching between modes with differing sizes. Also if the vector
10192 facility is available we still place TFmode values in VR register
10193 pairs, since the only instructions we have operating on TFmodes
10194 only deal with register pairs. Therefore we have to allow DFmode
10195 subregs of TFmodes to enable the TFmode splitters. */
10196 if (reg_classes_intersect_p (VEC_REGS, rclass)
10197 && (GET_MODE_SIZE (small_mode) < 8
10198 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10201 /* Likewise for access registers, since they have only half the
10202 word size on 64-bit. */
10203 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10209 /* Return true if we use LRA instead of reload pass. */
10213 return s390_lra_flag;
10216 /* Return true if register FROM can be eliminated via register TO. */
10219 s390_can_eliminate (const int from, const int to)
10221 /* On zSeries machines, we have not marked the base register as fixed.
10222 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10223 If a function requires the base register, we say here that this
10224 elimination cannot be performed. This will cause reload to free
10225 up the base register (as if it were fixed). On the other hand,
10226 if the current function does *not* require the base register, we
10227 say here the elimination succeeds, which in turn allows reload
10228 to allocate the base register for any other purpose. */
10229 if (from == BASE_REGNUM && to == BASE_REGNUM)
10231 if (TARGET_CPU_ZARCH)
10233 s390_init_frame_layout ();
10234 return cfun->machine->base_reg == NULL_RTX;
10240 /* Everything else must point into the stack frame. */
10241 gcc_assert (to == STACK_POINTER_REGNUM
10242 || to == HARD_FRAME_POINTER_REGNUM);
10244 gcc_assert (from == FRAME_POINTER_REGNUM
10245 || from == ARG_POINTER_REGNUM
10246 || from == RETURN_ADDRESS_POINTER_REGNUM);
10248 /* Make sure we actually saved the return address. */
10249 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10250 if (!crtl->calls_eh_return
10252 && !cfun_frame_layout.save_return_addr_p)
10258 /* Return offset between register FROM and TO initially after prolog. */
10261 s390_initial_elimination_offset (int from, int to)
10263 HOST_WIDE_INT offset;
10265 /* ??? Why are we called for non-eliminable pairs? */
10266 if (!s390_can_eliminate (from, to))
10271 case FRAME_POINTER_REGNUM:
10272 offset = (get_frame_size()
10273 + STACK_POINTER_OFFSET
10274 + crtl->outgoing_args_size);
10277 case ARG_POINTER_REGNUM:
10278 s390_init_frame_layout ();
10279 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10282 case RETURN_ADDRESS_POINTER_REGNUM:
10283 s390_init_frame_layout ();
10285 if (cfun_frame_layout.first_save_gpr_slot == -1)
10287 /* If it turns out that for stdarg nothing went into the reg
10288 save area we also do not need the return address
10290 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10293 gcc_unreachable ();
10296 /* In order to make the following work it is not necessary for
10297 r14 to have a save slot. It is sufficient if one other GPR
10298 got one. Since the GPRs are always stored without gaps we
10299 are able to calculate where the r14 save slot would
10301 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10302 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10311 gcc_unreachable ();
10317 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10318 to register BASE. Return generated insn. */
10321 save_fpr (rtx base, int offset, int regnum)
10324 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10326 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10327 set_mem_alias_set (addr, get_varargs_alias_set ());
10329 set_mem_alias_set (addr, get_frame_alias_set ());
10331 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10334 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10335 to register BASE. Return generated insn. */
10338 restore_fpr (rtx base, int offset, int regnum)
10341 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10342 set_mem_alias_set (addr, get_frame_alias_set ());
10344 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10347 /* Return true if REGNO is a global register, but not one
10348 of the special ones that need to be saved/restored in anyway. */
10351 global_not_special_regno_p (int regno)
10353 return (global_regs[regno]
10354 /* These registers are special and need to be
10355 restored in any case. */
10356 && !(regno == STACK_POINTER_REGNUM
10357 || regno == RETURN_REGNUM
10358 || regno == BASE_REGNUM
10359 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10362 /* Generate insn to save registers FIRST to LAST into
10363 the register save area located at offset OFFSET
10364 relative to register BASE. */
10367 save_gprs (rtx base, int offset, int first, int last)
10369 rtx addr, insn, note;
10372 addr = plus_constant (Pmode, base, offset);
10373 addr = gen_rtx_MEM (Pmode, addr);
10375 set_mem_alias_set (addr, get_frame_alias_set ());
10377 /* Special-case single register. */
10381 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10383 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10385 if (!global_not_special_regno_p (first))
10386 RTX_FRAME_RELATED_P (insn) = 1;
10391 insn = gen_store_multiple (addr,
10392 gen_rtx_REG (Pmode, first),
10393 GEN_INT (last - first + 1));
10395 if (first <= 6 && cfun->stdarg)
10396 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10398 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10400 if (first + i <= 6)
10401 set_mem_alias_set (mem, get_varargs_alias_set ());
10404 /* We need to set the FRAME_RELATED flag on all SETs
10405 inside the store-multiple pattern.
10407 However, we must not emit DWARF records for registers 2..5
10408 if they are stored for use by variable arguments ...
10410 ??? Unfortunately, it is not enough to simply not the
10411 FRAME_RELATED flags for those SETs, because the first SET
10412 of the PARALLEL is always treated as if it had the flag
10413 set, even if it does not. Therefore we emit a new pattern
10414 without those registers as REG_FRAME_RELATED_EXPR note. */
10416 if (first >= 6 && !global_not_special_regno_p (first))
10418 rtx pat = PATTERN (insn);
10420 for (i = 0; i < XVECLEN (pat, 0); i++)
10421 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10422 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10424 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10426 RTX_FRAME_RELATED_P (insn) = 1;
10428 else if (last >= 6)
10432 for (start = first >= 6 ? first : 6; start <= last; start++)
10433 if (!global_not_special_regno_p (start))
10439 addr = plus_constant (Pmode, base,
10440 offset + (start - first) * UNITS_PER_LONG);
10445 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10446 gen_rtx_REG (Pmode, start));
10448 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10449 gen_rtx_REG (Pmode, start));
10450 note = PATTERN (note);
10452 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10453 RTX_FRAME_RELATED_P (insn) = 1;
10458 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10459 gen_rtx_REG (Pmode, start),
10460 GEN_INT (last - start + 1));
10461 note = PATTERN (note);
10463 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10465 for (i = 0; i < XVECLEN (note, 0); i++)
10466 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10467 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10469 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10471 RTX_FRAME_RELATED_P (insn) = 1;
10477 /* Generate insn to restore registers FIRST to LAST from
10478 the register save area located at offset OFFSET
10479 relative to register BASE. */
10482 restore_gprs (rtx base, int offset, int first, int last)
10486 addr = plus_constant (Pmode, base, offset);
10487 addr = gen_rtx_MEM (Pmode, addr);
10488 set_mem_alias_set (addr, get_frame_alias_set ());
10490 /* Special-case single register. */
10494 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10496 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10498 RTX_FRAME_RELATED_P (insn) = 1;
10502 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10504 GEN_INT (last - first + 1));
10505 RTX_FRAME_RELATED_P (insn) = 1;
10509 /* Return insn sequence to load the GOT register. */
10511 static GTY(()) rtx got_symbol;
10513 s390_load_got (void)
10517 /* We cannot use pic_offset_table_rtx here since we use this
10518 function also for non-pic if __tls_get_offset is called and in
10519 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10521 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10525 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10526 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10531 if (TARGET_CPU_ZARCH)
10533 emit_move_insn (got_rtx, got_symbol);
10539 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10540 UNSPEC_LTREL_OFFSET);
10541 offset = gen_rtx_CONST (Pmode, offset);
10542 offset = force_const_mem (Pmode, offset);
10544 emit_move_insn (got_rtx, offset);
10546 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10547 UNSPEC_LTREL_BASE);
10548 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10550 emit_move_insn (got_rtx, offset);
10553 insns = get_insns ();
10558 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10559 and the change to the stack pointer. */
10562 s390_emit_stack_tie (void)
10564 rtx mem = gen_frame_mem (BLKmode,
10565 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10567 emit_insn (gen_stack_tie (mem));
10570 /* Copy GPRS into FPR save slots. */
10573 s390_save_gprs_to_fprs (void)
10577 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10580 for (i = 6; i < 16; i++)
10582 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10585 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10586 gen_rtx_REG (DImode, i));
10587 RTX_FRAME_RELATED_P (insn) = 1;
10588 /* This prevents dwarf2cfi from interpreting the set. Doing
10589 so it might emit def_cfa_register infos setting an FPR as
10591 add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
10596 /* Restore GPRs from FPR save slots. */
10599 s390_restore_gprs_from_fprs (void)
10603 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10606 for (i = 6; i < 16; i++)
10610 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
10613 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
10615 if (i == STACK_POINTER_REGNUM)
10616 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
10618 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
10620 df_set_regs_ever_live (i, true);
10621 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10622 if (i == STACK_POINTER_REGNUM)
10623 add_reg_note (insn, REG_CFA_DEF_CFA,
10624 plus_constant (Pmode, stack_pointer_rtx,
10625 STACK_POINTER_OFFSET));
10626 RTX_FRAME_RELATED_P (insn) = 1;
10631 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10636 const pass_data pass_data_s390_early_mach =
10638 RTL_PASS, /* type */
10639 "early_mach", /* name */
10640 OPTGROUP_NONE, /* optinfo_flags */
10641 TV_MACH_DEP, /* tv_id */
10642 0, /* properties_required */
10643 0, /* properties_provided */
10644 0, /* properties_destroyed */
10645 0, /* todo_flags_start */
10646 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10649 class pass_s390_early_mach : public rtl_opt_pass
10652 pass_s390_early_mach (gcc::context *ctxt)
10653 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10656 /* opt_pass methods: */
10657 virtual unsigned int execute (function *);
10659 }; // class pass_s390_early_mach
10662 pass_s390_early_mach::execute (function *fun)
10666 /* Try to get rid of the FPR clobbers. */
10667 s390_optimize_nonescaping_tx ();
10669 /* Re-compute register info. */
10670 s390_register_info ();
10672 /* If we're using a base register, ensure that it is always valid for
10673 the first non-prologue instruction. */
10674 if (fun->machine->base_reg)
10675 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10677 /* Annotate all constant pool references to let the scheduler know
10678 they implicitly use the base register. */
10679 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10682 annotate_constant_pool_refs (&PATTERN (insn));
10683 df_insn_rescan (insn);
10688 } // anon namespace
10690 /* Expand the prologue into a bunch of separate insns. */
10693 s390_emit_prologue (void)
10701 /* Choose best register to use for temp use within prologue.
10702 TPF with profiling must avoid the register 14 - the tracing function
10703 needs the original contents of r14 to be preserved. */
10705 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10707 && !TARGET_TPF_PROFILING)
10708 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10709 else if (flag_split_stack && cfun->stdarg)
10710 temp_reg = gen_rtx_REG (Pmode, 12);
10712 temp_reg = gen_rtx_REG (Pmode, 1);
10714 s390_save_gprs_to_fprs ();
10716 /* Save call saved gprs. */
10717 if (cfun_frame_layout.first_save_gpr != -1)
10719 insn = save_gprs (stack_pointer_rtx,
10720 cfun_frame_layout.gprs_offset +
10721 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10722 - cfun_frame_layout.first_save_gpr_slot),
10723 cfun_frame_layout.first_save_gpr,
10724 cfun_frame_layout.last_save_gpr);
10728 /* Dummy insn to mark literal pool slot. */
10730 if (cfun->machine->base_reg)
10731 emit_insn (gen_main_pool (cfun->machine->base_reg));
10733 offset = cfun_frame_layout.f0_offset;
10735 /* Save f0 and f2. */
10736 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10738 if (cfun_fpr_save_p (i))
10740 save_fpr (stack_pointer_rtx, offset, i);
10743 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10747 /* Save f4 and f6. */
10748 offset = cfun_frame_layout.f4_offset;
10749 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10751 if (cfun_fpr_save_p (i))
10753 insn = save_fpr (stack_pointer_rtx, offset, i);
10756 /* If f4 and f6 are call clobbered they are saved due to
10757 stdargs and therefore are not frame related. */
10758 if (!call_really_used_regs[i])
10759 RTX_FRAME_RELATED_P (insn) = 1;
10761 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10765 if (TARGET_PACKED_STACK
10766 && cfun_save_high_fprs_p
10767 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10769 offset = (cfun_frame_layout.f8_offset
10770 + (cfun_frame_layout.high_fprs - 1) * 8);
10772 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10773 if (cfun_fpr_save_p (i))
10775 insn = save_fpr (stack_pointer_rtx, offset, i);
10777 RTX_FRAME_RELATED_P (insn) = 1;
10780 if (offset >= cfun_frame_layout.f8_offset)
10784 if (!TARGET_PACKED_STACK)
10785 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10787 if (flag_stack_usage_info)
10788 current_function_static_stack_size = cfun_frame_layout.frame_size;
10790 /* Decrement stack pointer. */
10792 if (cfun_frame_layout.frame_size > 0)
10794 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10795 rtx real_frame_off;
10797 if (s390_stack_size)
10799 HOST_WIDE_INT stack_guard;
10801 if (s390_stack_guard)
10802 stack_guard = s390_stack_guard;
10805 /* If no value for stack guard is provided the smallest power of 2
10806 larger than the current frame size is chosen. */
10808 while (stack_guard < cfun_frame_layout.frame_size)
10812 if (cfun_frame_layout.frame_size >= s390_stack_size)
10814 warning (0, "frame size of function %qs is %wd"
10815 " bytes exceeding user provided stack limit of "
10817 "An unconditional trap is added.",
10818 current_function_name(), cfun_frame_layout.frame_size,
10820 emit_insn (gen_trap ());
10825 /* stack_guard has to be smaller than s390_stack_size.
10826 Otherwise we would emit an AND with zero which would
10827 not match the test under mask pattern. */
10828 if (stack_guard >= s390_stack_size)
10830 warning (0, "frame size of function %qs is %wd"
10831 " bytes which is more than half the stack size. "
10832 "The dynamic check would not be reliable. "
10833 "No check emitted for this function.",
10834 current_function_name(),
10835 cfun_frame_layout.frame_size);
10839 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10840 & ~(stack_guard - 1));
10842 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10843 GEN_INT (stack_check_mask));
10845 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10847 t, const0_rtx, const0_rtx));
10849 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10851 t, const0_rtx, const0_rtx));
10856 if (s390_warn_framesize > 0
10857 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10858 warning (0, "frame size of %qs is %wd bytes",
10859 current_function_name (), cfun_frame_layout.frame_size);
10861 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10862 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10864 /* Save incoming stack pointer into temp reg. */
10865 if (TARGET_BACKCHAIN || next_fpr)
10866 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10868 /* Subtract frame size from stack pointer. */
10870 if (DISP_IN_RANGE (INTVAL (frame_off)))
10872 insn = gen_rtx_SET (stack_pointer_rtx,
10873 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10875 insn = emit_insn (insn);
10879 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10880 frame_off = force_const_mem (Pmode, frame_off);
10882 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10883 annotate_constant_pool_refs (&PATTERN (insn));
10886 RTX_FRAME_RELATED_P (insn) = 1;
10887 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10888 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10889 gen_rtx_SET (stack_pointer_rtx,
10890 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10893 /* Set backchain. */
10895 if (TARGET_BACKCHAIN)
10897 if (cfun_frame_layout.backchain_offset)
10898 addr = gen_rtx_MEM (Pmode,
10899 plus_constant (Pmode, stack_pointer_rtx,
10900 cfun_frame_layout.backchain_offset));
10902 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10903 set_mem_alias_set (addr, get_frame_alias_set ());
10904 insn = emit_insn (gen_move_insn (addr, temp_reg));
10907 /* If we support non-call exceptions (e.g. for Java),
10908 we need to make sure the backchain pointer is set up
10909 before any possibly trapping memory access. */
10910 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10912 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10913 emit_clobber (addr);
10917 /* Save fprs 8 - 15 (64 bit ABI). */
10919 if (cfun_save_high_fprs_p && next_fpr)
10921 /* If the stack might be accessed through a different register
10922 we have to make sure that the stack pointer decrement is not
10923 moved below the use of the stack slots. */
10924 s390_emit_stack_tie ();
10926 insn = emit_insn (gen_add2_insn (temp_reg,
10927 GEN_INT (cfun_frame_layout.f8_offset)));
10931 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10932 if (cfun_fpr_save_p (i))
10934 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10935 cfun_frame_layout.frame_size
10936 + cfun_frame_layout.f8_offset
10939 insn = save_fpr (temp_reg, offset, i);
10941 RTX_FRAME_RELATED_P (insn) = 1;
10942 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10943 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10944 gen_rtx_REG (DFmode, i)));
10948 /* Set frame pointer, if needed. */
10950 if (frame_pointer_needed)
10952 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10953 RTX_FRAME_RELATED_P (insn) = 1;
10956 /* Set up got pointer, if needed. */
10958 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10960 rtx_insn *insns = s390_load_got ();
10962 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10963 annotate_constant_pool_refs (&PATTERN (insn));
10968 if (TARGET_TPF_PROFILING)
10970 /* Generate a BAS instruction to serve as a function
10971 entry intercept to facilitate the use of tracing
10972 algorithms located at the branch target. */
10973 emit_insn (gen_prologue_tpf ());
10975 /* Emit a blockage here so that all code
10976 lies between the profiling mechanisms. */
10977 emit_insn (gen_blockage ());
10981 /* Expand the epilogue into a bunch of separate insns. */
10984 s390_emit_epilogue (bool sibcall)
10986 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10987 int area_bottom, area_top, offset = 0;
10992 if (TARGET_TPF_PROFILING)
10995 /* Generate a BAS instruction to serve as a function
10996 entry intercept to facilitate the use of tracing
10997 algorithms located at the branch target. */
10999 /* Emit a blockage here so that all code
11000 lies between the profiling mechanisms. */
11001 emit_insn (gen_blockage ());
11003 emit_insn (gen_epilogue_tpf ());
11006 /* Check whether to use frame or stack pointer for restore. */
11008 frame_pointer = (frame_pointer_needed
11009 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11011 s390_frame_area (&area_bottom, &area_top);
11013 /* Check whether we can access the register save area.
11014 If not, increment the frame pointer as required. */
11016 if (area_top <= area_bottom)
11018 /* Nothing to restore. */
11020 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11021 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11023 /* Area is in range. */
11024 offset = cfun_frame_layout.frame_size;
11028 rtx insn, frame_off, cfa;
11030 offset = area_bottom < 0 ? -area_bottom : 0;
11031 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11033 cfa = gen_rtx_SET (frame_pointer,
11034 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11035 if (DISP_IN_RANGE (INTVAL (frame_off)))
11037 insn = gen_rtx_SET (frame_pointer,
11038 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11039 insn = emit_insn (insn);
11043 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11044 frame_off = force_const_mem (Pmode, frame_off);
11046 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11047 annotate_constant_pool_refs (&PATTERN (insn));
11049 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11050 RTX_FRAME_RELATED_P (insn) = 1;
11053 /* Restore call saved fprs. */
11057 if (cfun_save_high_fprs_p)
11059 next_offset = cfun_frame_layout.f8_offset;
11060 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11062 if (cfun_fpr_save_p (i))
11064 restore_fpr (frame_pointer,
11065 offset + next_offset, i);
11067 = alloc_reg_note (REG_CFA_RESTORE,
11068 gen_rtx_REG (DFmode, i), cfa_restores);
11077 next_offset = cfun_frame_layout.f4_offset;
11079 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11081 if (cfun_fpr_save_p (i))
11083 restore_fpr (frame_pointer,
11084 offset + next_offset, i);
11086 = alloc_reg_note (REG_CFA_RESTORE,
11087 gen_rtx_REG (DFmode, i), cfa_restores);
11090 else if (!TARGET_PACKED_STACK)
11096 /* Return register. */
11098 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11100 /* Restore call saved gprs. */
11102 if (cfun_frame_layout.first_restore_gpr != -1)
11107 /* Check for global register and save them
11108 to stack location from where they get restored. */
11110 for (i = cfun_frame_layout.first_restore_gpr;
11111 i <= cfun_frame_layout.last_restore_gpr;
11114 if (global_not_special_regno_p (i))
11116 addr = plus_constant (Pmode, frame_pointer,
11117 offset + cfun_frame_layout.gprs_offset
11118 + (i - cfun_frame_layout.first_save_gpr_slot)
11120 addr = gen_rtx_MEM (Pmode, addr);
11121 set_mem_alias_set (addr, get_frame_alias_set ());
11122 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11126 = alloc_reg_note (REG_CFA_RESTORE,
11127 gen_rtx_REG (Pmode, i), cfa_restores);
11132 /* Fetch return address from stack before load multiple,
11133 this will do good for scheduling.
11135 Only do this if we already decided that r14 needs to be
11136 saved to a stack slot. (And not just because r14 happens to
11137 be in between two GPRs which need saving.) Otherwise it
11138 would be difficult to take that decision back in
11139 s390_optimize_prologue. */
11140 if (cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK)
11142 int return_regnum = find_unused_clobbered_reg();
11143 if (!return_regnum)
11145 return_reg = gen_rtx_REG (Pmode, return_regnum);
11147 addr = plus_constant (Pmode, frame_pointer,
11148 offset + cfun_frame_layout.gprs_offset
11150 - cfun_frame_layout.first_save_gpr_slot)
11152 addr = gen_rtx_MEM (Pmode, addr);
11153 set_mem_alias_set (addr, get_frame_alias_set ());
11154 emit_move_insn (return_reg, addr);
11156 /* Once we did that optimization we have to make sure
11157 s390_optimize_prologue does not try to remove the
11158 store of r14 since we will not be able to find the
11159 load issued here. */
11160 cfun_frame_layout.save_return_addr_p = true;
11164 insn = restore_gprs (frame_pointer,
11165 offset + cfun_frame_layout.gprs_offset
11166 + (cfun_frame_layout.first_restore_gpr
11167 - cfun_frame_layout.first_save_gpr_slot)
11169 cfun_frame_layout.first_restore_gpr,
11170 cfun_frame_layout.last_restore_gpr);
11171 insn = emit_insn (insn);
11172 REG_NOTES (insn) = cfa_restores;
11173 add_reg_note (insn, REG_CFA_DEF_CFA,
11174 plus_constant (Pmode, stack_pointer_rtx,
11175 STACK_POINTER_OFFSET));
11176 RTX_FRAME_RELATED_P (insn) = 1;
11179 s390_restore_gprs_from_fprs ();
11184 /* Return to caller. */
11186 p = rtvec_alloc (2);
11188 RTVEC_ELT (p, 0) = ret_rtx;
11189 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11190 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11194 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11197 s300_set_up_by_prologue (hard_reg_set_container *regs)
11199 if (cfun->machine->base_reg
11200 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11201 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11204 /* -fsplit-stack support. */
11206 /* A SYMBOL_REF for __morestack. */
11207 static GTY(()) rtx morestack_ref;
11209 /* When using -fsplit-stack, the allocation routines set a field in
11210 the TCB to the bottom of the stack plus this much space, measured
11213 #define SPLIT_STACK_AVAILABLE 1024
11215 /* Emit -fsplit-stack prologue, which goes before the regular function
11219 s390_expand_split_stack_prologue (void)
11221 rtx r1, guard, cc = NULL;
11223 /* Offset from thread pointer to __private_ss. */
11224 int psso = TARGET_64BIT ? 0x38 : 0x20;
11225 /* Pointer size in bytes. */
11226 /* Frame size and argument size - the two parameters to __morestack. */
11227 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11228 /* Align argument size to 8 bytes - simplifies __morestack code. */
11229 HOST_WIDE_INT args_size = crtl->args.size >= 0
11230 ? ((crtl->args.size + 7) & ~7)
11232 /* Label to be called by __morestack. */
11233 rtx_code_label *call_done = NULL;
11234 rtx_code_label *parm_base = NULL;
11237 gcc_assert (flag_split_stack && reload_completed);
11238 if (!TARGET_CPU_ZARCH)
11240 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11244 r1 = gen_rtx_REG (Pmode, 1);
11246 /* If no stack frame will be allocated, don't do anything. */
11249 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11251 /* If va_start is used, just use r15. */
11252 emit_move_insn (r1,
11253 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11254 GEN_INT (STACK_POINTER_OFFSET)));
11260 if (morestack_ref == NULL_RTX)
11262 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11263 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11264 | SYMBOL_FLAG_FUNCTION);
11267 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11269 /* If frame_size will fit in an add instruction, do a stack space
11270 check, and only call __morestack if there's not enough space. */
11272 /* Get thread pointer. r1 is the only register we can always destroy - r0
11273 could contain a static chain (and cannot be used to address memory
11274 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11275 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11276 /* Aim at __private_ss. */
11277 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11279 /* If less that 1kiB used, skip addition and compare directly with
11281 if (frame_size > SPLIT_STACK_AVAILABLE)
11283 emit_move_insn (r1, guard);
11285 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11287 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11291 /* Compare the (maybe adjusted) guard with the stack pointer. */
11292 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11295 call_done = gen_label_rtx ();
11296 parm_base = gen_label_rtx ();
11298 /* Emit the parameter block. */
11299 tmp = gen_split_stack_data (parm_base, call_done,
11300 GEN_INT (frame_size),
11301 GEN_INT (args_size));
11302 insn = emit_insn (tmp);
11303 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11304 LABEL_NUSES (call_done)++;
11305 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11306 LABEL_NUSES (parm_base)++;
11308 /* %r1 = litbase. */
11309 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11310 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11311 LABEL_NUSES (parm_base)++;
11313 /* Now, we need to call __morestack. It has very special calling
11314 conventions: it preserves param/return/static chain registers for
11315 calling main function body, and looks for its own parameters at %r1. */
11319 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11321 insn = emit_jump_insn (tmp);
11322 JUMP_LABEL (insn) = call_done;
11323 LABEL_NUSES (call_done)++;
11325 /* Mark the jump as very unlikely to be taken. */
11326 add_int_reg_note (insn, REG_BR_PROB, REG_BR_PROB_BASE / 100);
11328 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11330 /* If va_start is used, and __morestack was not called, just use
11332 emit_move_insn (r1,
11333 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11334 GEN_INT (STACK_POINTER_OFFSET)));
11339 tmp = gen_split_stack_call (morestack_ref, call_done);
11340 insn = emit_jump_insn (tmp);
11341 JUMP_LABEL (insn) = call_done;
11342 LABEL_NUSES (call_done)++;
11346 /* __morestack will call us here. */
11348 emit_label (call_done);
11351 /* We may have to tell the dataflow pass that the split stack prologue
11352 is initializing a register. */
11355 s390_live_on_entry (bitmap regs)
11357 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11359 gcc_assert (flag_split_stack);
11360 bitmap_set_bit (regs, 1);
11364 /* Return true if the function can use simple_return to return outside
11365 of a shrink-wrapped region. At present shrink-wrapping is supported
11369 s390_can_use_simple_return_insn (void)
11374 /* Return true if the epilogue is guaranteed to contain only a return
11375 instruction and if a direct return can therefore be used instead.
11376 One of the main advantages of using direct return instructions
11377 is that we can then use conditional returns. */
11380 s390_can_use_return_insn (void)
11384 if (!reload_completed)
11390 if (TARGET_TPF_PROFILING)
11393 for (i = 0; i < 16; i++)
11394 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11397 /* For 31 bit this is not covered by the frame_size check below
11398 since f4, f6 are saved in the register save area without needing
11399 additional stack space. */
11401 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11404 if (cfun->machine->base_reg
11405 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11408 return cfun_frame_layout.frame_size == 0;
11411 /* The VX ABI differs for vararg functions. Therefore we need the
11412 prototype of the callee to be available when passing vector type
11414 static const char *
11415 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11417 return ((TARGET_VX_ABI
11419 && VECTOR_TYPE_P (TREE_TYPE (val))
11420 && (funcdecl == NULL_TREE
11421 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11422 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11423 ? N_("Vector argument passed to unprototyped function")
11428 /* Return the size in bytes of a function argument of
11429 type TYPE and/or mode MODE. At least one of TYPE or
11430 MODE must be specified. */
11433 s390_function_arg_size (machine_mode mode, const_tree type)
11436 return int_size_in_bytes (type);
11438 /* No type info available for some library calls ... */
11439 if (mode != BLKmode)
11440 return GET_MODE_SIZE (mode);
11442 /* If we have neither type nor mode, abort */
11443 gcc_unreachable ();
11446 /* Return true if a function argument of type TYPE and mode MODE
11447 is to be passed in a vector register, if available. */
11450 s390_function_arg_vector (machine_mode mode, const_tree type)
11452 if (!TARGET_VX_ABI)
11455 if (s390_function_arg_size (mode, type) > 16)
11458 /* No type info available for some library calls ... */
11460 return VECTOR_MODE_P (mode);
11462 /* The ABI says that record types with a single member are treated
11463 just like that member would be. */
11464 while (TREE_CODE (type) == RECORD_TYPE)
11466 tree field, single = NULL_TREE;
11468 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11470 if (TREE_CODE (field) != FIELD_DECL)
11473 if (single == NULL_TREE)
11474 single = TREE_TYPE (field);
11479 if (single == NULL_TREE)
11483 /* If the field declaration adds extra byte due to
11484 e.g. padding this is not accepted as vector type. */
11485 if (int_size_in_bytes (single) <= 0
11486 || int_size_in_bytes (single) != int_size_in_bytes (type))
11492 return VECTOR_TYPE_P (type);
11495 /* Return true if a function argument of type TYPE and mode MODE
11496 is to be passed in a floating-point register, if available. */
11499 s390_function_arg_float (machine_mode mode, const_tree type)
11501 if (s390_function_arg_size (mode, type) > 8)
11504 /* Soft-float changes the ABI: no floating-point registers are used. */
11505 if (TARGET_SOFT_FLOAT)
11508 /* No type info available for some library calls ... */
11510 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11512 /* The ABI says that record types with a single member are treated
11513 just like that member would be. */
11514 while (TREE_CODE (type) == RECORD_TYPE)
11516 tree field, single = NULL_TREE;
11518 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11520 if (TREE_CODE (field) != FIELD_DECL)
11523 if (single == NULL_TREE)
11524 single = TREE_TYPE (field);
11529 if (single == NULL_TREE)
11535 return TREE_CODE (type) == REAL_TYPE;
11538 /* Return true if a function argument of type TYPE and mode MODE
11539 is to be passed in an integer register, or a pair of integer
11540 registers, if available. */
11543 s390_function_arg_integer (machine_mode mode, const_tree type)
11545 int size = s390_function_arg_size (mode, type);
11549 /* No type info available for some library calls ... */
11551 return GET_MODE_CLASS (mode) == MODE_INT
11552 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11554 /* We accept small integral (and similar) types. */
11555 if (INTEGRAL_TYPE_P (type)
11556 || POINTER_TYPE_P (type)
11557 || TREE_CODE (type) == NULLPTR_TYPE
11558 || TREE_CODE (type) == OFFSET_TYPE
11559 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11562 /* We also accept structs of size 1, 2, 4, 8 that are not
11563 passed in floating-point registers. */
11564 if (AGGREGATE_TYPE_P (type)
11565 && exact_log2 (size) >= 0
11566 && !s390_function_arg_float (mode, type))
11572 /* Return 1 if a function argument of type TYPE and mode MODE
11573 is to be passed by reference. The ABI specifies that only
11574 structures of size 1, 2, 4, or 8 bytes are passed by value,
11575 all other structures (and complex numbers) are passed by
11579 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11580 machine_mode mode, const_tree type,
11581 bool named ATTRIBUTE_UNUSED)
11583 int size = s390_function_arg_size (mode, type);
11585 if (s390_function_arg_vector (mode, type))
11593 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11596 if (TREE_CODE (type) == COMPLEX_TYPE
11597 || TREE_CODE (type) == VECTOR_TYPE)
11604 /* Update the data in CUM to advance over an argument of mode MODE and
11605 data type TYPE. (TYPE is null for libcalls where that information
11606 may not be available.). The boolean NAMED specifies whether the
11607 argument is a named argument (as opposed to an unnamed argument
11608 matching an ellipsis). */
11611 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11612 const_tree type, bool named)
11614 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11616 if (s390_function_arg_vector (mode, type))
11618 /* We are called for unnamed vector stdarg arguments which are
11619 passed on the stack. In this case this hook does not have to
11620 do anything since stack arguments are tracked by common
11626 else if (s390_function_arg_float (mode, type))
11630 else if (s390_function_arg_integer (mode, type))
11632 int size = s390_function_arg_size (mode, type);
11633 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11636 gcc_unreachable ();
11639 /* Define where to put the arguments to a function.
11640 Value is zero to push the argument on the stack,
11641 or a hard register in which to store the argument.
11643 MODE is the argument's machine mode.
11644 TYPE is the data type of the argument (as a tree).
11645 This is null for libcalls where that information may
11647 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11648 the preceding args and about the function being called.
11649 NAMED is nonzero if this argument is a named parameter
11650 (otherwise it is an extra parameter matching an ellipsis).
11652 On S/390, we use general purpose registers 2 through 6 to
11653 pass integer, pointer, and certain structure arguments, and
11654 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11655 to pass floating point arguments. All remaining arguments
11656 are pushed to the stack. */
11659 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11660 const_tree type, bool named)
11662 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11665 s390_check_type_for_vector_abi (type, true, false);
11667 if (s390_function_arg_vector (mode, type))
11669 /* Vector arguments being part of the ellipsis are passed on the
11671 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11674 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11676 else if (s390_function_arg_float (mode, type))
11678 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11681 return gen_rtx_REG (mode, cum->fprs + 16);
11683 else if (s390_function_arg_integer (mode, type))
11685 int size = s390_function_arg_size (mode, type);
11686 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11688 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11690 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11691 return gen_rtx_REG (mode, cum->gprs + 2);
11692 else if (n_gprs == 2)
11694 rtvec p = rtvec_alloc (2);
11697 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11700 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11703 return gen_rtx_PARALLEL (mode, p);
11707 /* After the real arguments, expand_call calls us once again
11708 with a void_type_node type. Whatever we return here is
11709 passed as operand 2 to the call expanders.
11711 We don't need this feature ... */
11712 else if (type == void_type_node)
11715 gcc_unreachable ();
11718 /* Return true if return values of type TYPE should be returned
11719 in a memory buffer whose address is passed by the caller as
11720 hidden first argument. */
11723 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11725 /* We accept small integral (and similar) types. */
11726 if (INTEGRAL_TYPE_P (type)
11727 || POINTER_TYPE_P (type)
11728 || TREE_CODE (type) == OFFSET_TYPE
11729 || TREE_CODE (type) == REAL_TYPE)
11730 return int_size_in_bytes (type) > 8;
11732 /* vector types which fit into a VR. */
11734 && VECTOR_TYPE_P (type)
11735 && int_size_in_bytes (type) <= 16)
11738 /* Aggregates and similar constructs are always returned
11740 if (AGGREGATE_TYPE_P (type)
11741 || TREE_CODE (type) == COMPLEX_TYPE
11742 || VECTOR_TYPE_P (type))
11745 /* ??? We get called on all sorts of random stuff from
11746 aggregate_value_p. We can't abort, but it's not clear
11747 what's safe to return. Pretend it's a struct I guess. */
11751 /* Function arguments and return values are promoted to word size. */
11753 static machine_mode
11754 s390_promote_function_mode (const_tree type, machine_mode mode,
11756 const_tree fntype ATTRIBUTE_UNUSED,
11757 int for_return ATTRIBUTE_UNUSED)
11759 if (INTEGRAL_MODE_P (mode)
11760 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11762 if (type != NULL_TREE && POINTER_TYPE_P (type))
11763 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11770 /* Define where to return a (scalar) value of type RET_TYPE.
11771 If RET_TYPE is null, define where to return a (scalar)
11772 value of mode MODE from a libcall. */
11775 s390_function_and_libcall_value (machine_mode mode,
11776 const_tree ret_type,
11777 const_tree fntype_or_decl,
11778 bool outgoing ATTRIBUTE_UNUSED)
11780 /* For vector return types it is important to use the RET_TYPE
11781 argument whenever available since the middle-end might have
11782 changed the mode to a scalar mode. */
11783 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11784 || (!ret_type && VECTOR_MODE_P (mode)));
11786 /* For normal functions perform the promotion as
11787 promote_function_mode would do. */
11790 int unsignedp = TYPE_UNSIGNED (ret_type);
11791 mode = promote_function_mode (ret_type, mode, &unsignedp,
11792 fntype_or_decl, 1);
11795 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11796 || SCALAR_FLOAT_MODE_P (mode)
11797 || (TARGET_VX_ABI && vector_ret_type_p));
11798 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11800 if (TARGET_VX_ABI && vector_ret_type_p)
11801 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11802 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11803 return gen_rtx_REG (mode, 16);
11804 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11805 || UNITS_PER_LONG == UNITS_PER_WORD)
11806 return gen_rtx_REG (mode, 2);
11807 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11809 /* This case is triggered when returning a 64 bit value with
11810 -m31 -mzarch. Although the value would fit into a single
11811 register it has to be forced into a 32 bit register pair in
11812 order to match the ABI. */
11813 rtvec p = rtvec_alloc (2);
11816 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11818 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11820 return gen_rtx_PARALLEL (mode, p);
11823 gcc_unreachable ();
11826 /* Define where to return a scalar return value of type RET_TYPE. */
11829 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11832 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11833 fn_decl_or_type, outgoing);
11836 /* Define where to return a scalar libcall return value of mode
11840 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11842 return s390_function_and_libcall_value (mode, NULL_TREE,
11847 /* Create and return the va_list datatype.
11849 On S/390, va_list is an array type equivalent to
11851 typedef struct __va_list_tag
11855 void *__overflow_arg_area;
11856 void *__reg_save_area;
11859 where __gpr and __fpr hold the number of general purpose
11860 or floating point arguments used up to now, respectively,
11861 __overflow_arg_area points to the stack location of the
11862 next argument passed on the stack, and __reg_save_area
11863 always points to the start of the register area in the
11864 call frame of the current function. The function prologue
11865 saves all registers used for argument passing into this
11866 area if the function uses variable arguments. */
11869 s390_build_builtin_va_list (void)
11871 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11873 record = lang_hooks.types.make_type (RECORD_TYPE);
11876 build_decl (BUILTINS_LOCATION,
11877 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11879 f_gpr = build_decl (BUILTINS_LOCATION,
11880 FIELD_DECL, get_identifier ("__gpr"),
11881 long_integer_type_node);
11882 f_fpr = build_decl (BUILTINS_LOCATION,
11883 FIELD_DECL, get_identifier ("__fpr"),
11884 long_integer_type_node);
11885 f_ovf = build_decl (BUILTINS_LOCATION,
11886 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11888 f_sav = build_decl (BUILTINS_LOCATION,
11889 FIELD_DECL, get_identifier ("__reg_save_area"),
11892 va_list_gpr_counter_field = f_gpr;
11893 va_list_fpr_counter_field = f_fpr;
11895 DECL_FIELD_CONTEXT (f_gpr) = record;
11896 DECL_FIELD_CONTEXT (f_fpr) = record;
11897 DECL_FIELD_CONTEXT (f_ovf) = record;
11898 DECL_FIELD_CONTEXT (f_sav) = record;
11900 TYPE_STUB_DECL (record) = type_decl;
11901 TYPE_NAME (record) = type_decl;
11902 TYPE_FIELDS (record) = f_gpr;
11903 DECL_CHAIN (f_gpr) = f_fpr;
11904 DECL_CHAIN (f_fpr) = f_ovf;
11905 DECL_CHAIN (f_ovf) = f_sav;
11907 layout_type (record);
11909 /* The correct type is an array type of one element. */
11910 return build_array_type (record, build_index_type (size_zero_node));
11913 /* Implement va_start by filling the va_list structure VALIST.
11914 STDARG_P is always true, and ignored.
11915 NEXTARG points to the first anonymous stack argument.
11917 The following global variables are used to initialize
11918 the va_list structure:
11921 holds number of gprs and fprs used for named arguments.
11922 crtl->args.arg_offset_rtx:
11923 holds the offset of the first anonymous stack argument
11924 (relative to the virtual arg pointer). */
11927 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11929 HOST_WIDE_INT n_gpr, n_fpr;
11931 tree f_gpr, f_fpr, f_ovf, f_sav;
11932 tree gpr, fpr, ovf, sav, t;
11934 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11935 f_fpr = DECL_CHAIN (f_gpr);
11936 f_ovf = DECL_CHAIN (f_fpr);
11937 f_sav = DECL_CHAIN (f_ovf);
11939 valist = build_simple_mem_ref (valist);
11940 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11941 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11942 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11943 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11945 /* Count number of gp and fp argument registers used. */
11947 n_gpr = crtl->args.info.gprs;
11948 n_fpr = crtl->args.info.fprs;
11950 if (cfun->va_list_gpr_size)
11952 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11953 build_int_cst (NULL_TREE, n_gpr));
11954 TREE_SIDE_EFFECTS (t) = 1;
11955 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11958 if (cfun->va_list_fpr_size)
11960 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11961 build_int_cst (NULL_TREE, n_fpr));
11962 TREE_SIDE_EFFECTS (t) = 1;
11963 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11966 if (flag_split_stack
11967 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
11969 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
11974 reg = gen_reg_rtx (Pmode);
11975 cfun->machine->split_stack_varargs_pointer = reg;
11978 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
11979 seq = get_insns ();
11982 push_topmost_sequence ();
11983 emit_insn_after (seq, entry_of_function ());
11984 pop_topmost_sequence ();
11987 /* Find the overflow area.
11988 FIXME: This currently is too pessimistic when the vector ABI is
11989 enabled. In that case we *always* set up the overflow area
11991 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11992 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11995 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
11996 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11998 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12000 off = INTVAL (crtl->args.arg_offset_rtx);
12001 off = off < 0 ? 0 : off;
12002 if (TARGET_DEBUG_ARG)
12003 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12004 (int)n_gpr, (int)n_fpr, off);
12006 t = fold_build_pointer_plus_hwi (t, off);
12008 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12009 TREE_SIDE_EFFECTS (t) = 1;
12010 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12013 /* Find the register save area. */
12014 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12015 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12017 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12018 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12020 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12021 TREE_SIDE_EFFECTS (t) = 1;
12022 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12026 /* Implement va_arg by updating the va_list structure
12027 VALIST as required to retrieve an argument of type
12028 TYPE, and returning that argument.
12030 Generates code equivalent to:
12032 if (integral value) {
12033 if (size <= 4 && args.gpr < 5 ||
12034 size > 4 && args.gpr < 4 )
12035 ret = args.reg_save_area[args.gpr+8]
12037 ret = *args.overflow_arg_area++;
12038 } else if (vector value) {
12039 ret = *args.overflow_arg_area;
12040 args.overflow_arg_area += size / 8;
12041 } else if (float value) {
12043 ret = args.reg_save_area[args.fpr+64]
12045 ret = *args.overflow_arg_area++;
12046 } else if (aggregate value) {
12048 ret = *args.reg_save_area[args.gpr]
12050 ret = **args.overflow_arg_area++;
12054 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12055 gimple_seq *post_p ATTRIBUTE_UNUSED)
12057 tree f_gpr, f_fpr, f_ovf, f_sav;
12058 tree gpr, fpr, ovf, sav, reg, t, u;
12059 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12060 tree lab_false, lab_over;
12061 tree addr = create_tmp_var (ptr_type_node, "addr");
12062 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12065 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12066 f_fpr = DECL_CHAIN (f_gpr);
12067 f_ovf = DECL_CHAIN (f_fpr);
12068 f_sav = DECL_CHAIN (f_ovf);
12070 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12071 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12072 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12074 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12075 both appear on a lhs. */
12076 valist = unshare_expr (valist);
12077 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12079 size = int_size_in_bytes (type);
12081 s390_check_type_for_vector_abi (type, true, false);
12083 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12085 if (TARGET_DEBUG_ARG)
12087 fprintf (stderr, "va_arg: aggregate type");
12091 /* Aggregates are passed by reference. */
12096 /* kernel stack layout on 31 bit: It is assumed here that no padding
12097 will be added by s390_frame_info because for va_args always an even
12098 number of gprs has to be saved r15-r2 = 14 regs. */
12099 sav_ofs = 2 * UNITS_PER_LONG;
12100 sav_scale = UNITS_PER_LONG;
12101 size = UNITS_PER_LONG;
12102 max_reg = GP_ARG_NUM_REG - n_reg;
12103 left_align_p = false;
12105 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12107 if (TARGET_DEBUG_ARG)
12109 fprintf (stderr, "va_arg: vector type");
12119 left_align_p = true;
12121 else if (s390_function_arg_float (TYPE_MODE (type), type))
12123 if (TARGET_DEBUG_ARG)
12125 fprintf (stderr, "va_arg: float type");
12129 /* FP args go in FP registers, if present. */
12133 sav_ofs = 16 * UNITS_PER_LONG;
12135 max_reg = FP_ARG_NUM_REG - n_reg;
12136 left_align_p = false;
12140 if (TARGET_DEBUG_ARG)
12142 fprintf (stderr, "va_arg: other type");
12146 /* Otherwise into GP registers. */
12149 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12151 /* kernel stack layout on 31 bit: It is assumed here that no padding
12152 will be added by s390_frame_info because for va_args always an even
12153 number of gprs has to be saved r15-r2 = 14 regs. */
12154 sav_ofs = 2 * UNITS_PER_LONG;
12156 if (size < UNITS_PER_LONG)
12157 sav_ofs += UNITS_PER_LONG - size;
12159 sav_scale = UNITS_PER_LONG;
12160 max_reg = GP_ARG_NUM_REG - n_reg;
12161 left_align_p = false;
12164 /* Pull the value out of the saved registers ... */
12166 if (reg != NULL_TREE)
12169 if (reg > ((typeof (reg))max_reg))
12172 addr = sav + sav_ofs + reg * save_scale;
12179 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12180 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12182 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12183 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12184 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12185 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12186 gimplify_and_add (t, pre_p);
12188 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12189 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12190 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12191 t = fold_build_pointer_plus (t, u);
12193 gimplify_assign (addr, t, pre_p);
12195 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12197 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12200 /* ... Otherwise out of the overflow area. */
12203 if (size < UNITS_PER_LONG && !left_align_p)
12204 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12206 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12208 gimplify_assign (addr, t, pre_p);
12210 if (size < UNITS_PER_LONG && left_align_p)
12211 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12213 t = fold_build_pointer_plus_hwi (t, size);
12215 gimplify_assign (ovf, t, pre_p);
12217 if (reg != NULL_TREE)
12218 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12221 /* Increment register save count. */
12225 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12226 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12227 gimplify_and_add (u, pre_p);
12232 t = build_pointer_type_for_mode (build_pointer_type (type),
12234 addr = fold_convert (t, addr);
12235 addr = build_va_arg_indirect_ref (addr);
12239 t = build_pointer_type_for_mode (type, ptr_mode, true);
12240 addr = fold_convert (t, addr);
12243 return build_va_arg_indirect_ref (addr);
12246 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12248 DEST - Register location where CC will be stored.
12249 TDB - Pointer to a 256 byte area where to store the transaction.
12250 diagnostic block. NULL if TDB is not needed.
12251 RETRY - Retry count value. If non-NULL a retry loop for CC2
12253 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12254 of the tbegin instruction pattern. */
12257 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12259 rtx retry_plus_two = gen_reg_rtx (SImode);
12260 rtx retry_reg = gen_reg_rtx (SImode);
12261 rtx_code_label *retry_label = NULL;
12263 if (retry != NULL_RTX)
12265 emit_move_insn (retry_reg, retry);
12266 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12267 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12268 retry_label = gen_label_rtx ();
12269 emit_label (retry_label);
12272 if (clobber_fprs_p)
12275 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12278 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12282 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12285 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12286 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12288 UNSPEC_CC_TO_INT));
12289 if (retry != NULL_RTX)
12291 const int CC0 = 1 << 3;
12292 const int CC1 = 1 << 2;
12293 const int CC3 = 1 << 0;
12295 rtx count = gen_reg_rtx (SImode);
12296 rtx_code_label *leave_label = gen_label_rtx ();
12298 /* Exit for success and permanent failures. */
12299 jump = s390_emit_jump (leave_label,
12300 gen_rtx_EQ (VOIDmode,
12301 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12302 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12303 LABEL_NUSES (leave_label) = 1;
12305 /* CC2 - transient failure. Perform retry with ppa. */
12306 emit_move_insn (count, retry_plus_two);
12307 emit_insn (gen_subsi3 (count, count, retry_reg));
12308 emit_insn (gen_tx_assist (count));
12309 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12312 JUMP_LABEL (jump) = retry_label;
12313 LABEL_NUSES (retry_label) = 1;
12314 emit_label (leave_label);
12319 /* Return the decl for the target specific builtin with the function
12323 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12325 if (fcode >= S390_BUILTIN_MAX)
12326 return error_mark_node;
12328 return s390_builtin_decls[fcode];
12331 /* We call mcount before the function prologue. So a profiled leaf
12332 function should stay a leaf function. */
12335 s390_keep_leaf_when_profiled ()
12340 /* Output assembly code for the trampoline template to
12343 On S/390, we use gpr 1 internally in the trampoline code;
12344 gpr 0 is used to hold the static chain. */
12347 s390_asm_trampoline_template (FILE *file)
12350 op[0] = gen_rtx_REG (Pmode, 0);
12351 op[1] = gen_rtx_REG (Pmode, 1);
12355 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12356 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12357 output_asm_insn ("br\t%1", op); /* 2 byte */
12358 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12362 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12363 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12364 output_asm_insn ("br\t%1", op); /* 2 byte */
12365 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
12369 /* Emit RTL insns to initialize the variable parts of a trampoline.
12370 FNADDR is an RTX for the address of the function's pure code.
12371 CXT is an RTX for the static chain value for the function. */
12374 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12376 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12379 emit_block_move (m_tramp, assemble_trampoline_template (),
12380 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12382 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12383 emit_move_insn (mem, cxt);
12384 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12385 emit_move_insn (mem, fnaddr);
12388 /* Output assembler code to FILE to increment profiler label # LABELNO
12389 for profiling a function entry. */
12392 s390_function_profiler (FILE *file, int labelno)
12397 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12399 fprintf (file, "# function profiler \n");
12401 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12402 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12403 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12405 op[2] = gen_rtx_REG (Pmode, 1);
12406 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12407 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12409 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12412 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12413 op[4] = gen_rtx_CONST (Pmode, op[4]);
12418 output_asm_insn ("stg\t%0,%1", op);
12419 output_asm_insn ("larl\t%2,%3", op);
12420 output_asm_insn ("brasl\t%0,%4", op);
12421 output_asm_insn ("lg\t%0,%1", op);
12423 else if (TARGET_CPU_ZARCH)
12425 output_asm_insn ("st\t%0,%1", op);
12426 output_asm_insn ("larl\t%2,%3", op);
12427 output_asm_insn ("brasl\t%0,%4", op);
12428 output_asm_insn ("l\t%0,%1", op);
12430 else if (!flag_pic)
12432 op[6] = gen_label_rtx ();
12434 output_asm_insn ("st\t%0,%1", op);
12435 output_asm_insn ("bras\t%2,%l6", op);
12436 output_asm_insn (".long\t%4", op);
12437 output_asm_insn (".long\t%3", op);
12438 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12439 output_asm_insn ("l\t%0,0(%2)", op);
12440 output_asm_insn ("l\t%2,4(%2)", op);
12441 output_asm_insn ("basr\t%0,%0", op);
12442 output_asm_insn ("l\t%0,%1", op);
12446 op[5] = gen_label_rtx ();
12447 op[6] = gen_label_rtx ();
12449 output_asm_insn ("st\t%0,%1", op);
12450 output_asm_insn ("bras\t%2,%l6", op);
12451 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12452 output_asm_insn (".long\t%4-%l5", op);
12453 output_asm_insn (".long\t%3-%l5", op);
12454 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12455 output_asm_insn ("lr\t%0,%2", op);
12456 output_asm_insn ("a\t%0,0(%2)", op);
12457 output_asm_insn ("a\t%2,4(%2)", op);
12458 output_asm_insn ("basr\t%0,%0", op);
12459 output_asm_insn ("l\t%0,%1", op);
12463 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12464 into its SYMBOL_REF_FLAGS. */
12467 s390_encode_section_info (tree decl, rtx rtl, int first)
12469 default_encode_section_info (decl, rtl, first);
12471 if (TREE_CODE (decl) == VAR_DECL)
12473 /* Store the alignment to be able to check if we can use
12474 a larl/load-relative instruction. We only handle the cases
12475 that can go wrong (i.e. no FUNC_DECLs). */
12476 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12477 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12478 else if (DECL_ALIGN (decl) % 32)
12479 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12480 else if (DECL_ALIGN (decl) % 64)
12481 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12484 /* Literal pool references don't have a decl so they are handled
12485 differently here. We rely on the information in the MEM_ALIGN
12486 entry to decide upon the alignment. */
12488 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12489 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
12491 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12492 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12493 else if (MEM_ALIGN (rtl) % 32)
12494 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12495 else if (MEM_ALIGN (rtl) % 64)
12496 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12500 /* Output thunk to FILE that implements a C++ virtual function call (with
12501 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12502 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12503 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12504 relative to the resulting this pointer. */
12507 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12508 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12514 /* Make sure unwind info is emitted for the thunk if needed. */
12515 final_start_function (emit_barrier (), file, 1);
12517 /* Operand 0 is the target function. */
12518 op[0] = XEXP (DECL_RTL (function), 0);
12519 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12522 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12523 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12524 op[0] = gen_rtx_CONST (Pmode, op[0]);
12527 /* Operand 1 is the 'this' pointer. */
12528 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12529 op[1] = gen_rtx_REG (Pmode, 3);
12531 op[1] = gen_rtx_REG (Pmode, 2);
12533 /* Operand 2 is the delta. */
12534 op[2] = GEN_INT (delta);
12536 /* Operand 3 is the vcall_offset. */
12537 op[3] = GEN_INT (vcall_offset);
12539 /* Operand 4 is the temporary register. */
12540 op[4] = gen_rtx_REG (Pmode, 1);
12542 /* Operands 5 to 8 can be used as labels. */
12548 /* Operand 9 can be used for temporary register. */
12551 /* Generate code. */
12554 /* Setup literal pool pointer if required. */
12555 if ((!DISP_IN_RANGE (delta)
12556 && !CONST_OK_FOR_K (delta)
12557 && !CONST_OK_FOR_Os (delta))
12558 || (!DISP_IN_RANGE (vcall_offset)
12559 && !CONST_OK_FOR_K (vcall_offset)
12560 && !CONST_OK_FOR_Os (vcall_offset)))
12562 op[5] = gen_label_rtx ();
12563 output_asm_insn ("larl\t%4,%5", op);
12566 /* Add DELTA to this pointer. */
12569 if (CONST_OK_FOR_J (delta))
12570 output_asm_insn ("la\t%1,%2(%1)", op);
12571 else if (DISP_IN_RANGE (delta))
12572 output_asm_insn ("lay\t%1,%2(%1)", op);
12573 else if (CONST_OK_FOR_K (delta))
12574 output_asm_insn ("aghi\t%1,%2", op);
12575 else if (CONST_OK_FOR_Os (delta))
12576 output_asm_insn ("agfi\t%1,%2", op);
12579 op[6] = gen_label_rtx ();
12580 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12584 /* Perform vcall adjustment. */
12587 if (DISP_IN_RANGE (vcall_offset))
12589 output_asm_insn ("lg\t%4,0(%1)", op);
12590 output_asm_insn ("ag\t%1,%3(%4)", op);
12592 else if (CONST_OK_FOR_K (vcall_offset))
12594 output_asm_insn ("lghi\t%4,%3", op);
12595 output_asm_insn ("ag\t%4,0(%1)", op);
12596 output_asm_insn ("ag\t%1,0(%4)", op);
12598 else if (CONST_OK_FOR_Os (vcall_offset))
12600 output_asm_insn ("lgfi\t%4,%3", op);
12601 output_asm_insn ("ag\t%4,0(%1)", op);
12602 output_asm_insn ("ag\t%1,0(%4)", op);
12606 op[7] = gen_label_rtx ();
12607 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12608 output_asm_insn ("ag\t%4,0(%1)", op);
12609 output_asm_insn ("ag\t%1,0(%4)", op);
12613 /* Jump to target. */
12614 output_asm_insn ("jg\t%0", op);
12616 /* Output literal pool if required. */
12619 output_asm_insn (".align\t4", op);
12620 targetm.asm_out.internal_label (file, "L",
12621 CODE_LABEL_NUMBER (op[5]));
12625 targetm.asm_out.internal_label (file, "L",
12626 CODE_LABEL_NUMBER (op[6]));
12627 output_asm_insn (".long\t%2", op);
12631 targetm.asm_out.internal_label (file, "L",
12632 CODE_LABEL_NUMBER (op[7]));
12633 output_asm_insn (".long\t%3", op);
12638 /* Setup base pointer if required. */
12640 || (!DISP_IN_RANGE (delta)
12641 && !CONST_OK_FOR_K (delta)
12642 && !CONST_OK_FOR_Os (delta))
12643 || (!DISP_IN_RANGE (delta)
12644 && !CONST_OK_FOR_K (vcall_offset)
12645 && !CONST_OK_FOR_Os (vcall_offset)))
12647 op[5] = gen_label_rtx ();
12648 output_asm_insn ("basr\t%4,0", op);
12649 targetm.asm_out.internal_label (file, "L",
12650 CODE_LABEL_NUMBER (op[5]));
12653 /* Add DELTA to this pointer. */
12656 if (CONST_OK_FOR_J (delta))
12657 output_asm_insn ("la\t%1,%2(%1)", op);
12658 else if (DISP_IN_RANGE (delta))
12659 output_asm_insn ("lay\t%1,%2(%1)", op);
12660 else if (CONST_OK_FOR_K (delta))
12661 output_asm_insn ("ahi\t%1,%2", op);
12662 else if (CONST_OK_FOR_Os (delta))
12663 output_asm_insn ("afi\t%1,%2", op);
12666 op[6] = gen_label_rtx ();
12667 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12671 /* Perform vcall adjustment. */
12674 if (CONST_OK_FOR_J (vcall_offset))
12676 output_asm_insn ("l\t%4,0(%1)", op);
12677 output_asm_insn ("a\t%1,%3(%4)", op);
12679 else if (DISP_IN_RANGE (vcall_offset))
12681 output_asm_insn ("l\t%4,0(%1)", op);
12682 output_asm_insn ("ay\t%1,%3(%4)", op);
12684 else if (CONST_OK_FOR_K (vcall_offset))
12686 output_asm_insn ("lhi\t%4,%3", op);
12687 output_asm_insn ("a\t%4,0(%1)", op);
12688 output_asm_insn ("a\t%1,0(%4)", op);
12690 else if (CONST_OK_FOR_Os (vcall_offset))
12692 output_asm_insn ("iilf\t%4,%3", op);
12693 output_asm_insn ("a\t%4,0(%1)", op);
12694 output_asm_insn ("a\t%1,0(%4)", op);
12698 op[7] = gen_label_rtx ();
12699 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12700 output_asm_insn ("a\t%4,0(%1)", op);
12701 output_asm_insn ("a\t%1,0(%4)", op);
12704 /* We had to clobber the base pointer register.
12705 Re-setup the base pointer (with a different base). */
12706 op[5] = gen_label_rtx ();
12707 output_asm_insn ("basr\t%4,0", op);
12708 targetm.asm_out.internal_label (file, "L",
12709 CODE_LABEL_NUMBER (op[5]));
12712 /* Jump to target. */
12713 op[8] = gen_label_rtx ();
12716 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12717 else if (!nonlocal)
12718 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12719 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12720 else if (flag_pic == 1)
12722 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12723 output_asm_insn ("l\t%4,%0(%4)", op);
12725 else if (flag_pic == 2)
12727 op[9] = gen_rtx_REG (Pmode, 0);
12728 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12729 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12730 output_asm_insn ("ar\t%4,%9", op);
12731 output_asm_insn ("l\t%4,0(%4)", op);
12734 output_asm_insn ("br\t%4", op);
12736 /* Output literal pool. */
12737 output_asm_insn (".align\t4", op);
12739 if (nonlocal && flag_pic == 2)
12740 output_asm_insn (".long\t%0", op);
12743 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12744 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12747 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12749 output_asm_insn (".long\t%0", op);
12751 output_asm_insn (".long\t%0-%5", op);
12755 targetm.asm_out.internal_label (file, "L",
12756 CODE_LABEL_NUMBER (op[6]));
12757 output_asm_insn (".long\t%2", op);
12761 targetm.asm_out.internal_label (file, "L",
12762 CODE_LABEL_NUMBER (op[7]));
12763 output_asm_insn (".long\t%3", op);
12766 final_end_function ();
12770 s390_valid_pointer_mode (machine_mode mode)
12772 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12775 /* Checks whether the given CALL_EXPR would use a caller
12776 saved register. This is used to decide whether sibling call
12777 optimization could be performed on the respective function
12781 s390_call_saved_register_used (tree call_expr)
12783 CUMULATIVE_ARGS cum_v;
12784 cumulative_args_t cum;
12791 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12792 cum = pack_cumulative_args (&cum_v);
12794 for (i = 0; i < call_expr_nargs (call_expr); i++)
12796 parameter = CALL_EXPR_ARG (call_expr, i);
12797 gcc_assert (parameter);
12799 /* For an undeclared variable passed as parameter we will get
12800 an ERROR_MARK node here. */
12801 if (TREE_CODE (parameter) == ERROR_MARK)
12804 type = TREE_TYPE (parameter);
12807 mode = TYPE_MODE (type);
12810 /* We assume that in the target function all parameters are
12811 named. This only has an impact on vector argument register
12812 usage none of which is call-saved. */
12813 if (pass_by_reference (&cum_v, mode, type, true))
12816 type = build_pointer_type (type);
12819 parm_rtx = s390_function_arg (cum, mode, type, true);
12821 s390_function_arg_advance (cum, mode, type, true);
12826 if (REG_P (parm_rtx))
12829 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12831 if (!call_used_regs[reg + REGNO (parm_rtx)])
12835 if (GET_CODE (parm_rtx) == PARALLEL)
12839 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12841 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12843 gcc_assert (REG_P (r));
12846 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12848 if (!call_used_regs[reg + REGNO (r)])
12857 /* Return true if the given call expression can be
12858 turned into a sibling call.
12859 DECL holds the declaration of the function to be called whereas
12860 EXP is the call expression itself. */
12863 s390_function_ok_for_sibcall (tree decl, tree exp)
12865 /* The TPF epilogue uses register 1. */
12866 if (TARGET_TPF_PROFILING)
12869 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12870 which would have to be restored before the sibcall. */
12871 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12874 /* Register 6 on s390 is available as an argument register but unfortunately
12875 "caller saved". This makes functions needing this register for arguments
12876 not suitable for sibcalls. */
12877 return !s390_call_saved_register_used (exp);
12880 /* Return the fixed registers used for condition codes. */
12883 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12886 *p2 = INVALID_REGNUM;
12891 /* This function is used by the call expanders of the machine description.
12892 It emits the call insn itself together with the necessary operations
12893 to adjust the target address and returns the emitted insn.
12894 ADDR_LOCATION is the target address rtx
12895 TLS_CALL the location of the thread-local symbol
12896 RESULT_REG the register where the result of the call should be stored
12897 RETADDR_REG the register where the return address should be stored
12898 If this parameter is NULL_RTX the call is considered
12899 to be a sibling call. */
12902 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12905 bool plt_call = false;
12911 /* Direct function calls need special treatment. */
12912 if (GET_CODE (addr_location) == SYMBOL_REF)
12914 /* When calling a global routine in PIC mode, we must
12915 replace the symbol itself with the PLT stub. */
12916 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12918 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
12920 addr_location = gen_rtx_UNSPEC (Pmode,
12921 gen_rtvec (1, addr_location),
12923 addr_location = gen_rtx_CONST (Pmode, addr_location);
12927 /* For -fpic code the PLT entries might use r12 which is
12928 call-saved. Therefore we cannot do a sibcall when
12929 calling directly using a symbol ref. When reaching
12930 this point we decided (in s390_function_ok_for_sibcall)
12931 to do a sibcall for a function pointer but one of the
12932 optimizers was able to get rid of the function pointer
12933 by propagating the symbol ref into the call. This
12934 optimization is illegal for S/390 so we turn the direct
12935 call into a indirect call again. */
12936 addr_location = force_reg (Pmode, addr_location);
12939 /* Unless we can use the bras(l) insn, force the
12940 routine address into a register. */
12941 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12944 addr_location = legitimize_pic_address (addr_location, 0);
12946 addr_location = force_reg (Pmode, addr_location);
12950 /* If it is already an indirect call or the code above moved the
12951 SYMBOL_REF to somewhere else make sure the address can be found in
12953 if (retaddr_reg == NULL_RTX
12954 && GET_CODE (addr_location) != SYMBOL_REF
12957 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12958 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12961 addr_location = gen_rtx_MEM (QImode, addr_location);
12962 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12964 if (result_reg != NULL_RTX)
12965 call = gen_rtx_SET (result_reg, call);
12967 if (retaddr_reg != NULL_RTX)
12969 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12971 if (tls_call != NULL_RTX)
12972 vec = gen_rtvec (3, call, clobber,
12973 gen_rtx_USE (VOIDmode, tls_call));
12975 vec = gen_rtvec (2, call, clobber);
12977 call = gen_rtx_PARALLEL (VOIDmode, vec);
12980 insn = emit_call_insn (call);
12982 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12983 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12985 /* s390_function_ok_for_sibcall should
12986 have denied sibcalls in this case. */
12987 gcc_assert (retaddr_reg != NULL_RTX);
12988 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12993 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12996 s390_conditional_register_usage (void)
13002 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13003 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13005 if (TARGET_CPU_ZARCH)
13007 fixed_regs[BASE_REGNUM] = 0;
13008 call_used_regs[BASE_REGNUM] = 0;
13009 fixed_regs[RETURN_REGNUM] = 0;
13010 call_used_regs[RETURN_REGNUM] = 0;
13014 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13015 call_used_regs[i] = call_really_used_regs[i] = 0;
13019 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13020 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13023 if (TARGET_SOFT_FLOAT)
13025 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13026 call_used_regs[i] = fixed_regs[i] = 1;
13029 /* Disable v16 - v31 for non-vector target. */
13032 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13033 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13037 /* Corresponding function to eh_return expander. */
13039 static GTY(()) rtx s390_tpf_eh_return_symbol;
13041 s390_emit_tpf_eh_return (rtx target)
13046 if (!s390_tpf_eh_return_symbol)
13047 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13049 reg = gen_rtx_REG (Pmode, 2);
13050 orig_ra = gen_rtx_REG (Pmode, 3);
13052 emit_move_insn (reg, target);
13053 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13054 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13055 gen_rtx_REG (Pmode, RETURN_REGNUM));
13056 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13057 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13059 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13062 /* Rework the prologue/epilogue to avoid saving/restoring
13063 registers unnecessarily. */
13066 s390_optimize_prologue (void)
13068 rtx_insn *insn, *new_insn, *next_insn;
13070 /* Do a final recompute of the frame-related data. */
13071 s390_optimize_register_info ();
13073 /* If all special registers are in fact used, there's nothing we
13074 can do, so no point in walking the insn list. */
13076 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13077 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13078 && (TARGET_CPU_ZARCH
13079 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13080 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13083 /* Search for prologue/epilogue insns and replace them. */
13085 for (insn = get_insns (); insn; insn = next_insn)
13087 int first, last, off;
13088 rtx set, base, offset;
13091 next_insn = NEXT_INSN (insn);
13093 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13096 pat = PATTERN (insn);
13098 /* Remove ldgr/lgdr instructions used for saving and restore
13099 GPRs if possible. */
13104 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13105 tmp_pat = XVECEXP (pat, 0, 0);
13107 if (GET_CODE (tmp_pat) == SET
13108 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13109 && REG_P (SET_SRC (tmp_pat))
13110 && REG_P (SET_DEST (tmp_pat)))
13112 int src_regno = REGNO (SET_SRC (tmp_pat));
13113 int dest_regno = REGNO (SET_DEST (tmp_pat));
13117 if (!((GENERAL_REGNO_P (src_regno)
13118 && FP_REGNO_P (dest_regno))
13119 || (FP_REGNO_P (src_regno)
13120 && GENERAL_REGNO_P (dest_regno))))
13123 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13124 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13126 /* GPR must be call-saved, FPR must be call-clobbered. */
13127 if (!call_really_used_regs[fpr_regno]
13128 || call_really_used_regs[gpr_regno])
13131 /* It must not happen that what we once saved in an FPR now
13132 needs a stack slot. */
13133 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13135 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13137 remove_insn (insn);
13143 if (GET_CODE (pat) == PARALLEL
13144 && store_multiple_operation (pat, VOIDmode))
13146 set = XVECEXP (pat, 0, 0);
13147 first = REGNO (SET_SRC (set));
13148 last = first + XVECLEN (pat, 0) - 1;
13149 offset = const0_rtx;
13150 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13151 off = INTVAL (offset);
13153 if (GET_CODE (base) != REG || off < 0)
13155 if (cfun_frame_layout.first_save_gpr != -1
13156 && (cfun_frame_layout.first_save_gpr < first
13157 || cfun_frame_layout.last_save_gpr > last))
13159 if (REGNO (base) != STACK_POINTER_REGNUM
13160 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13162 if (first > BASE_REGNUM || last < BASE_REGNUM)
13165 if (cfun_frame_layout.first_save_gpr != -1)
13167 rtx s_pat = save_gprs (base,
13168 off + (cfun_frame_layout.first_save_gpr
13169 - first) * UNITS_PER_LONG,
13170 cfun_frame_layout.first_save_gpr,
13171 cfun_frame_layout.last_save_gpr);
13172 new_insn = emit_insn_before (s_pat, insn);
13173 INSN_ADDRESSES_NEW (new_insn, -1);
13176 remove_insn (insn);
13180 if (cfun_frame_layout.first_save_gpr == -1
13181 && GET_CODE (pat) == SET
13182 && GENERAL_REG_P (SET_SRC (pat))
13183 && GET_CODE (SET_DEST (pat)) == MEM)
13186 first = REGNO (SET_SRC (set));
13187 offset = const0_rtx;
13188 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13189 off = INTVAL (offset);
13191 if (GET_CODE (base) != REG || off < 0)
13193 if (REGNO (base) != STACK_POINTER_REGNUM
13194 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13197 remove_insn (insn);
13201 if (GET_CODE (pat) == PARALLEL
13202 && load_multiple_operation (pat, VOIDmode))
13204 set = XVECEXP (pat, 0, 0);
13205 first = REGNO (SET_DEST (set));
13206 last = first + XVECLEN (pat, 0) - 1;
13207 offset = const0_rtx;
13208 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13209 off = INTVAL (offset);
13211 if (GET_CODE (base) != REG || off < 0)
13214 if (cfun_frame_layout.first_restore_gpr != -1
13215 && (cfun_frame_layout.first_restore_gpr < first
13216 || cfun_frame_layout.last_restore_gpr > last))
13218 if (REGNO (base) != STACK_POINTER_REGNUM
13219 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13221 if (first > BASE_REGNUM || last < BASE_REGNUM)
13224 if (cfun_frame_layout.first_restore_gpr != -1)
13226 rtx rpat = restore_gprs (base,
13227 off + (cfun_frame_layout.first_restore_gpr
13228 - first) * UNITS_PER_LONG,
13229 cfun_frame_layout.first_restore_gpr,
13230 cfun_frame_layout.last_restore_gpr);
13232 /* Remove REG_CFA_RESTOREs for registers that we no
13233 longer need to save. */
13234 REG_NOTES (rpat) = REG_NOTES (insn);
13235 for (rtx *ptr = ®_NOTES (rpat); *ptr; )
13236 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13237 && ((int) REGNO (XEXP (*ptr, 0))
13238 < cfun_frame_layout.first_restore_gpr))
13239 *ptr = XEXP (*ptr, 1);
13241 ptr = &XEXP (*ptr, 1);
13242 new_insn = emit_insn_before (rpat, insn);
13243 RTX_FRAME_RELATED_P (new_insn) = 1;
13244 INSN_ADDRESSES_NEW (new_insn, -1);
13247 remove_insn (insn);
13251 if (cfun_frame_layout.first_restore_gpr == -1
13252 && GET_CODE (pat) == SET
13253 && GENERAL_REG_P (SET_DEST (pat))
13254 && GET_CODE (SET_SRC (pat)) == MEM)
13257 first = REGNO (SET_DEST (set));
13258 offset = const0_rtx;
13259 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13260 off = INTVAL (offset);
13262 if (GET_CODE (base) != REG || off < 0)
13265 if (REGNO (base) != STACK_POINTER_REGNUM
13266 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13269 remove_insn (insn);
13275 /* On z10 and later the dynamic branch prediction must see the
13276 backward jump within a certain windows. If not it falls back to
13277 the static prediction. This function rearranges the loop backward
13278 branch in a way which makes the static prediction always correct.
13279 The function returns true if it added an instruction. */
13281 s390_fix_long_loop_prediction (rtx_insn *insn)
13283 rtx set = single_set (insn);
13284 rtx code_label, label_ref;
13285 rtx_insn *uncond_jump;
13286 rtx_insn *cur_insn;
13290 /* This will exclude branch on count and branch on index patterns
13291 since these are correctly statically predicted. */
13293 || SET_DEST (set) != pc_rtx
13294 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
13297 /* Skip conditional returns. */
13298 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13299 && XEXP (SET_SRC (set), 2) == pc_rtx)
13302 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13303 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13305 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13307 code_label = XEXP (label_ref, 0);
13309 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13310 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13311 || (INSN_ADDRESSES (INSN_UID (insn))
13312 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13315 for (distance = 0, cur_insn = PREV_INSN (insn);
13316 distance < PREDICT_DISTANCE - 6;
13317 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13318 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13321 rtx_code_label *new_label = gen_label_rtx ();
13322 uncond_jump = emit_jump_insn_after (
13323 gen_rtx_SET (pc_rtx,
13324 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13326 emit_label_after (new_label, uncond_jump);
13328 tmp = XEXP (SET_SRC (set), 1);
13329 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13330 XEXP (SET_SRC (set), 2) = tmp;
13331 INSN_CODE (insn) = -1;
13333 XEXP (label_ref, 0) = new_label;
13334 JUMP_LABEL (insn) = new_label;
13335 JUMP_LABEL (uncond_jump) = code_label;
13340 /* Returns 1 if INSN reads the value of REG for purposes not related
13341 to addressing of memory, and 0 otherwise. */
13343 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13345 return reg_referenced_p (reg, PATTERN (insn))
13346 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13349 /* Starting from INSN find_cond_jump looks downwards in the insn
13350 stream for a single jump insn which is the last user of the
13351 condition code set in INSN. */
13353 find_cond_jump (rtx_insn *insn)
13355 for (; insn; insn = NEXT_INSN (insn))
13359 if (LABEL_P (insn))
13362 if (!JUMP_P (insn))
13364 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13369 /* This will be triggered by a return. */
13370 if (GET_CODE (PATTERN (insn)) != SET)
13373 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13374 ite = SET_SRC (PATTERN (insn));
13376 if (GET_CODE (ite) != IF_THEN_ELSE)
13379 cc = XEXP (XEXP (ite, 0), 0);
13380 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13383 if (find_reg_note (insn, REG_DEAD, cc))
13391 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13392 the semantics does not change. If NULL_RTX is passed as COND the
13393 function tries to find the conditional jump starting with INSN. */
13395 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13399 if (cond == NULL_RTX)
13401 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13402 rtx set = jump ? single_set (jump) : NULL_RTX;
13404 if (set == NULL_RTX)
13407 cond = XEXP (SET_SRC (set), 0);
13412 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13415 /* On z10, instructions of the compare-and-branch family have the
13416 property to access the register occurring as second operand with
13417 its bits complemented. If such a compare is grouped with a second
13418 instruction that accesses the same register non-complemented, and
13419 if that register's value is delivered via a bypass, then the
13420 pipeline recycles, thereby causing significant performance decline.
13421 This function locates such situations and exchanges the two
13422 operands of the compare. The function return true whenever it
13425 s390_z10_optimize_cmp (rtx_insn *insn)
13427 rtx_insn *prev_insn, *next_insn;
13428 bool insn_added_p = false;
13429 rtx cond, *op0, *op1;
13431 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13433 /* Handle compare and branch and branch on count
13435 rtx pattern = single_set (insn);
13438 || SET_DEST (pattern) != pc_rtx
13439 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13442 cond = XEXP (SET_SRC (pattern), 0);
13443 op0 = &XEXP (cond, 0);
13444 op1 = &XEXP (cond, 1);
13446 else if (GET_CODE (PATTERN (insn)) == SET)
13450 /* Handle normal compare instructions. */
13451 src = SET_SRC (PATTERN (insn));
13452 dest = SET_DEST (PATTERN (insn));
13455 || !CC_REGNO_P (REGNO (dest))
13456 || GET_CODE (src) != COMPARE)
13459 /* s390_swap_cmp will try to find the conditional
13460 jump when passing NULL_RTX as condition. */
13462 op0 = &XEXP (src, 0);
13463 op1 = &XEXP (src, 1);
13468 if (!REG_P (*op0) || !REG_P (*op1))
13471 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13474 /* Swap the COMPARE arguments and its mask if there is a
13475 conflicting access in the previous insn. */
13476 prev_insn = prev_active_insn (insn);
13477 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13478 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13479 s390_swap_cmp (cond, op0, op1, insn);
13481 /* Check if there is a conflict with the next insn. If there
13482 was no conflict with the previous insn, then swap the
13483 COMPARE arguments and its mask. If we already swapped
13484 the operands, or if swapping them would cause a conflict
13485 with the previous insn, issue a NOP after the COMPARE in
13486 order to separate the two instuctions. */
13487 next_insn = next_active_insn (insn);
13488 if (next_insn != NULL_RTX && INSN_P (next_insn)
13489 && s390_non_addr_reg_read_p (*op1, next_insn))
13491 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13492 && s390_non_addr_reg_read_p (*op0, prev_insn))
13494 if (REGNO (*op1) == 0)
13495 emit_insn_after (gen_nop1 (), insn);
13497 emit_insn_after (gen_nop (), insn);
13498 insn_added_p = true;
13501 s390_swap_cmp (cond, op0, op1, insn);
13503 return insn_added_p;
13506 /* Number of INSNs to be scanned backward in the last BB of the loop
13507 and forward in the first BB of the loop. This usually should be a
13508 bit more than the number of INSNs which could go into one
13510 #define S390_OSC_SCAN_INSN_NUM 5
13512 /* Scan LOOP for static OSC collisions and return true if a osc_break
13513 should be issued for this loop. */
13515 s390_adjust_loop_scan_osc (struct loop* loop)
13518 HARD_REG_SET modregs, newregs;
13519 rtx_insn *insn, *store_insn = NULL;
13521 struct s390_address addr_store, addr_load;
13522 subrtx_iterator::array_type array;
13525 CLEAR_HARD_REG_SET (modregs);
13528 FOR_BB_INSNS_REVERSE (loop->latch, insn)
13530 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13534 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13537 find_all_hard_reg_sets (insn, &newregs, true);
13538 IOR_HARD_REG_SET (modregs, newregs);
13540 set = single_set (insn);
13544 if (MEM_P (SET_DEST (set))
13545 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
13552 if (store_insn == NULL_RTX)
13556 FOR_BB_INSNS (loop->header, insn)
13558 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13561 if (insn == store_insn)
13565 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13568 find_all_hard_reg_sets (insn, &newregs, true);
13569 IOR_HARD_REG_SET (modregs, newregs);
13571 set = single_set (insn);
13575 /* An intermediate store disrupts static OSC checking
13577 if (MEM_P (SET_DEST (set))
13578 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
13581 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
13583 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
13584 && rtx_equal_p (addr_load.base, addr_store.base)
13585 && rtx_equal_p (addr_load.indx, addr_store.indx)
13586 && rtx_equal_p (addr_load.disp, addr_store.disp))
13588 if ((addr_load.base != NULL_RTX
13589 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
13590 || (addr_load.indx != NULL_RTX
13591 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
13598 /* Look for adjustments which can be done on simple innermost
13601 s390_adjust_loops ()
13603 struct loop *loop = NULL;
13606 compute_bb_for_insn ();
13608 /* Find the loops. */
13609 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
13611 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
13615 flow_loop_dump (loop, dump_file, NULL, 0);
13616 fprintf (dump_file, ";; OSC loop scan Loop: ");
13618 if (loop->latch == NULL
13619 || pc_set (BB_END (loop->latch)) == NULL_RTX
13620 || !s390_adjust_loop_scan_osc (loop))
13624 if (loop->latch == NULL)
13625 fprintf (dump_file, " muliple backward jumps\n");
13628 fprintf (dump_file, " header insn: %d latch insn: %d ",
13629 INSN_UID (BB_HEAD (loop->header)),
13630 INSN_UID (BB_END (loop->latch)));
13631 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
13632 fprintf (dump_file, " loop does not end with jump\n");
13634 fprintf (dump_file, " not instrumented\n");
13640 rtx_insn *new_insn;
13643 fprintf (dump_file, " adding OSC break insn: ");
13644 new_insn = emit_insn_before (gen_osc_break (),
13645 BB_END (loop->latch));
13646 INSN_ADDRESSES_NEW (new_insn, -1);
13650 loop_optimizer_finalize ();
13652 df_finish_pass (false);
13655 /* Perform machine-dependent processing. */
13660 bool pool_overflow = false;
13661 int hw_before, hw_after;
13663 if (s390_tune == PROCESSOR_2964_Z13)
13664 s390_adjust_loops ();
13666 /* Make sure all splits have been performed; splits after
13667 machine_dependent_reorg might confuse insn length counts. */
13668 split_all_insns_noflow ();
13670 /* Install the main literal pool and the associated base
13671 register load insns.
13673 In addition, there are two problematic situations we need
13676 - the literal pool might be > 4096 bytes in size, so that
13677 some of its elements cannot be directly accessed
13679 - a branch target might be > 64K away from the branch, so that
13680 it is not possible to use a PC-relative instruction.
13682 To fix those, we split the single literal pool into multiple
13683 pool chunks, reloading the pool base register at various
13684 points throughout the function to ensure it always points to
13685 the pool chunk the following code expects, and / or replace
13686 PC-relative branches by absolute branches.
13688 However, the two problems are interdependent: splitting the
13689 literal pool can move a branch further away from its target,
13690 causing the 64K limit to overflow, and on the other hand,
13691 replacing a PC-relative branch by an absolute branch means
13692 we need to put the branch target address into the literal
13693 pool, possibly causing it to overflow.
13695 So, we loop trying to fix up both problems until we manage
13696 to satisfy both conditions at the same time. Note that the
13697 loop is guaranteed to terminate as every pass of the loop
13698 strictly decreases the total number of PC-relative branches
13699 in the function. (This is not completely true as there
13700 might be branch-over-pool insns introduced by chunkify_start.
13701 Those never need to be split however.) */
13705 struct constant_pool *pool = NULL;
13707 /* Collect the literal pool. */
13708 if (!pool_overflow)
13710 pool = s390_mainpool_start ();
13712 pool_overflow = true;
13715 /* If literal pool overflowed, start to chunkify it. */
13717 pool = s390_chunkify_start ();
13719 /* Split out-of-range branches. If this has created new
13720 literal pool entries, cancel current chunk list and
13721 recompute it. zSeries machines have large branch
13722 instructions, so we never need to split a branch. */
13723 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13726 s390_chunkify_cancel (pool);
13728 s390_mainpool_cancel (pool);
13733 /* If we made it up to here, both conditions are satisfied.
13734 Finish up literal pool related changes. */
13736 s390_chunkify_finish (pool);
13738 s390_mainpool_finish (pool);
13740 /* We're done splitting branches. */
13741 cfun->machine->split_branches_pending_p = false;
13745 /* Generate out-of-pool execute target insns. */
13746 if (TARGET_CPU_ZARCH)
13748 rtx_insn *insn, *target;
13751 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13753 label = s390_execute_label (insn);
13757 gcc_assert (label != const0_rtx);
13759 target = emit_label (XEXP (label, 0));
13760 INSN_ADDRESSES_NEW (target, -1);
13762 target = emit_insn (s390_execute_target (insn));
13763 INSN_ADDRESSES_NEW (target, -1);
13767 /* Try to optimize prologue and epilogue further. */
13768 s390_optimize_prologue ();
13770 /* Walk over the insns and do some >=z10 specific changes. */
13771 if (s390_tune >= PROCESSOR_2097_Z10)
13774 bool insn_added_p = false;
13776 /* The insn lengths and addresses have to be up to date for the
13777 following manipulations. */
13778 shorten_branches (get_insns ());
13780 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13782 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13786 insn_added_p |= s390_fix_long_loop_prediction (insn);
13788 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13789 || GET_CODE (PATTERN (insn)) == SET)
13790 && s390_tune == PROCESSOR_2097_Z10)
13791 insn_added_p |= s390_z10_optimize_cmp (insn);
13794 /* Adjust branches if we added new instructions. */
13796 shorten_branches (get_insns ());
13799 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13804 /* Insert NOPs for hotpatching. */
13805 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13807 1. inside the area covered by debug information to allow setting
13808 breakpoints at the NOPs,
13809 2. before any insn which results in an asm instruction,
13810 3. before in-function labels to avoid jumping to the NOPs, for
13811 example as part of a loop,
13812 4. before any barrier in case the function is completely empty
13813 (__builtin_unreachable ()) and has neither internal labels nor
13816 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13818 /* Output a series of NOPs before the first active insn. */
13819 while (insn && hw_after > 0)
13821 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13823 emit_insn_before (gen_nop_6_byte (), insn);
13826 else if (hw_after >= 2)
13828 emit_insn_before (gen_nop_4_byte (), insn);
13833 emit_insn_before (gen_nop_2_byte (), insn);
13840 /* Return true if INSN is a fp load insn writing register REGNO. */
13842 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13845 enum attr_type flag = s390_safe_attr_type (insn);
13847 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13850 set = single_set (insn);
13852 if (set == NULL_RTX)
13855 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13858 if (REGNO (SET_DEST (set)) != regno)
13864 /* This value describes the distance to be avoided between an
13865 aritmetic fp instruction and an fp load writing the same register.
13866 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
13867 fine but the exact value has to be avoided. Otherwise the FP
13868 pipeline will throw an exception causing a major penalty. */
13869 #define Z10_EARLYLOAD_DISTANCE 7
13871 /* Rearrange the ready list in order to avoid the situation described
13872 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13873 moved to the very end of the ready list. */
13875 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13877 unsigned int regno;
13878 int nready = *nready_p;
13883 enum attr_type flag;
13886 /* Skip DISTANCE - 1 active insns. */
13887 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13888 distance > 0 && insn != NULL_RTX;
13889 distance--, insn = prev_active_insn (insn))
13890 if (CALL_P (insn) || JUMP_P (insn))
13893 if (insn == NULL_RTX)
13896 set = single_set (insn);
13898 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13899 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13902 flag = s390_safe_attr_type (insn);
13904 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13907 regno = REGNO (SET_DEST (set));
13910 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13917 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13922 /* The s390_sched_state variable tracks the state of the current or
13923 the last instruction group.
13925 0,1,2 number of instructions scheduled in the current group
13926 3 the last group is complete - normal insns
13927 4 the last group was a cracked/expanded insn */
13929 static int s390_sched_state;
13931 #define S390_SCHED_STATE_NORMAL 3
13932 #define S390_SCHED_STATE_CRACKED 4
13934 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
13935 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
13936 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
13937 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
13939 static unsigned int
13940 s390_get_sched_attrmask (rtx_insn *insn)
13942 unsigned int mask = 0;
13946 case PROCESSOR_2827_ZEC12:
13947 if (get_attr_zEC12_cracked (insn))
13948 mask |= S390_SCHED_ATTR_MASK_CRACKED;
13949 if (get_attr_zEC12_expanded (insn))
13950 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
13951 if (get_attr_zEC12_endgroup (insn))
13952 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
13953 if (get_attr_zEC12_groupalone (insn))
13954 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
13956 case PROCESSOR_2964_Z13:
13957 if (get_attr_z13_cracked (insn))
13958 mask |= S390_SCHED_ATTR_MASK_CRACKED;
13959 if (get_attr_z13_expanded (insn))
13960 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
13961 if (get_attr_z13_endgroup (insn))
13962 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
13963 if (get_attr_z13_groupalone (insn))
13964 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
13967 gcc_unreachable ();
13972 static unsigned int
13973 s390_get_unit_mask (rtx_insn *insn, int *units)
13975 unsigned int mask = 0;
13979 case PROCESSOR_2964_Z13:
13981 if (get_attr_z13_unit_lsu (insn))
13983 if (get_attr_z13_unit_fxu (insn))
13985 if (get_attr_z13_unit_vfu (insn))
13989 gcc_unreachable ();
13994 /* Return the scheduling score for INSN. The higher the score the
13995 better. The score is calculated from the OOO scheduling attributes
13996 of INSN and the scheduling state s390_sched_state. */
13998 s390_sched_score (rtx_insn *insn)
14000 unsigned int mask = s390_get_sched_attrmask (insn);
14003 switch (s390_sched_state)
14006 /* Try to put insns into the first slot which would otherwise
14008 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14009 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14011 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14015 /* Prefer not cracked insns while trying to put together a
14017 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14018 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14019 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14021 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14025 /* Prefer not cracked insns while trying to put together a
14027 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14028 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14029 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14031 /* Prefer endgroup insns in the last slot. */
14032 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14035 case S390_SCHED_STATE_NORMAL:
14036 /* Prefer not cracked insns if the last was not cracked. */
14037 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14038 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14040 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14043 case S390_SCHED_STATE_CRACKED:
14044 /* Try to keep cracked insns together to prevent them from
14045 interrupting groups. */
14046 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14047 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14052 if (s390_tune == PROCESSOR_2964_Z13)
14055 unsigned unit_mask, m = 1;
14057 unit_mask = s390_get_unit_mask (insn, &units);
14058 gcc_assert (units <= MAX_SCHED_UNITS);
14060 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14061 ago the last insn of this unit type got scheduled. This is
14062 supposed to help providing a proper instruction mix to the
14064 for (i = 0; i < units; i++, m <<= 1)
14066 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14067 MAX_SCHED_MIX_DISTANCE);
14072 /* This function is called via hook TARGET_SCHED_REORDER before
14073 issuing one insn from list READY which contains *NREADYP entries.
14074 For target z10 it reorders load instructions to avoid early load
14075 conflicts in the floating point pipeline */
14077 s390_sched_reorder (FILE *file, int verbose,
14078 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
14080 if (s390_tune == PROCESSOR_2097_Z10
14081 && reload_completed
14083 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
14085 if (s390_tune >= PROCESSOR_2827_ZEC12
14086 && reload_completed
14090 int last_index = *nreadyp - 1;
14091 int max_index = -1;
14092 int max_score = -1;
14095 /* Just move the insn with the highest score to the top (the
14096 end) of the list. A full sort is not needed since a conflict
14097 in the hazard recognition cannot happen. So the top insn in
14098 the ready list will always be taken. */
14099 for (i = last_index; i >= 0; i--)
14103 if (recog_memoized (ready[i]) < 0)
14106 score = s390_sched_score (ready[i]);
14107 if (score > max_score)
14114 if (max_index != -1)
14116 if (max_index != last_index)
14118 tmp = ready[max_index];
14119 ready[max_index] = ready[last_index];
14120 ready[last_index] = tmp;
14124 ";;\t\tBACKEND: move insn %d to the top of list\n",
14125 INSN_UID (ready[last_index]));
14127 else if (verbose > 5)
14129 ";;\t\tBACKEND: best insn %d already on top\n",
14130 INSN_UID (ready[last_index]));
14135 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14138 for (i = last_index; i >= 0; i--)
14140 unsigned int sched_mask;
14141 rtx_insn *insn = ready[i];
14143 if (recog_memoized (insn) < 0)
14146 sched_mask = s390_get_sched_attrmask (insn);
14147 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14149 s390_sched_score (insn));
14150 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14151 ((M) & sched_mask) ? #ATTR : "");
14152 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14153 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14154 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14155 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14156 #undef PRINT_SCHED_ATTR
14157 if (s390_tune == PROCESSOR_2964_Z13)
14159 unsigned int unit_mask, m = 1;
14162 unit_mask = s390_get_unit_mask (insn, &units);
14163 fprintf (file, "(units:");
14164 for (j = 0; j < units; j++, m <<= 1)
14166 fprintf (file, " u%d", j);
14167 fprintf (file, ")");
14169 fprintf (file, "\n");
14174 return s390_issue_rate ();
14178 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14179 the scheduler has issued INSN. It stores the last issued insn into
14180 last_scheduled_insn in order to make it available for
14181 s390_sched_reorder. */
14183 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14185 last_scheduled_insn = insn;
14187 if (s390_tune >= PROCESSOR_2827_ZEC12
14188 && reload_completed
14189 && recog_memoized (insn) >= 0)
14191 unsigned int mask = s390_get_sched_attrmask (insn);
14193 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14194 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14195 s390_sched_state = S390_SCHED_STATE_CRACKED;
14196 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14197 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14198 s390_sched_state = S390_SCHED_STATE_NORMAL;
14201 /* Only normal insns are left (mask == 0). */
14202 switch (s390_sched_state)
14207 case S390_SCHED_STATE_NORMAL:
14208 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
14209 s390_sched_state = 1;
14211 s390_sched_state++;
14214 case S390_SCHED_STATE_CRACKED:
14215 s390_sched_state = S390_SCHED_STATE_NORMAL;
14220 if (s390_tune == PROCESSOR_2964_Z13)
14223 unsigned unit_mask, m = 1;
14225 unit_mask = s390_get_unit_mask (insn, &units);
14226 gcc_assert (units <= MAX_SCHED_UNITS);
14228 for (i = 0; i < units; i++, m <<= 1)
14230 last_scheduled_unit_distance[i] = 0;
14231 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14232 last_scheduled_unit_distance[i]++;
14237 unsigned int sched_mask;
14239 sched_mask = s390_get_sched_attrmask (insn);
14241 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14242 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14243 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14244 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14245 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14246 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14247 #undef PRINT_SCHED_ATTR
14249 if (s390_tune == PROCESSOR_2964_Z13)
14251 unsigned int unit_mask, m = 1;
14254 unit_mask = s390_get_unit_mask (insn, &units);
14255 fprintf (file, "(units:");
14256 for (j = 0; j < units; j++, m <<= 1)
14258 fprintf (file, " %d", j);
14259 fprintf (file, ")");
14261 fprintf (file, " sched state: %d\n", s390_sched_state);
14263 if (s390_tune == PROCESSOR_2964_Z13)
14267 s390_get_unit_mask (insn, &units);
14269 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14270 for (j = 0; j < units; j++)
14271 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14272 fprintf (file, "\n");
14277 if (GET_CODE (PATTERN (insn)) != USE
14278 && GET_CODE (PATTERN (insn)) != CLOBBER)
14285 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14286 int verbose ATTRIBUTE_UNUSED,
14287 int max_ready ATTRIBUTE_UNUSED)
14289 last_scheduled_insn = NULL;
14290 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14291 s390_sched_state = 0;
14294 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14295 a new number struct loop *loop should be unrolled if tuned for cpus with
14296 a built-in stride prefetcher.
14297 The loop is analyzed for memory accesses by calling check_dpu for
14298 each rtx of the loop. Depending on the loop_depth and the amount of
14299 memory accesses a new number <=nunroll is returned to improve the
14300 behavior of the hardware prefetch unit. */
14302 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14307 unsigned mem_count = 0;
14309 if (s390_tune < PROCESSOR_2097_Z10)
14312 /* Count the number of memory references within the loop body. */
14313 bbs = get_loop_body (loop);
14314 subrtx_iterator::array_type array;
14315 for (i = 0; i < loop->num_nodes; i++)
14316 FOR_BB_INSNS (bbs[i], insn)
14317 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14318 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14323 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
14324 if (mem_count == 0)
14327 switch (loop_depth(loop))
14330 return MIN (nunroll, 28 / mem_count);
14332 return MIN (nunroll, 22 / mem_count);
14334 return MIN (nunroll, 16 / mem_count);
14338 /* Restore the current options. This is a hook function and also called
14342 s390_function_specific_restore (struct gcc_options *opts,
14343 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14345 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14349 s390_option_override_internal (bool main_args_p,
14350 struct gcc_options *opts,
14351 const struct gcc_options *opts_set)
14353 const char *prefix;
14354 const char *suffix;
14356 /* Set up prefix/suffix so the error messages refer to either the command
14357 line argument, or the attribute(target). */
14365 prefix = "option(\"";
14370 /* Architecture mode defaults according to ABI. */
14371 if (!(opts_set->x_target_flags & MASK_ZARCH))
14374 opts->x_target_flags |= MASK_ZARCH;
14376 opts->x_target_flags &= ~MASK_ZARCH;
14379 /* Set the march default in case it hasn't been specified on cmdline. */
14380 if (!opts_set->x_s390_arch)
14381 opts->x_s390_arch = PROCESSOR_2064_Z900;
14382 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14383 || opts->x_s390_arch == PROCESSOR_9672_G6)
14384 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14385 "in future releases; use at least %sarch=z900%s",
14386 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14387 suffix, prefix, suffix);
14389 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14391 /* Determine processor to tune for. */
14392 if (!opts_set->x_s390_tune)
14393 opts->x_s390_tune = opts->x_s390_arch;
14394 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14395 || opts->x_s390_tune == PROCESSOR_9672_G6)
14396 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14397 "in future releases; use at least %stune=z900%s",
14398 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14399 suffix, prefix, suffix);
14401 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14403 /* Sanity checks. */
14404 if (opts->x_s390_arch == PROCESSOR_NATIVE
14405 || opts->x_s390_tune == PROCESSOR_NATIVE)
14406 gcc_unreachable ();
14407 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14408 error ("z/Architecture mode not supported on %s",
14409 processor_table[(int)opts->x_s390_arch].name);
14410 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14411 error ("64-bit ABI not supported in ESA/390 mode");
14413 /* Enable hardware transactions if available and not explicitly
14414 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14415 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14417 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14418 opts->x_target_flags |= MASK_OPT_HTM;
14420 opts->x_target_flags &= ~MASK_OPT_HTM;
14423 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14425 if (TARGET_OPT_VX_P (opts->x_target_flags))
14427 if (!TARGET_CPU_VX_P (opts))
14428 error ("hardware vector support not available on %s",
14429 processor_table[(int)opts->x_s390_arch].name);
14430 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14431 error ("hardware vector support not available with -msoft-float");
14436 if (TARGET_CPU_VX_P (opts))
14437 /* Enable vector support if available and not explicitly disabled
14438 by user. E.g. with -m31 -march=z13 -mzarch */
14439 opts->x_target_flags |= MASK_OPT_VX;
14441 opts->x_target_flags &= ~MASK_OPT_VX;
14444 /* Use hardware DFP if available and not explicitly disabled by
14445 user. E.g. with -m31 -march=z10 -mzarch */
14446 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14448 if (TARGET_DFP_P (opts))
14449 opts->x_target_flags |= MASK_HARD_DFP;
14451 opts->x_target_flags &= ~MASK_HARD_DFP;
14454 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14456 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14458 if (!TARGET_CPU_DFP_P (opts))
14459 error ("hardware decimal floating point instructions"
14460 " not available on %s",
14461 processor_table[(int)opts->x_s390_arch].name);
14462 if (!TARGET_ZARCH_P (opts->x_target_flags))
14463 error ("hardware decimal floating point instructions"
14464 " not available in ESA/390 mode");
14467 opts->x_target_flags &= ~MASK_HARD_DFP;
14470 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14471 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14473 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14474 && TARGET_HARD_DFP_P (opts->x_target_flags))
14475 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14477 opts->x_target_flags &= ~MASK_HARD_DFP;
14480 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14481 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14482 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14483 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14486 if (opts->x_s390_stack_size)
14488 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14489 error ("stack size must be greater than the stack guard value");
14490 else if (opts->x_s390_stack_size > 1 << 16)
14491 error ("stack size must not be greater than 64k");
14493 else if (opts->x_s390_stack_guard)
14494 error ("-mstack-guard implies use of -mstack-size");
14496 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14497 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14498 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14501 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14503 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14504 opts->x_param_values,
14505 opts_set->x_param_values);
14506 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14507 opts->x_param_values,
14508 opts_set->x_param_values);
14509 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14510 opts->x_param_values,
14511 opts_set->x_param_values);
14512 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14513 opts->x_param_values,
14514 opts_set->x_param_values);
14517 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14518 opts->x_param_values,
14519 opts_set->x_param_values);
14520 /* values for loop prefetching */
14521 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14522 opts->x_param_values,
14523 opts_set->x_param_values);
14524 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14525 opts->x_param_values,
14526 opts_set->x_param_values);
14527 /* s390 has more than 2 levels and the size is much larger. Since
14528 we are always running virtualized assume that we only get a small
14529 part of the caches above l1. */
14530 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14531 opts->x_param_values,
14532 opts_set->x_param_values);
14533 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14534 opts->x_param_values,
14535 opts_set->x_param_values);
14536 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14537 opts->x_param_values,
14538 opts_set->x_param_values);
14540 /* Use the alternative scheduling-pressure algorithm by default. */
14541 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14542 opts->x_param_values,
14543 opts_set->x_param_values);
14545 /* Call target specific restore function to do post-init work. At the moment,
14546 this just sets opts->x_s390_cost_pointer. */
14547 s390_function_specific_restore (opts, NULL);
14551 s390_option_override (void)
14554 cl_deferred_option *opt;
14555 vec<cl_deferred_option> *v =
14556 (vec<cl_deferred_option> *) s390_deferred_options;
14559 FOR_EACH_VEC_ELT (*v, i, opt)
14561 switch (opt->opt_index)
14563 case OPT_mhotpatch_:
14570 strncpy (s, opt->arg, 256);
14572 t = strchr (s, ',');
14577 val1 = integral_argument (s);
14578 val2 = integral_argument (t);
14585 if (val1 == -1 || val2 == -1)
14587 /* argument is not a plain number */
14588 error ("arguments to %qs should be non-negative integers",
14592 else if (val1 > s390_hotpatch_hw_max
14593 || val2 > s390_hotpatch_hw_max)
14595 error ("argument to %qs is too large (max. %d)",
14596 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14599 s390_hotpatch_hw_before_label = val1;
14600 s390_hotpatch_hw_after_label = val2;
14604 gcc_unreachable ();
14608 /* Set up function hooks. */
14609 init_machine_status = s390_init_machine_status;
14611 s390_option_override_internal (true, &global_options, &global_options_set);
14613 /* Save the initial options in case the user does function specific
14615 target_option_default_node = build_target_option_node (&global_options);
14616 target_option_current_node = target_option_default_node;
14618 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14619 requires the arch flags to be evaluated already. Since prefetching
14620 is beneficial on s390, we enable it if available. */
14621 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14622 flag_prefetch_loop_arrays = 1;
14626 /* Don't emit DWARF3/4 unless specifically selected. The TPF
14627 debuggers do not yet support DWARF 3/4. */
14628 if (!global_options_set.x_dwarf_strict)
14630 if (!global_options_set.x_dwarf_version)
14634 /* Register a target-specific optimization-and-lowering pass
14635 to run immediately before prologue and epilogue generation.
14637 Registering the pass must be done at start up. It's
14638 convenient to do it here. */
14639 opt_pass *new_pass = new pass_s390_early_mach (g);
14640 struct register_pass_info insert_pass_s390_early_mach =
14642 new_pass, /* pass */
14643 "pro_and_epilogue", /* reference_pass_name */
14644 1, /* ref_pass_instance_number */
14645 PASS_POS_INSERT_BEFORE /* po_op */
14647 register_pass (&insert_pass_s390_early_mach);
14650 #if S390_USE_TARGET_ATTRIBUTE
14651 /* Inner function to process the attribute((target(...))), take an argument and
14652 set the current options from the argument. If we have a list, recursively go
14656 s390_valid_target_attribute_inner_p (tree args,
14657 struct gcc_options *opts,
14658 struct gcc_options *new_opts_set,
14664 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
14665 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
14666 static const struct
14668 const char *string;
14672 int only_as_pragma;
14675 S390_ATTRIB ("arch=", OPT_march_, 1),
14676 S390_ATTRIB ("tune=", OPT_mtune_, 1),
14677 /* uinteger options */
14678 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
14679 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
14680 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
14681 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
14683 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
14684 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
14685 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
14686 S390_ATTRIB ("htm", OPT_mhtm, 0),
14687 S390_ATTRIB ("vx", OPT_mvx, 0),
14688 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
14689 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
14690 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
14691 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
14692 S390_PRAGMA ("zvector", OPT_mzvector, 0),
14693 /* boolean options */
14694 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
14699 /* If this is a list, recurse to get the options. */
14700 if (TREE_CODE (args) == TREE_LIST)
14703 int num_pragma_values;
14706 /* Note: attribs.c:decl_attributes prepends the values from
14707 current_target_pragma to the list of target attributes. To determine
14708 whether we're looking at a value of the attribute or the pragma we
14709 assume that the first [list_length (current_target_pragma)] values in
14710 the list are the values from the pragma. */
14711 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
14712 ? list_length (current_target_pragma) : 0;
14713 for (i = 0; args; args = TREE_CHAIN (args), i++)
14717 is_pragma = (force_pragma || i < num_pragma_values);
14718 if (TREE_VALUE (args)
14719 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
14720 opts, new_opts_set,
14729 else if (TREE_CODE (args) != STRING_CST)
14731 error ("attribute %<target%> argument not a string");
14735 /* Handle multiple arguments separated by commas. */
14736 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14738 while (next_optstr && *next_optstr != '\0')
14740 char *p = next_optstr;
14742 char *comma = strchr (next_optstr, ',');
14743 size_t len, opt_len;
14749 enum cl_var_type var_type;
14755 len = comma - next_optstr;
14756 next_optstr = comma + 1;
14761 next_optstr = NULL;
14764 /* Recognize no-xxx. */
14765 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14774 /* Find the option. */
14777 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14779 opt_len = attrs[i].len;
14780 if (ch == attrs[i].string[0]
14781 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14782 && memcmp (p, attrs[i].string, opt_len) == 0)
14784 opt = attrs[i].opt;
14785 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14787 mask = cl_options[opt].var_value;
14788 var_type = cl_options[opt].var_type;
14794 /* Process the option. */
14797 error ("attribute(target(\"%s\")) is unknown", orig_p);
14800 else if (attrs[i].only_as_pragma && !force_pragma)
14802 /* Value is not allowed for the target attribute. */
14803 error ("Value %qs is not supported by attribute %<target%>",
14808 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14810 if (var_type == CLVC_BIT_CLEAR)
14811 opt_set_p = !opt_set_p;
14814 opts->x_target_flags |= mask;
14816 opts->x_target_flags &= ~mask;
14817 new_opts_set->x_target_flags |= mask;
14820 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14824 if (cl_options[opt].cl_uinteger)
14826 /* Unsigned integer argument. Code based on the function
14827 decode_cmdline_option () in opts-common.c. */
14828 value = integral_argument (p + opt_len);
14831 value = (opt_set_p) ? 1 : 0;
14835 struct cl_decoded_option decoded;
14837 /* Value range check; only implemented for numeric and boolean
14838 options at the moment. */
14839 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14840 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14841 set_option (opts, new_opts_set, opt, value,
14842 p + opt_len, DK_UNSPECIFIED, input_location,
14847 error ("attribute(target(\"%s\")) is unknown", orig_p);
14852 else if (cl_options[opt].var_type == CLVC_ENUM)
14857 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14859 set_option (opts, new_opts_set, opt, value,
14860 p + opt_len, DK_UNSPECIFIED, input_location,
14864 error ("attribute(target(\"%s\")) is unknown", orig_p);
14870 gcc_unreachable ();
14875 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
14878 s390_valid_target_attribute_tree (tree args,
14879 struct gcc_options *opts,
14880 const struct gcc_options *opts_set,
14883 tree t = NULL_TREE;
14884 struct gcc_options new_opts_set;
14886 memset (&new_opts_set, 0, sizeof (new_opts_set));
14888 /* Process each of the options on the chain. */
14889 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
14891 return error_mark_node;
14893 /* If some option was set (even if it has not changed), rerun
14894 s390_option_override_internal, and then save the options away. */
14895 if (new_opts_set.x_target_flags
14896 || new_opts_set.x_s390_arch
14897 || new_opts_set.x_s390_tune
14898 || new_opts_set.x_s390_stack_guard
14899 || new_opts_set.x_s390_stack_size
14900 || new_opts_set.x_s390_branch_cost
14901 || new_opts_set.x_s390_warn_framesize
14902 || new_opts_set.x_s390_warn_dynamicstack_p)
14904 const unsigned char *src = (const unsigned char *)opts_set;
14905 unsigned char *dest = (unsigned char *)&new_opts_set;
14908 /* Merge the original option flags into the new ones. */
14909 for (i = 0; i < sizeof(*opts_set); i++)
14912 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
14913 s390_option_override_internal (false, opts, &new_opts_set);
14914 /* Save the current options unless we are validating options for
14916 t = build_target_option_node (opts);
14921 /* Hook to validate attribute((target("string"))). */
14924 s390_valid_target_attribute_p (tree fndecl,
14925 tree ARG_UNUSED (name),
14927 int ARG_UNUSED (flags))
14929 struct gcc_options func_options;
14930 tree new_target, new_optimize;
14933 /* attribute((target("default"))) does nothing, beyond
14934 affecting multi-versioning. */
14935 if (TREE_VALUE (args)
14936 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
14937 && TREE_CHAIN (args) == NULL_TREE
14938 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
14941 tree old_optimize = build_optimization_node (&global_options);
14943 /* Get the optimization options of the current function. */
14944 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
14946 if (!func_optimize)
14947 func_optimize = old_optimize;
14949 /* Init func_options. */
14950 memset (&func_options, 0, sizeof (func_options));
14951 init_options_struct (&func_options, NULL);
14952 lang_hooks.init_options_struct (&func_options);
14954 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
14956 /* Initialize func_options to the default before its target options can
14958 cl_target_option_restore (&func_options,
14959 TREE_TARGET_OPTION (target_option_default_node));
14961 new_target = s390_valid_target_attribute_tree (args, &func_options,
14962 &global_options_set,
14964 current_target_pragma));
14965 new_optimize = build_optimization_node (&func_options);
14966 if (new_target == error_mark_node)
14968 else if (fndecl && new_target)
14970 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
14971 if (old_optimize != new_optimize)
14972 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
14977 /* Restore targets globals from NEW_TREE and invalidate s390_previous_fndecl
14981 s390_activate_target_options (tree new_tree)
14983 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
14984 if (TREE_TARGET_GLOBALS (new_tree))
14985 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
14986 else if (new_tree == target_option_default_node)
14987 restore_target_globals (&default_target_globals);
14989 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
14990 s390_previous_fndecl = NULL_TREE;
14993 /* Establish appropriate back-end context for processing the function
14994 FNDECL. The argument might be NULL to indicate processing at top
14995 level, outside of any function scope. */
14997 s390_set_current_function (tree fndecl)
14999 /* Only change the context if the function changes. This hook is called
15000 several times in the course of compiling a function, and we don't want to
15001 slow things down too much or call target_reinit when it isn't safe. */
15002 if (fndecl == s390_previous_fndecl)
15006 if (s390_previous_fndecl == NULL_TREE)
15007 old_tree = target_option_current_node;
15008 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
15009 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
15011 old_tree = target_option_default_node;
15013 if (fndecl == NULL_TREE)
15015 if (old_tree != target_option_current_node)
15016 s390_activate_target_options (target_option_current_node);
15020 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
15021 if (new_tree == NULL_TREE)
15022 new_tree = target_option_default_node;
15024 if (old_tree != new_tree)
15025 s390_activate_target_options (new_tree);
15026 s390_previous_fndecl = fndecl;
15030 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
15033 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
15034 unsigned int align ATTRIBUTE_UNUSED,
15035 enum by_pieces_operation op ATTRIBUTE_UNUSED,
15036 bool speed_p ATTRIBUTE_UNUSED)
15038 return (size == 1 || size == 2
15039 || size == 4 || (TARGET_ZARCH && size == 8));
15042 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
15045 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
15047 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
15048 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
15049 tree call_efpc = build_call_expr (efpc, 0);
15050 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
15052 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
15053 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
15054 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
15055 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
15056 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
15057 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
15059 /* Generates the equivalent of feholdexcept (&fenv_var)
15061 fenv_var = __builtin_s390_efpc ();
15062 __builtin_s390_sfpc (fenv_var & mask) */
15063 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
15065 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
15066 build_int_cst (unsigned_type_node,
15067 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
15068 FPC_EXCEPTION_MASK)));
15069 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
15070 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
15072 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
15074 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
15075 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
15076 build_int_cst (unsigned_type_node,
15077 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
15078 *clear = build_call_expr (sfpc, 1, new_fpc);
15080 /* Generates the equivalent of feupdateenv (fenv_var)
15082 old_fpc = __builtin_s390_efpc ();
15083 __builtin_s390_sfpc (fenv_var);
15084 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
15086 old_fpc = create_tmp_var_raw (unsigned_type_node);
15087 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
15088 old_fpc, call_efpc);
15090 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
15092 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
15093 build_int_cst (unsigned_type_node,
15095 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
15096 build_int_cst (unsigned_type_node,
15098 tree atomic_feraiseexcept
15099 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
15100 raise_old_except = build_call_expr (atomic_feraiseexcept,
15101 1, raise_old_except);
15103 *update = build2 (COMPOUND_EXPR, void_type_node,
15104 build2 (COMPOUND_EXPR, void_type_node,
15105 store_old_fpc, set_new_fpc),
15108 #undef FPC_EXCEPTION_MASK
15109 #undef FPC_FLAGS_MASK
15110 #undef FPC_DXC_MASK
15111 #undef FPC_EXCEPTION_MASK_SHIFT
15112 #undef FPC_FLAGS_SHIFT
15113 #undef FPC_DXC_SHIFT
15116 /* Return the vector mode to be used for inner mode MODE when doing
15118 static machine_mode
15119 s390_preferred_simd_mode (machine_mode mode)
15139 /* Our hardware does not require vectors to be strictly aligned. */
15141 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
15142 const_tree type ATTRIBUTE_UNUSED,
15143 int misalignment ATTRIBUTE_UNUSED,
15144 bool is_packed ATTRIBUTE_UNUSED)
15149 return default_builtin_support_vector_misalignment (mode, type, misalignment,
15153 /* The vector ABI requires vector types to be aligned on an 8 byte
15154 boundary (our stack alignment). However, we allow this to be
15155 overriden by the user, while this definitely breaks the ABI. */
15156 static HOST_WIDE_INT
15157 s390_vector_alignment (const_tree type)
15159 if (!TARGET_VX_ABI)
15160 return default_vector_alignment (type);
15162 if (TYPE_USER_ALIGN (type))
15163 return TYPE_ALIGN (type);
15165 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
15168 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15169 /* Implement TARGET_ASM_FILE_START. */
15171 s390_asm_file_start (void)
15173 default_file_start ();
15174 s390_asm_output_machine_for_arch (asm_out_file);
15178 /* Implement TARGET_ASM_FILE_END. */
15180 s390_asm_file_end (void)
15182 #ifdef HAVE_AS_GNU_ATTRIBUTE
15183 varpool_node *vnode;
15184 cgraph_node *cnode;
15186 FOR_EACH_VARIABLE (vnode)
15187 if (TREE_PUBLIC (vnode->decl))
15188 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
15190 FOR_EACH_FUNCTION (cnode)
15191 if (TREE_PUBLIC (cnode->decl))
15192 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
15195 if (s390_vector_abi != 0)
15196 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
15199 file_end_indicate_exec_stack ();
15201 if (flag_split_stack)
15202 file_end_indicate_split_stack ();
15205 /* Return true if TYPE is a vector bool type. */
15207 s390_vector_bool_type_p (const_tree type)
15209 return TYPE_VECTOR_OPAQUE (type);
15212 /* Return the diagnostic message string if the binary operation OP is
15213 not permitted on TYPE1 and TYPE2, NULL otherwise. */
15215 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15217 bool bool1_p, bool2_p;
15221 machine_mode mode1, mode2;
15223 if (!TARGET_ZVECTOR)
15226 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15229 bool1_p = s390_vector_bool_type_p (type1);
15230 bool2_p = s390_vector_bool_type_p (type2);
15232 /* Mixing signed and unsigned types is forbidden for all
15234 if (!bool1_p && !bool2_p
15235 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15236 return N_("types differ in signess");
15238 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15239 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15240 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15241 || op == ROUND_DIV_EXPR);
15242 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15243 || op == EQ_EXPR || op == NE_EXPR);
15245 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15246 return N_("binary operator does not support two vector bool operands");
15248 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15249 return N_("binary operator does not support vector bool operand");
15251 mode1 = TYPE_MODE (type1);
15252 mode2 = TYPE_MODE (type2);
15254 if (bool1_p != bool2_p && plusminus_p
15255 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15256 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15257 return N_("binary operator does not support mixing vector "
15258 "bool with floating point vector operands");
15263 /* Initialize GCC target structure. */
15265 #undef TARGET_ASM_ALIGNED_HI_OP
15266 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15267 #undef TARGET_ASM_ALIGNED_DI_OP
15268 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15269 #undef TARGET_ASM_INTEGER
15270 #define TARGET_ASM_INTEGER s390_assemble_integer
15272 #undef TARGET_ASM_OPEN_PAREN
15273 #define TARGET_ASM_OPEN_PAREN ""
15275 #undef TARGET_ASM_CLOSE_PAREN
15276 #define TARGET_ASM_CLOSE_PAREN ""
15278 #undef TARGET_OPTION_OVERRIDE
15279 #define TARGET_OPTION_OVERRIDE s390_option_override
15281 #ifdef TARGET_THREAD_SSP_OFFSET
15282 #undef TARGET_STACK_PROTECT_GUARD
15283 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
15286 #undef TARGET_ENCODE_SECTION_INFO
15287 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15289 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15290 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15293 #undef TARGET_HAVE_TLS
15294 #define TARGET_HAVE_TLS true
15296 #undef TARGET_CANNOT_FORCE_CONST_MEM
15297 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
15299 #undef TARGET_DELEGITIMIZE_ADDRESS
15300 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
15302 #undef TARGET_LEGITIMIZE_ADDRESS
15303 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
15305 #undef TARGET_RETURN_IN_MEMORY
15306 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
15308 #undef TARGET_INIT_BUILTINS
15309 #define TARGET_INIT_BUILTINS s390_init_builtins
15310 #undef TARGET_EXPAND_BUILTIN
15311 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
15312 #undef TARGET_BUILTIN_DECL
15313 #define TARGET_BUILTIN_DECL s390_builtin_decl
15315 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
15316 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
15318 #undef TARGET_ASM_OUTPUT_MI_THUNK
15319 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
15320 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
15321 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
15323 #undef TARGET_SCHED_ADJUST_PRIORITY
15324 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
15325 #undef TARGET_SCHED_ISSUE_RATE
15326 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
15327 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
15328 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
15330 #undef TARGET_SCHED_VARIABLE_ISSUE
15331 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
15332 #undef TARGET_SCHED_REORDER
15333 #define TARGET_SCHED_REORDER s390_sched_reorder
15334 #undef TARGET_SCHED_INIT
15335 #define TARGET_SCHED_INIT s390_sched_init
15337 #undef TARGET_CANNOT_COPY_INSN_P
15338 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
15339 #undef TARGET_RTX_COSTS
15340 #define TARGET_RTX_COSTS s390_rtx_costs
15341 #undef TARGET_ADDRESS_COST
15342 #define TARGET_ADDRESS_COST s390_address_cost
15343 #undef TARGET_REGISTER_MOVE_COST
15344 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
15345 #undef TARGET_MEMORY_MOVE_COST
15346 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
15348 #undef TARGET_MACHINE_DEPENDENT_REORG
15349 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
15351 #undef TARGET_VALID_POINTER_MODE
15352 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
15354 #undef TARGET_BUILD_BUILTIN_VA_LIST
15355 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
15356 #undef TARGET_EXPAND_BUILTIN_VA_START
15357 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
15358 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
15359 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
15361 #undef TARGET_PROMOTE_FUNCTION_MODE
15362 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
15363 #undef TARGET_PASS_BY_REFERENCE
15364 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
15366 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
15367 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
15368 #undef TARGET_FUNCTION_ARG
15369 #define TARGET_FUNCTION_ARG s390_function_arg
15370 #undef TARGET_FUNCTION_ARG_ADVANCE
15371 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
15372 #undef TARGET_FUNCTION_VALUE
15373 #define TARGET_FUNCTION_VALUE s390_function_value
15374 #undef TARGET_LIBCALL_VALUE
15375 #define TARGET_LIBCALL_VALUE s390_libcall_value
15376 #undef TARGET_STRICT_ARGUMENT_NAMING
15377 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
15379 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
15380 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
15382 #undef TARGET_FIXED_CONDITION_CODE_REGS
15383 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
15385 #undef TARGET_CC_MODES_COMPATIBLE
15386 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
15388 #undef TARGET_INVALID_WITHIN_DOLOOP
15389 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
15392 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
15393 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
15396 #undef TARGET_DWARF_FRAME_REG_MODE
15397 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
15399 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
15400 #undef TARGET_MANGLE_TYPE
15401 #define TARGET_MANGLE_TYPE s390_mangle_type
15404 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15405 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15407 #undef TARGET_VECTOR_MODE_SUPPORTED_P
15408 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
15410 #undef TARGET_PREFERRED_RELOAD_CLASS
15411 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
15413 #undef TARGET_SECONDARY_RELOAD
15414 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
15416 #undef TARGET_LIBGCC_CMP_RETURN_MODE
15417 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
15419 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
15420 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
15422 #undef TARGET_LEGITIMATE_ADDRESS_P
15423 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
15425 #undef TARGET_LEGITIMATE_CONSTANT_P
15426 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
15428 #undef TARGET_LRA_P
15429 #define TARGET_LRA_P s390_lra_p
15431 #undef TARGET_CAN_ELIMINATE
15432 #define TARGET_CAN_ELIMINATE s390_can_eliminate
15434 #undef TARGET_CONDITIONAL_REGISTER_USAGE
15435 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
15437 #undef TARGET_LOOP_UNROLL_ADJUST
15438 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
15440 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
15441 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
15442 #undef TARGET_TRAMPOLINE_INIT
15443 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
15445 #undef TARGET_UNWIND_WORD_MODE
15446 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
15448 #undef TARGET_CANONICALIZE_COMPARISON
15449 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
15451 #undef TARGET_HARD_REGNO_SCRATCH_OK
15452 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
15454 #undef TARGET_ATTRIBUTE_TABLE
15455 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
15457 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
15458 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
15460 #undef TARGET_SET_UP_BY_PROLOGUE
15461 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
15463 #undef TARGET_EXTRA_LIVE_ON_ENTRY
15464 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
15466 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
15467 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
15468 s390_use_by_pieces_infrastructure_p
15470 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
15471 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
15473 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
15474 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
15476 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
15477 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
15479 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
15480 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
15482 #undef TARGET_VECTOR_ALIGNMENT
15483 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
15485 #undef TARGET_INVALID_BINARY_OP
15486 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
15488 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15489 #undef TARGET_ASM_FILE_START
15490 #define TARGET_ASM_FILE_START s390_asm_file_start
15493 #undef TARGET_ASM_FILE_END
15494 #define TARGET_ASM_FILE_END s390_asm_file_end
15496 #if S390_USE_TARGET_ATTRIBUTE
15497 #undef TARGET_SET_CURRENT_FUNCTION
15498 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
15500 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
15501 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
15504 #undef TARGET_OPTION_RESTORE
15505 #define TARGET_OPTION_RESTORE s390_function_specific_restore
15507 struct gcc_target targetm = TARGET_INITIALIZER;
15509 #include "gt-s390.h"