/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2017 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "target-globals.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "conditions.h"
#include "insn-attr.h"
#include "cfgcleanup.h"
#include "langhooks.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-pass.h"
#include "tm-constrs.h"

/* This file should be included last.  */
#include "target-def.h"
/* Remember the last target of s390_set_current_function.  */
static GTY(()) tree s390_previous_fndecl;
/* Define the specific costs for a given cpu.  */

struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int mxbr;     /* cost of multiplication in TFmode.  */
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
  /* division */
  const int dxbr;
  const int ddbr;
  const int debr;
  const int dlgr;
  const int dlr;
  const int dr;
  const int dsgfr;
  const int dsgr;
};

#define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5), /* M */
  COSTS_N_INSNS (10), /* MGHI */
  COSTS_N_INSNS (5), /* MH */
  COSTS_N_INSNS (4), /* MHI */
  COSTS_N_INSNS (5), /* ML */
  COSTS_N_INSNS (5), /* MR */
  COSTS_N_INSNS (4), /* MS */
  COSTS_N_INSNS (15), /* MSG */
  COSTS_N_INSNS (7), /* MSGF */
  COSTS_N_INSNS (7), /* MSGFR */
  COSTS_N_INSNS (10), /* MSGR */
  COSTS_N_INSNS (4), /* MSR */
  COSTS_N_INSNS (7), /* multiplication in DFmode */
  COSTS_N_INSNS (13), /* MXBR */
  COSTS_N_INSNS (136), /* SQXBR */
  COSTS_N_INSNS (44), /* SQDBR */
  COSTS_N_INSNS (35), /* SQEBR */
  COSTS_N_INSNS (18), /* MADBR */
  COSTS_N_INSNS (13), /* MAEBR */
  COSTS_N_INSNS (134), /* DXBR */
  COSTS_N_INSNS (30), /* DDBR */
  COSTS_N_INSNS (27), /* DEBR */
  COSTS_N_INSNS (220), /* DLGR */
  COSTS_N_INSNS (34), /* DLR */
  COSTS_N_INSNS (34), /* DR */
  COSTS_N_INSNS (32), /* DSGFR */
  COSTS_N_INSNS (32), /* DSGR */
};
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4), /* M */
  COSTS_N_INSNS (2), /* MGHI */
  COSTS_N_INSNS (2), /* MH */
  COSTS_N_INSNS (2), /* MHI */
  COSTS_N_INSNS (4), /* ML */
  COSTS_N_INSNS (4), /* MR */
  COSTS_N_INSNS (5), /* MS */
  COSTS_N_INSNS (6), /* MSG */
  COSTS_N_INSNS (4), /* MSGF */
  COSTS_N_INSNS (4), /* MSGFR */
  COSTS_N_INSNS (4), /* MSGR */
  COSTS_N_INSNS (4), /* MSR */
  COSTS_N_INSNS (1), /* multiplication in DFmode */
  COSTS_N_INSNS (28), /* MXBR */
  COSTS_N_INSNS (130), /* SQXBR */
  COSTS_N_INSNS (66), /* SQDBR */
  COSTS_N_INSNS (38), /* SQEBR */
  COSTS_N_INSNS (1), /* MADBR */
  COSTS_N_INSNS (1), /* MAEBR */
  COSTS_N_INSNS (60), /* DXBR */
  COSTS_N_INSNS (40), /* DDBR */
  COSTS_N_INSNS (26), /* DEBR */
  COSTS_N_INSNS (176), /* DLGR */
  COSTS_N_INSNS (31), /* DLR */
  COSTS_N_INSNS (31), /* DR */
  COSTS_N_INSNS (31), /* DSGFR */
  COSTS_N_INSNS (31), /* DSGR */
};
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4), /* M */
  COSTS_N_INSNS (2), /* MGHI */
  COSTS_N_INSNS (2), /* MH */
  COSTS_N_INSNS (2), /* MHI */
  COSTS_N_INSNS (4), /* ML */
  COSTS_N_INSNS (4), /* MR */
  COSTS_N_INSNS (5), /* MS */
  COSTS_N_INSNS (6), /* MSG */
  COSTS_N_INSNS (4), /* MSGF */
  COSTS_N_INSNS (4), /* MSGFR */
  COSTS_N_INSNS (4), /* MSGR */
  COSTS_N_INSNS (4), /* MSR */
  COSTS_N_INSNS (1), /* multiplication in DFmode */
  COSTS_N_INSNS (28), /* MXBR */
  COSTS_N_INSNS (130), /* SQXBR */
  COSTS_N_INSNS (66), /* SQDBR */
  COSTS_N_INSNS (38), /* SQEBR */
  COSTS_N_INSNS (1), /* MADBR */
  COSTS_N_INSNS (1), /* MAEBR */
  COSTS_N_INSNS (60), /* DXBR */
  COSTS_N_INSNS (40), /* DDBR */
  COSTS_N_INSNS (26), /* DEBR */
  COSTS_N_INSNS (30), /* DLGR */
  COSTS_N_INSNS (23), /* DLR */
  COSTS_N_INSNS (23), /* DR */
  COSTS_N_INSNS (24), /* DSGFR */
  COSTS_N_INSNS (24), /* DSGR */
};
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10), /* M */
  COSTS_N_INSNS (10), /* MGHI */
  COSTS_N_INSNS (10), /* MH */
  COSTS_N_INSNS (10), /* MHI */
  COSTS_N_INSNS (10), /* ML */
  COSTS_N_INSNS (10), /* MR */
  COSTS_N_INSNS (10), /* MS */
  COSTS_N_INSNS (10), /* MSG */
  COSTS_N_INSNS (10), /* MSGF */
  COSTS_N_INSNS (10), /* MSGFR */
  COSTS_N_INSNS (10), /* MSGR */
  COSTS_N_INSNS (10), /* MSR */
  COSTS_N_INSNS (1), /* multiplication in DFmode */
  COSTS_N_INSNS (50), /* MXBR */
  COSTS_N_INSNS (120), /* SQXBR */
  COSTS_N_INSNS (52), /* SQDBR */
  COSTS_N_INSNS (38), /* SQEBR */
  COSTS_N_INSNS (1), /* MADBR */
  COSTS_N_INSNS (1), /* MAEBR */
  COSTS_N_INSNS (111), /* DXBR */
  COSTS_N_INSNS (39), /* DDBR */
  COSTS_N_INSNS (32), /* DEBR */
  COSTS_N_INSNS (160), /* DLGR */
  COSTS_N_INSNS (71), /* DLR */
  COSTS_N_INSNS (71), /* DR */
  COSTS_N_INSNS (71), /* DSGFR */
  COSTS_N_INSNS (71), /* DSGR */
};
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7), /* M */
  COSTS_N_INSNS (5), /* MGHI */
  COSTS_N_INSNS (5), /* MH */
  COSTS_N_INSNS (5), /* MHI */
  COSTS_N_INSNS (7), /* ML */
  COSTS_N_INSNS (7), /* MR */
  COSTS_N_INSNS (6), /* MS */
  COSTS_N_INSNS (8), /* MSG */
  COSTS_N_INSNS (6), /* MSGF */
  COSTS_N_INSNS (6), /* MSGFR */
  COSTS_N_INSNS (8), /* MSGR */
  COSTS_N_INSNS (6), /* MSR */
  COSTS_N_INSNS (1), /* multiplication in DFmode */
  COSTS_N_INSNS (40), /* MXBR B+40 */
  COSTS_N_INSNS (100), /* SQXBR B+100 */
  COSTS_N_INSNS (42), /* SQDBR B+42 */
  COSTS_N_INSNS (28), /* SQEBR B+28 */
  COSTS_N_INSNS (1), /* MADBR B */
  COSTS_N_INSNS (1), /* MAEBR B */
  COSTS_N_INSNS (101), /* DXBR B+101 */
  COSTS_N_INSNS (29), /* DDBR */
  COSTS_N_INSNS (22), /* DEBR */
  COSTS_N_INSNS (160), /* DLGR cracked */
  COSTS_N_INSNS (160), /* DLR cracked */
  COSTS_N_INSNS (160), /* DR expanded */
  COSTS_N_INSNS (160), /* DSGFR cracked */
  COSTS_N_INSNS (160), /* DSGR cracked */
};
struct processor_costs zEC12_cost =
{
  COSTS_N_INSNS (7), /* M */
  COSTS_N_INSNS (5), /* MGHI */
  COSTS_N_INSNS (5), /* MH */
  COSTS_N_INSNS (5), /* MHI */
  COSTS_N_INSNS (7), /* ML */
  COSTS_N_INSNS (7), /* MR */
  COSTS_N_INSNS (6), /* MS */
  COSTS_N_INSNS (8), /* MSG */
  COSTS_N_INSNS (6), /* MSGF */
  COSTS_N_INSNS (6), /* MSGFR */
  COSTS_N_INSNS (8), /* MSGR */
  COSTS_N_INSNS (6), /* MSR */
  COSTS_N_INSNS (1), /* multiplication in DFmode */
  COSTS_N_INSNS (40), /* MXBR B+40 */
  COSTS_N_INSNS (100), /* SQXBR B+100 */
  COSTS_N_INSNS (42), /* SQDBR B+42 */
  COSTS_N_INSNS (28), /* SQEBR B+28 */
  COSTS_N_INSNS (1), /* MADBR B */
  COSTS_N_INSNS (1), /* MAEBR B */
  COSTS_N_INSNS (131), /* DXBR B+131 */
  COSTS_N_INSNS (29), /* DDBR */
  COSTS_N_INSNS (22), /* DEBR */
  COSTS_N_INSNS (160), /* DLGR cracked */
  COSTS_N_INSNS (160), /* DLR cracked */
  COSTS_N_INSNS (160), /* DR expanded */
  COSTS_N_INSNS (160), /* DSGFR cracked */
  COSTS_N_INSNS (160), /* DSGR cracked */
};
static const struct
{
  const char *const name;
  const enum processor_type processor;
  const struct processor_costs *cost;
}
const processor_table[] =
{
  { "g5", PROCESSOR_9672_G5, &z900_cost },
  { "g6", PROCESSOR_9672_G6, &z900_cost },
  { "z900", PROCESSOR_2064_Z900, &z900_cost },
  { "z990", PROCESSOR_2084_Z990, &z990_cost },
  { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
  { "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
  { "z10", PROCESSOR_2097_Z10, &z10_cost },
  { "z196", PROCESSOR_2817_Z196, &z196_cost },
  { "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
  { "z13", PROCESSOR_2964_Z13, &zEC12_cost },
  { "arch12", PROCESSOR_ARCH12, &zEC12_cost },
  { "native", PROCESSOR_NATIVE, NULL }
};
extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx_insn *last_scheduled_insn;
#define MAX_SCHED_UNITS 3
static int last_scheduled_unit_distance[MAX_SCHED_UNITS];

/* The maximum score added for an instruction whose unit hasn't been
   in use for MAX_SCHED_MIX_DISTANCE steps.  Increase this value to
   give instruction mix scheduling more priority over instruction
   grouping.  */
#define MAX_SCHED_MIX_SCORE 8
/* The maximum distance up to which individual scores will be
   calculated.  Everything beyond this gives MAX_SCHED_MIX_SCORE.
   Increase this with the OOO window size of the machine.  */
#define MAX_SCHED_MIX_DISTANCE 100
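/* Illustrative note (added; not in the original source): with the
   defaults above, an instruction whose unit was last used
   MAX_SCHED_MIX_DISTANCE (100) or more insns ago receives the full
   MAX_SCHED_MIX_SCORE of 8 from the scoring hook; a unit used more
   recently contributes proportionally less, e.g. roughly 4 after 50
   insns under a linear scheme.  The exact formula lives in the
   scheduling hooks omitted from this excerpt.  */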
/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
          base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */
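/* A minimal sketch of the structure that comment describes (added;
   the original definition is elided from this excerpt, so the field
   names here are illustrative assumptions, not authoritative).  */
struct s390_address_sketch
{
  rtx base;       /* base register, or NULL_RTX if absent.  */
  rtx indx;       /* index register, or NULL_RTX if absent.  */
  rtx disp;       /* displacement, or NULL_RTX if absent.  */
  bool pointer;   /* address known to be a pointer.  */
};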
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Number of first and last gpr for which slots in the register
     save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
      0 - does not need to be saved at all
     -1 - stack slot  */
#define SAVE_SLOT_NONE   0
#define SAVE_SLOT_STACK  -1
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored.  */
  int first_restore_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;
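  /* Worked example (added for illustration): together with the
     cfun_set_fpr_save macro below, hard register 18 (f4 in the table
     above) maps to bit 18 - FPR0_REGNUM = bit 2 of fpr_bitmap.  */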
  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     the FPRs.  */
  bool tbegin_p;

  /* For -fsplit-stack support: A stack local which holds a pointer to
     the stack arguments for a function with a variable number of
     arguments.  This is set at the start of the function and is used
     to initialize the overflow_arg_area field of the va_list
     structure.  */
  rtx split_stack_varargs_pointer;
};
/* A few accessor macros for struct cfun->machine->s390_frame_layout.  */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
  ? cfun_frame_layout.fpr_bitmap & 0x0f \
  : cfun_frame_layout.fpr_bitmap & 0x03))
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
#define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (REGNO - FPR0_REGNUM)))
#define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (REGNO - FPR0_REGNUM))))
#define cfun_gpr_save_slot(REGNO) \
  cfun->machine->frame_layout.gpr_save_slots[REGNO]
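/* Illustrative usage (added; not part of the original file): marking
   f4 as needing a save and then querying it:

     cfun_set_fpr_save (FPR4_REGNUM);
     gcc_assert (cfun_fpr_save_p (FPR4_REGNUM));

   FPR4_REGNUM is assumed here to be defined alongside the
   FPR0_REGNUM macro used above (in the elided s390.h headers).  */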
/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
#define VEC_ARG_NUM_REG 8

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE) \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))

/* That's the read ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
/* Indicate which ABI has been used for passing vector args.
   0 - no vector type arguments have been passed where the ABI is relevant
   1 - the old ABI has been used
   2 - a vector type argument has been passed either in a vector register
       or on the stack by value  */
static int s390_vector_abi = 0;
/* Set the vector ABI marker if TYPE is subject to the vector ABI
   switch.  The vector ABI affects only vector data types.  There are
   two aspects of the vector ABI relevant here:

   1. vectors >= 16 bytes have an alignment of 8 bytes with the new
      ABI and natural alignment with the old.

   2. vectors <= 16 bytes are passed in VRs or by value on the stack
      with the new ABI but by reference on the stack with the old.

   If ARG_P is true TYPE is used for a function argument or return
   value.  The ABI marker then is set for all vector data types.  If
   ARG_P is false only type 1 vectors are being checked.  */
static void
s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
{
  static hash_set<const_tree> visited_types_hash;

  if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
    return;

  if (visited_types_hash.contains (type))
    return;

  visited_types_hash.add (type);

  if (VECTOR_TYPE_P (type))
    {
      int type_size = int_size_in_bytes (type);

      /* Outside arguments only the alignment is changing and this
	 only happens for vector types >= 16 bytes.  */
      if (!arg_p && type_size < 16)
	return;

      /* In arguments vector types > 16 bytes are passed as before (GCC
	 never enforced the bigger alignment for arguments which was
	 required by the old vector ABI).  However, it might still be
	 ABI relevant due to the changed alignment if it is a struct
	 member.  */
      if (arg_p && type_size > 16 && !in_struct_p)
	return;

      s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
    }
  else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
    {
      /* ARRAY_TYPE: Since with neither of the ABIs we have more than
	 natural alignment there will never be ABI dependent padding
	 in an array type.  That's why we do not set in_struct_p to
	 true here.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
    {
      tree arg_chain;

      /* Check the return type.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);

      for (arg_chain = TYPE_ARG_TYPES (type);
	   arg_chain;
	   arg_chain = TREE_CHAIN (arg_chain))
	s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
    }
  else if (RECORD_OR_UNION_TYPE_P (type))
    {
      tree field;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	{
	  if (TREE_CODE (field) != FIELD_DECL)
	    continue;

	  s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
	}
    }
}
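/* Illustrative user-level example (added; not from this file) of the
   two ABI aspects described above, using GCC's vector_size attribute:

     typedef double v4df __attribute__ ((vector_size (32)));
     typedef int v4si __attribute__ ((vector_size (16)));

     struct s { char c; v4df v; };  -- layout depends on whether v4df
                                       gets 8-byte (new ABI) or natural
                                       (old ABI) alignment
     void f (v4si x);               -- 16-byte vector argument: passed
                                       in a VR or by value with the new
                                       ABI, by reference with the old

   Compiling declarations like these is what sets the marker above.  */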
/* System z builtins.  */

#include "s390-builtins.h"

const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
  {
#define B_DEF(...)
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int
opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
  {
#define B_DEF(...)
#define OB_DEF(...)
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
#include "s390-builtins.def"
    0
  };

tree s390_builtin_types[BT_MAX];
tree s390_builtin_fn_types[BT_FN_MAX];
tree s390_builtin_decls[S390_BUILTIN_MAX +
			S390_OVERLOADED_BUILTIN_MAX +
			S390_OVERLOADED_BUILTIN_VAR_MAX];

static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
#define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
  CODE_FOR_nothing
};
static void
s390_init_builtins (void)
{
  /* These definitions are being used in s390-builtins.def.  */
  tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
				       NULL, NULL);
  tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
  tree c_uint64_type_node;

  /* The uint64_type_node from tree.c is not compatible with the C99
     uint64_t data type.  What we want is c_uint64_type_node from
     c-common.c.  But since backend code is not supposed to interface
     with the frontend we recreate it here.  */
  if (TARGET_64BIT)
    c_uint64_type_node = long_unsigned_type_node;
  else
    c_uint64_type_node = long_long_unsigned_type_node;

#undef DEF_TYPE
#define DEF_TYPE(INDEX, NODE, CONST_P) \
  if (s390_builtin_types[INDEX] == NULL) \
    s390_builtin_types[INDEX] = (!CONST_P) ? \
      (NODE) : build_type_variant ((NODE), 1, 0);

#undef DEF_POINTER_TYPE
#define DEF_POINTER_TYPE(INDEX, INDEX_BASE) \
  if (s390_builtin_types[INDEX] == NULL) \
    s390_builtin_types[INDEX] = \
      build_pointer_type (s390_builtin_types[INDEX_BASE]);

#undef DEF_DISTINCT_TYPE
#define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE) \
  if (s390_builtin_types[INDEX] == NULL) \
    s390_builtin_types[INDEX] = \
      build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);

#undef DEF_VECTOR_TYPE
#define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
  if (s390_builtin_types[INDEX] == NULL) \
    s390_builtin_types[INDEX] = \
      build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_OPAQUE_VECTOR_TYPE
#define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
  if (s390_builtin_types[INDEX] == NULL) \
    s390_builtin_types[INDEX] = \
      build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_FN_TYPE
#define DEF_FN_TYPE(INDEX, args...) \
  if (s390_builtin_fn_types[INDEX] == NULL) \
    s390_builtin_fn_types[INDEX] = \
      build_function_type_list (args, NULL_TREE);
#define DEF_OV_TYPE(...)
#include "s390-builtin-types.def"

#undef B_DEF
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
  if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
    s390_builtin_decls[S390_BUILTIN_##NAME] = \
      add_builtin_function ("__builtin_" #NAME, \
			    s390_builtin_fn_types[FNTYPE], \
			    S390_BUILTIN_##NAME, \
			    BUILT_IN_MD, NULL, ATTRS);

#undef OB_DEF
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
  if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
      == NULL) \
    s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
      add_builtin_function ("__builtin_" #NAME, \
			    s390_builtin_fn_types[FNTYPE], \
			    S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
			    BUILT_IN_MD, NULL, 0);

#undef OB_DEF_VAR
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
}
/* Return true if ARG is appropriate as argument number ARGNUM of
   builtin DECL.  The operand flags from s390-builtins.def have to be
   passed as OP_FLAGS.  */
bool
s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
{
  if (O_UIMM_P (op_flags))
    {
      int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_U1];

      if (!tree_fits_uhwi_p (arg)
	  || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
	{
	  error ("constant argument %d for builtin %qF is out of range (0.."
		 HOST_WIDE_INT_PRINT_UNSIGNED ")",
		 argnum, decl,
		 (HOST_WIDE_INT_1U << bitwidth) - 1);
	  return false;
	}
    }

  if (O_SIMM_P (op_flags))
    {
      int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_S2];

      if (!tree_fits_shwi_p (arg)
	  || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
	  || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
	{
	  error ("constant argument %d for builtin %qF is out of range ("
		 HOST_WIDE_INT_PRINT_DEC ".."
		 HOST_WIDE_INT_PRINT_DEC ")",
		 argnum, decl,
		 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
		 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
	  return false;
	}
    }

  return true;
}
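/* Worked example (added): for an operand flagged O_U4 the unsigned
   bitwidths table above yields bitwidth = bitwidths[O_U4 - O_U1] = 4
   (assuming the O_Un flag values are consecutive), so the accepted
   range is 0..15 and a constant argument of 16 is rejected with the
   error shown above.  */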
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		     machine_mode mode ATTRIBUTE_UNUSED,
		     int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 6

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;
  tree arg;
  call_expr_arg_iterator iter;
  unsigned int all_op_flags = opflags_for_builtin (fcode);
  machine_mode last_vec_mode = VOIDmode;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr,
	       "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
	       (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
	       bflags_for_builtin (fcode));
    }

  if (S390_USE_TARGET_ATTRIBUTE)
    {
      unsigned int bflags;

      bflags = bflags_for_builtin (fcode);
      if ((bflags & B_HTM) && !TARGET_HTM)
	{
	  error ("builtin %qF is not supported without -mhtm "
		 "(default with -march=zEC12 and higher).", fndecl);
	  return const0_rtx;
	}
      if (((bflags & B_VX) || (bflags & B_VXE)) && !TARGET_VX)
	{
	  error ("builtin %qF is not supported without -mvx "
		 "(default with -march=z13 and higher).", fndecl);
	  return const0_rtx;
	}

      if ((bflags & B_VXE) && !TARGET_VXE)
	{
	  error ("builtin %qF requires arch12 or higher.", fndecl);
	  return const0_rtx;
	}
    }
  if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
      && fcode < S390_ALL_BUILTIN_MAX)

  else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
    {
      icode = code_for_builtin[fcode];
      /* Set a flag in the machine specific cfun part in order to support
	 saving/restoring of FPRs.  */
      if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
	cfun->machine->tbegin_p = true;
    }
  else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
    {
      error ("unresolved overloaded builtin");
      return const0_rtx;
    }
  else
    internal_error ("bad builtin fcode");

  if (icode == 0)
    internal_error ("bad builtin icode");
  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      /* There are builtins (e.g. vec_promote) with no vector
	 arguments but an element selector.  So we have to also look
	 at the vector return type when emitting the modulo
	 operation.  */
      if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
	last_vec_mode = insn_data[icode].operand[0].mode;
    }

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      rtx tmp_rtx;
      const struct insn_operand_data *insn_op;
      unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);

      all_op_flags = all_op_flags >> O_SHIFT;

      if (arg == error_mark_node)
	return NULL_RTX;
      if (arity >= MAX_ARGS)
	return NULL_RTX;

      if (O_IMM_P (op_flags)
	  && TREE_CODE (arg) != INTEGER_CST)
	{
	  error ("constant value required for builtin %qF argument %d",
		 fndecl, arity + 1);
	  return const0_rtx;
	}

      if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
	return const0_rtx;

      insn_op = &insn_data[icode].operand[arity + nonvoid];
      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      /* expand_expr truncates constants to the target mode only if it
	 is "convenient".  However, our checks below rely on this
	 being done.  */
      if (CONST_INT_P (op[arity])
	  && SCALAR_INT_MODE_P (insn_op->mode)
	  && GET_MODE (op[arity]) != insn_op->mode)
	op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
						 insn_op->mode));

      /* Wrap the expanded RTX for pointer types into a MEM expr with
	 the proper mode.  This allows us to use e.g. (match_operand
	 "memory_operand"..) in the insn patterns instead of (mem
	 (match_operand "address_operand)).  This is helpful for
	 patterns not just accepting MEMs.  */
      if (POINTER_TYPE_P (TREE_TYPE (arg))
	  && insn_op->predicate != address_operand)
	op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);

      /* Expand the modulo operation required on element selectors.  */
      if (op_flags == O_ELEM)
	{
	  gcc_assert (last_vec_mode != VOIDmode);
	  op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
					     op[arity],
					     GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
					     NULL_RTX, 1, OPTAB_DIRECT);
	}

      /* Record the vector mode used for an element selector.  This assumes:
	 1. There is no builtin with two different vector modes and an element selector
	 2. The element selector comes after the vector type it is referring to.
	 This is currently true for all the builtins, but FIXME: we
	 should better check for that.  */
      if (VECTOR_MODE_P (insn_op->mode))
	last_vec_mode = insn_op->mode;

      if (insn_op->predicate (op[arity], insn_op->mode))
	{
	  arity++;
	  continue;
	}

      if (MEM_P (op[arity])
	  && insn_op->predicate == memory_operand
	  && (GET_MODE (XEXP (op[arity], 0)) == Pmode
	      || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
	{
	  op[arity] = replace_equiv_address (op[arity],
					     copy_to_mode_reg (Pmode,
							       XEXP (op[arity], 0)));
	}
      /* Some of the builtins require different modes/types than the
	 pattern in order to implement a specific API.  Instead of
	 adding many expanders which do the mode change we do it here.
	 E.g. s390_vec_add_u128, which is required to have vector unsigned
	 char arguments, is mapped to addti3.  */
      else if (insn_op->mode != VOIDmode
	       && GET_MODE (op[arity]) != VOIDmode
	       && GET_MODE (op[arity]) != insn_op->mode
	       && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
						   GET_MODE (op[arity]), 0))
		   != NULL_RTX))
	{
	  op[arity] = tmp_rtx;
	}
      else if (GET_MODE (op[arity]) == insn_op->mode
	       || GET_MODE (op[arity]) == VOIDmode
	       || (insn_op->predicate == address_operand
		   && GET_MODE (op[arity]) == Pmode))
	{
	  /* An address_operand usually has VOIDmode in the expander
	     so we cannot use this.  */
	  machine_mode target_mode =
	    (insn_op->predicate == address_operand
	     ? Pmode : insn_op->mode);
	  op[arity] = copy_to_mode_reg (target_mode, op[arity]);
	}

      if (!insn_op->predicate (op[arity], insn_op->mode))
	{
	  error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
	  return const0_rtx;
	}
      arity++;
    }
  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0]);
      else
	pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1]);
      else
	pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 3:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
      else
	pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 4:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
      else
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    case 5:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
      else
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      break;
    case 6:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
      else
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
      break;
    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
static const int s390_hotpatch_hw_max = 1000000;
static int s390_hotpatch_hw_before_label = 0;
static int s390_hotpatch_hw_after_label = 0;

/* Check whether the hotpatch attribute is applied to a function and, if it has
   an argument, the argument is valid.  */

static tree
s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
				int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree expr = NULL_TREE;
  tree expr2 = NULL_TREE;
  int err;

  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  if (args != NULL && TREE_CHAIN (args) != NULL)
    {
      expr = TREE_VALUE (args);
      expr2 = TREE_VALUE (TREE_CHAIN (args));
    }
  if (args == NULL || TREE_CHAIN (args) == NULL)
    err = 1;
  else if (TREE_CODE (expr) != INTEGER_CST
	   || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
	   || wi::gtu_p (expr, s390_hotpatch_hw_max))
    err = 1;
  else if (TREE_CODE (expr2) != INTEGER_CST
	   || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
	   || wi::gtu_p (expr2, s390_hotpatch_hw_max))
    err = 1;
  else
    err = 0;
  if (err)
    {
      error ("requested %qE attribute is not a comma separated pair of"
	     " non-negative integer constants or too large (max. %d)", name,
	     s390_hotpatch_hw_max);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
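/* Illustrative usage of the attribute validated above (added; this is
   the documented GCC syntax for S/390): reserve 1 halfword of NOPs
   before and 2 halfwords after the function label for live patching:

     void foo (void) __attribute__ ((hotpatch (1, 2)));
*/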
/* Expand the s390_vector_bool type attribute.  */

static tree
s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
				  tree args ATTRIBUTE_UNUSED,
				  int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  machine_mode mode;

  while (POINTER_TYPE_P (type)
	 || TREE_CODE (type) == FUNCTION_TYPE
	 || TREE_CODE (type) == METHOD_TYPE
	 || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);
  switch (mode)
    {
    case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
    case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
    case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
    case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
      break;
    default: break;
    }

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
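/* A plausible use of the attribute (added for illustration; in
   practice it is applied through the vecintrin.h "vector bool"
   typedefs rather than written by hand):

     typedef int bv4si __attribute__ ((vector_size (16),
                                       s390_vector_bool));

   With V4SImode the handler above rewrites the type to BT_BV4SI.  */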
static const struct attribute_spec s390_attribute_table[] = {
  { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
  { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
  { NULL, 0, 0, false, false, false, NULL, false }
};
/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label.  */
static int
s390_label_align (rtx_insn *label)
{
  rtx_insn *prev_insn = prev_active_insn (label);
  rtx set, src;

  if (prev_insn == NULL_RTX)
    goto old;

  set = single_set (prev_insn);

  if (set == NULL_RTX)
    goto old;

  src = SET_SRC (set);

  /* Don't align literal pool base labels.  */
  if (GET_CODE (src) == UNSPEC
      && XINT (src, 1) == UNSPEC_MAIN_BASE)
    return 0;

 old:
  return align_labels_log;
}
static machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE.  */
static bool
s390_scalar_mode_supported_p (machine_mode mode)
{
  /* In contrast to the default implementation reject TImode constants on
     31-bit TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
}

/* Return true if the back end supports vector mode MODE.  */
static bool
s390_vector_mode_supported_p (machine_mode mode)
{
  machine_mode inner;

  if (!VECTOR_MODE_P (mode)
      || !TARGET_VX
      || GET_MODE_SIZE (mode) > 16)
    return false;

  inner = GET_MODE_INNER (mode);
/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */
void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static machine_mode
s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;

  switch (m1)
    {
    case CCZmode:
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
	  || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
	return m2;
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

static bool
s390_match_ccmode_set (rtx set, machine_mode req_mode)
{
  machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  /* These modes are supposed to be used only in CC consumer
     patterns.  */
  gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
	      && req_mode != CCVFALLmode && req_mode != CCVFANYmode);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return true;

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
      if (req_mode != set_mode)
	return false;
      break;

      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
	  && req_mode != CCSRmode && req_mode != CCURmode)
	return false;
      break;

      if (req_mode != CCAmode)
	return false;
      break;
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

bool
s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
	rtx set = XVECEXP (PATTERN (insn), 0, i);
	if (GET_CODE (set) == SET)
	  if (!s390_match_ccmode_set (set, req_mode))
	    return false;
      }

  return true;
}
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx); it is false
   if the instruction cannot (TM).  */

machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_WIDE_INT as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     int a;
     if ((a & (16 + 128)) == 16) -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2 */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
	return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
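/* Worked example (added): for if ((a & (16 + 128)) == 16) the incoming
   operands are OP1 = 144 and OP2 = 16, so bit1 = exact_log2 (16) = 4
   and bit0 = exact_log2 (144 ^ 16) = exact_log2 (128) = 7; since
   bit0 > bit1 the function returns CCT1mode, matching the table in
   the comment above.  */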
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
	   || GET_CODE (op1) == NEG)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      if (GET_CODE (op0) == AND)
	{
	  /* Check whether we can potentially do it via TM.  */
	  machine_mode ccmode;
	  ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
	  if (ccmode != VOIDmode)
	    {
	      /* Relax CCTmode to CCZmode to allow fall-back to AND
		 if that turns out to be beneficial.  */
	      return ccmode == CCTmode ? CCZmode : ccmode;
	    }
	}

      if (register_operand (op0, HImode)
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
      if (register_operand (op0, QImode)
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (op1) == -1 || INTVAL (op1) == 255))

      /* The only overflow condition of NEG and ABS happens when
	 -INT_MAX is used as parameter, which stays negative.  So
	 we have an overflow from a positive value to a negative.
	 Using CCAP mode the resulting cc can be used for comparisons.  */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      /* If constants are involved in an add instruction it is possible to use
	 the resulting cc for comparisons with zero.  Knowing the sign of the
	 constant the overflow behavior gets predictable.  e.g.:
	   int a, b; if ((b = a + c) > 0)
	 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
	      || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
		  /* Avoid INT32_MIN on 32 bit.  */
		  && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
	{
	  if (INTVAL (XEXP (op0, 1)) < 0)

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)

      if (GET_CODE (op0) == PLUS
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)

      if (GET_CODE (op0) == MINUS
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

static void
s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			      bool op0_preserve_value)
{
  if (op0_preserve_value)
    return;

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
	  && pos >= 0 && pos + len <= modesize
	  && modesize <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT block;
	  block = (HOST_WIDE_INT_1U << len) - 1;
	  block <<= modesize - pos - len;

	  *op0 = gen_rtx_AND (GET_MODE (inner), inner,
			      gen_int_mode (block, GET_MODE (inner)));
	}
    }
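  /* Worked example (added): with modesize = 32, len = 2, pos = 4 the
     mask becomes block = ((1 << 2) - 1) << (32 - 4 - 2), i.e. two one
     bits shifted to positions 26..27, so the extraction test turns
     into (x & 0xc000000) == 0.  */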
  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
	  && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
	  && (GET_MODE_SIZE (GET_MODE (inner))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
	  && ((INTVAL (mask)
	       & GET_MODE_MASK (GET_MODE (inner))
	       & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
	      == 0))
	inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
	{
	  int part = s390_single_part (XEXP (*op0, 1),
				       GET_MODE (inner), QImode, 0);
	  if (part >= 0)
	    {
	      mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
	      inner = adjust_address_nv (inner, QImode, part);
	      *op0 = gen_rtx_AND (QImode, inner, mask);
	    }
	}
    }

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
	  & ~HOST_WIDE_INT_UC (0xffff)) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }

  /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
	{
	case EQ: new_code = EQ; break;
	case NE: new_code = NE; break;
	case LT: new_code = GTU; break;
	case GT: new_code = LTU; break;
	case LE: new_code = GEU; break;
	case GE: new_code = LEU; break;
	default: break;
	}

      if (new_code != UNKNOWN)
	{
	  *op0 = XVECEXP (*op0, 0, 0);
	  *code = (int) new_code;
	}
    }

  /* Remove redundant UNSPEC_CC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && CONST_INT_P (*op1))
    {
      enum rtx_code new_code = UNKNOWN;
      switch (GET_MODE (XVECEXP (*op0, 0, 0)))
	{
	case CCZmode:
	case CCRAWmode:
	  switch (*code)
	    {
	    case EQ: new_code = EQ; break;
	    case NE: new_code = NE; break;
	    default: break;
	    }
	  break;
	default: break;
	}

      if (new_code != UNKNOWN)
	{
	  /* For CCRAWmode put the required cc mask into the second
	     operand.  */
	  if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
	      && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
	    *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
	  *op0 = XVECEXP (*op0, 0, 0);
	  *code = (int) new_code;
	}
    }

  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
	  || (*code == NE && GET_CODE (*op0) == EQ))
	*code = EQ;
      else
	*code = NE;

      *op0 = XEXP (*op0, 0);
    }

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int) swap_condition ((enum rtx_code) *code);
    }

  /* A comparison result is compared against zero.  Replace it with
     the (perhaps inverted) original comparison.
     This probably should be done by simplify_relational_operation.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && COMPARISON_P (*op0)
      && CC_REG_P (XEXP (*op0, 0)))
    {
      enum rtx_code new_code;

      if (*code == EQ)
	new_code = reversed_comparison_code_parts (GET_CODE (*op0),
						   XEXP (*op0, 0),
						   XEXP (*op1, 0), NULL);
      else
	new_code = GET_CODE (*op0);

      if (new_code != UNKNOWN)
	{
	  *code = (int) new_code;
	  *op1 = XEXP (*op0, 1);
	  *op0 = XEXP (*op0, 0);
	}
    }
}
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      /* Do not output a redundant compare instruction if a
	 compare_and_swap pattern already computed the result and the
	 machine modes are compatible.  */
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
		  != VOIDmode);
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
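/* Illustrative usage (added): the returned condition is typically fed
   into the jump helper defined below, e.g.

     rtx cond = s390_emit_compare (EQ, op0, op1);
     s390_emit_jump (label, cond);

   which emits a compare followed by a conditional jump to LABEL.  */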
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
			    rtx cmp, rtx new_rtx)
{
  emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
  return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
			    const0_rtx);
}

/* Emit a jump instruction to TARGET and return it.  If COND is
   NULL_RTX, emit an unconditional jump, else a conditional jump under
   condition COND.  */

rtx_insn *
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (pc_rtx, target);
  return emit_jump_insn (insn);
}
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx
	      || (GET_MODE (XEXP (code, 0)) == CCRAWmode
		  && CONST_INT_P (XEXP (code, 1))));
  switch (GET_MODE (XEXP (code, 0)))
    {
    case CCZmode:
    case CCZ1mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	default: return -1;
	}
      break;

    case CCT1mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC1;
	case NE: return CC0 | CC2 | CC3;
	default: return -1;
	}
      break;

    case CCT2mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC2;
	case NE: return CC0 | CC1 | CC3;
	default: return -1;
	}
      break;

    case CCT3mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC3;
	case NE: return CC0 | CC1 | CC2;
	default: return -1;
	}
      break;

    case CCLmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0 | CC2;
	case NE: return CC1 | CC3;
	default: return -1;
	}
      break;

    case CCL1mode:
      switch (GET_CODE (code))
	{
	case LTU: return CC2 | CC3;  /* carry */
	case GEU: return CC0 | CC1;  /* no carry */
	default: return -1;
	}
      break;

    case CCL2mode:
      switch (GET_CODE (code))
	{
	case GTU: return CC0 | CC1;  /* borrow */
	case LEU: return CC2 | CC3;  /* no borrow */
	default: return -1;
	}
      break;

    case CCL3mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0 | CC2;
	case NE: return CC1 | CC3;
	case LTU: return CC1;
	case GTU: return CC3;
	case LEU: return CC1 | CC2;
	case GEU: return CC2 | CC3;
	default: return -1;
	}

    case CCUmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LTU: return CC1;
	case GTU: return CC2;
	case LEU: return CC0 | CC1;
	case GEU: return CC0 | CC2;
	default: return -1;
	}
      break;

    case CCURmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC2 | CC1 | CC3;
	case LTU: return CC2;
	case GTU: return CC1;
	case LEU: return CC0 | CC2;
	case GEU: return CC0 | CC1;
	default: return -1;
	}
      break;

    case CCAPmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1 | CC3;
	case GT: return CC2;
	case LE: return CC0 | CC1 | CC3;
	case GE: return CC0 | CC2;
	default: return -1;
	}
      break;

    case CCANmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1;
	case GT: return CC2 | CC3;
	case LE: return CC0 | CC1;
	case GE: return CC0 | CC2 | CC3;
	default: return -1;
	}
      break;

    case CCSmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1;
	case GT: return CC2;
	case LE: return CC0 | CC1;
	case GE: return CC0 | CC2;
	case UNORDERED: return CC3;
	case ORDERED: return CC0 | CC1 | CC2;
	case UNEQ: return CC0 | CC3;
	case UNLT: return CC1 | CC3;
	case UNGT: return CC2 | CC3;
	case UNLE: return CC0 | CC1 | CC3;
	case UNGE: return CC0 | CC2 | CC3;
	case LTGT: return CC1 | CC2;
	default: return -1;
	}
      break;

    case CCSRmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC2 | CC1 | CC3;
	case LT: return CC2;
	case GT: return CC1;
	case LE: return CC0 | CC2;
	case GE: return CC0 | CC1;
	case UNORDERED: return CC3;
	case ORDERED: return CC0 | CC2 | CC1;
	case UNEQ: return CC0 | CC3;
	case UNLT: return CC2 | CC3;
	case UNGT: return CC1 | CC3;
	case UNLE: return CC0 | CC2 | CC3;
	case UNGE: return CC0 | CC1 | CC3;
	case LTGT: return CC2 | CC1;
	default: return -1;
	}
      break;

    /* Vector comparison modes.  */
    /* CC2 will never be set.  It however is part of the negated
       mask.  */
    case CCVIALLmode:
      switch (GET_CODE (code))
	{
	case EQ:
	case GTU:
	case GT:
	case GE: return CC0;
	  /* The inverted modes are in fact *any* modes.  */
	case NE:
	case LEU:
	case LE:
	case LT: return CC3 | CC1 | CC2;
	default: return -1;
	}

    case CCVIANYmode:
      switch (GET_CODE (code))
	{
	case EQ:
	case GTU:
	case GT:
	case GE: return CC0 | CC1;
	  /* The inverted modes are in fact *all* modes.  */
	case NE:
	case LEU:
	case LE:
	case LT: return CC3 | CC2;
	default: return -1;
	}

    case CCVFALLmode:
      switch (GET_CODE (code))
	{
	case EQ:
	case GT:
	case GE: return CC0;
	  /* The inverted modes are in fact *any* modes.  */
	case NE:
	case UNLE:
	case UNLT: return CC3 | CC1 | CC2;
	default: return -1;
	}

    case CCVFANYmode:
      switch (GET_CODE (code))
	{
	case EQ:
	case GT:
	case GE: return CC0 | CC1;
	  /* The inverted modes are in fact *all* modes.  */
	case NE:
	case UNLE:
	case UNLT: return CC3 | CC2;
	default: return -1;
	}

    case CCRAWmode:
      switch (GET_CODE (code))
	{
	case EQ:
	  return INTVAL (XEXP (code, 1));
	case NE:
	  return (INTVAL (XEXP (code, 1))) ^ 0xf;
	default:
	  return -1;
	}

    default:
      return -1;
    }
}
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && (XEXP (code, 1) == const0_rtx
	  || (GET_MODE (XEXP (code, 0)) == CCRAWmode
	      && CONST_INT_P (XEXP (code, 1)))))
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
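/* Worked example (added): for NE in CCZmode the branch mask is
   CC1 | CC2 | CC3 = 4 + 2 + 1 = 7, and mnemonic[7] is "ne"; the
   inverted mask is 7 ^ 15 = 8, i.e. mnemonic[8] = "e".  */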
/* Return the part of OP which has a value different from DEF.
   The size of the part is determined by MODE.
   Use this function only if you already know that OP really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
	value = UINTVAL (op);
      else
	value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
	return value & part_mask;
    }

  gcc_unreachable ();
}
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

int
s390_single_part (rtx op,
		  machine_mode mode,
		  machine_mode part_mode,
		  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
	value = UINTVAL (op);
      else
	value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
	{
	  if (part != -1)
	    return -1;
	  else
	    part = i;
	}
    }

  return part == -1 ? -1 : n_parts - 1 - part;
}
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in (the lower SIZE bits of) IN.

   PSTART and PEND can be used to obtain the start and end
   position (inclusive) of the bitfield relative to 64
   bits.  *PSTART / *PEND gives the position of the first/last bit
   of the bitfield counting from the highest order bit starting
   with zero.  */

bool
s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
				  int *pstart, int *pend)
{
  int start, end;
  int lowbit = HOST_BITS_PER_WIDE_INT - 1;
  int highbit = HOST_BITS_PER_WIDE_INT - size;
  unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;

  gcc_assert (!!pstart == !!pend);
  for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
      /* Look for the rightmost bit of a contiguous range of ones.  */

      /* Look for the first zero bit after the range of ones.  */
      if (! (bitmask & in))

	/* We're one past the last one-bit.  */

  /* No one bits found.  */

  if (start > highbit)
    {
      unsigned HOST_WIDE_INT mask;

      /* Calculate a mask for all bits beyond the contiguous bits.  */
      mask = ((~HOST_WIDE_INT_0U >> highbit)
	      & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));

      /* There are more bits set beyond the first range of one bits.  */
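/* Worked example (added): for IN = 0xff0 and SIZE = 64 the one bits
   occupy bits 4..11 counting from the least significant bit, which is
   positions 52..59 counting from the most significant bit, so on
   success *PSTART = 52 and *PEND = 59.  */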
/* Same as s390_contiguous_bitmask_nowrap_p but also returns true
   if ~IN contains a contiguous bitfield.  In that case, *END is <
   *START.

   If WRAP_P is true, a bitmask that wraps around is also tested.
   When a wraparound occurs *START is greater than *END (in
   non-null pointers), and the uppermost (64 - SIZE) bits are thus
   part of the range.  If WRAP_P is false, no wraparound is
   tested.  */

bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
			   int size, int *start, int *end)
{
  int bs = HOST_BITS_PER_WIDE_INT;
  bool b;

  gcc_assert (!!start == !!end);
  if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
    /* This cannot be expressed as a contiguous bitmask.  Exit early because
       the second call of s390_contiguous_bitmask_nowrap_p would accept this as
       a contiguous bitmask.  */
    return false;
  b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
  if (b)
    return true;
  if (! wrap_p)
    return false;
  b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
  if (b && start)
    {
      int s = *start;
      int e = *end;

      gcc_assert (s >= 1);
      *start = ((e + 1) & (bs - 1));
      *end = ((s - 1 + bs) & (bs - 1));
    }

  return b;
}
/* Return true if OP contains the same contiguous bitfield in *all*
   its elements.  START and END can be used to obtain the start and
   end position of the bitfield.

   START/END give the position of the first/last bit of the bitfield
   counting from the lowest order bit starting with zero.  In order to
   use these values for S/390 instructions this has to be converted to
   "bits big endian" style.  */

bool
s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
{
  unsigned HOST_WIDE_INT mask;
  int size;
  rtx elt;
  bool b;

  gcc_assert (!!start == !!end);
  if (!const_vec_duplicate_p (op, &elt)
      || !CONST_INT_P (elt))
    return false;

  size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));

  /* We cannot deal with V1TI/V1TF.  This would require a vgmq.  */
  if (size > 64)
    return false;

  mask = UINTVAL (elt);

  b = s390_contiguous_bitmask_p (mask, true, size, start, end);
  if (b)
    {
      if (start)
	{
	  *start -= (HOST_BITS_PER_WIDE_INT - size);
	  *end -= (HOST_BITS_PER_WIDE_INT - size);
	}
      return true;
    }
  else
    return false;
}
/* Return true if OP consists only of byte chunks being either 0 or
   0xff.  If MASK is != NULL a byte mask is generated which is
   appropriate for the vector generate byte mask instruction.  */

bool
s390_bytemask_vector_p (rtx op, unsigned *mask)
{
  int i;
  unsigned tmp_mask = 0;
  int nunit, unit_size;

  if (!VECTOR_MODE_P (GET_MODE (op))
      || GET_CODE (op) != CONST_VECTOR
      || !CONST_INT_P (XVECEXP (op, 0, 0)))
    return false;

  nunit = GET_MODE_NUNITS (GET_MODE (op));
  unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));

  for (i = 0; i < nunit; i++)
    {
      unsigned HOST_WIDE_INT c;
      int j;

      if (!CONST_INT_P (XVECEXP (op, 0, i)))
	return false;

      c = UINTVAL (XVECEXP (op, 0, i));
      for (j = 0; j < unit_size; j++)
	{
	  if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
	    return false;
	  tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
	  c = c >> BITS_PER_UNIT;
	}
    }

  if (mask != NULL)
    *mask = tmp_mask;

  return true;
}
/* Check whether a rotate of ROTL followed by an AND of CONTIG is
   equivalent to a shift followed by the AND.  In particular, CONTIG
   should not overlap the (rotated) bit 0/bit 63 gap.  Negative values
   for ROTL indicate a rotate to the right.  */

bool
s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
{
  int start, end;
  bool ok;

  ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
  gcc_assert (ok);

  if (rotl >= 0)
    return (64 - end >= rotl);
  else
    {
      /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
	 DImode.  */
      rotl = -rotl + (64 - bitsize);
      return (start >= rotl);
    }
}
/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
{
  /* Floating point and vector registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
	return false;
    }

  return true;
}
2421 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2422 and [MEM2, MEM2 + SIZE] do overlap, and false otherwise. */
2426 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2428 rtx addr1, addr2, addr_delta;
2429 HOST_WIDE_INT delta;
2431 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2437 addr1 = XEXP (mem1, 0);
2438 addr2 = XEXP (mem2, 0);
2440 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2442 /* This overlapping check is used by peepholes merging memory block operations.
2443 Overlapping operations would otherwise be recognized by the S/390 hardware
2444 and would fall back to a slower implementation. Allowing overlapping
2445 operations would lead to slow code but not to wrong code. Therefore we are
2446 somewhat optimistic if we cannot prove that the memory blocks are really distinct.
2448 That's why we return false here although this may accept operations on
2449 overlapping memory areas. */
2450 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2453 delta = INTVAL (addr_delta);
2456 || (delta > 0 && delta < size)
2457 || (delta < 0 && -delta < size))
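/* Editorial illustration: for MEM1 at address (reg b) and MEM2 at
   (plus (reg b) (const_int 8)), simplify_binary_operation folds the
   address difference to (const_int 8).  With SIZE == 8 the blocks are
   provably distinct and false is returned; with SIZE == 16 the delta
   falls inside [MEM1, MEM1 + SIZE] and the overlap is proven, so the
   function returns true.  */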
2463 /* Check whether the address of memory reference MEM2 equals exactly
2464 the address of memory reference MEM1 plus DELTA. Return true if
2465 we can prove this to be the case, false otherwise. */
2468 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2470 rtx addr1, addr2, addr_delta;
2472 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2475 addr1 = XEXP (mem1, 0);
2476 addr2 = XEXP (mem2, 0);
2478 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2479 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2485 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2488 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2491 machine_mode wmode = mode;
2492 rtx dst = operands[0];
2493 rtx src1 = operands[1];
2494 rtx src2 = operands[2];
2497 /* If we cannot handle the operation directly, use a temp register. */
2498 if (!s390_logical_operator_ok_p (operands))
2499 dst = gen_reg_rtx (mode);
2501 /* QImode and HImode patterns make sense only if we have a destination
2502 in memory. Otherwise perform the operation in SImode. */
2503 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2506 /* Widen operands if required. */
2509 if (GET_CODE (dst) == SUBREG
2510 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2512 else if (REG_P (dst))
2513 dst = gen_rtx_SUBREG (wmode, dst, 0);
2515 dst = gen_reg_rtx (wmode);
2517 if (GET_CODE (src1) == SUBREG
2518 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2520 else if (GET_MODE (src1) != VOIDmode)
2521 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2523 if (GET_CODE (src2) == SUBREG
2524 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2526 else if (GET_MODE (src2) != VOIDmode)
2527 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2530 /* Emit the instruction. */
2531 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2532 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2533 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2535 /* Fix up the destination if needed. */
2536 if (dst != operands[0])
2537 emit_move_insn (operands[0], gen_lowpart (mode, dst));
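/* Editorial example: expanding (xor:QI (reg a) (reg b)) with a
   register destination takes the widening path above.  The operation
   is rewritten on SImode subregs, emitted as a PARALLEL of the SET
   plus a CC clobber, and the QImode lowpart is finally copied back
   into operands[0].  */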
2540 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2543 s390_logical_operator_ok_p (rtx *operands)
2545 /* If the destination operand is in memory, it needs to coincide
2546 with one of the source operands. After reload, it has to be
2547 the first source operand. */
2548 if (GET_CODE (operands[0]) == MEM)
2549 return rtx_equal_p (operands[0], operands[1])
2550 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2555 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2556 operand IMMOP to switch from SS to SI type instructions. */
2559 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2561 int def = code == AND ? -1 : 0;
2565 gcc_assert (GET_CODE (*memop) == MEM);
2566 gcc_assert (!MEM_VOLATILE_P (*memop));
2568 mask = s390_extract_part (*immop, QImode, def);
2569 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2570 gcc_assert (part >= 0);
2572 *memop = adjust_address (*memop, QImode, part);
2573 *immop = gen_int_mode (mask, QImode);
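/* Editorial example: (and:HI (mem) (const_int 0x00ff)) differs from
   the AND default of all ones only in its most significant byte, so
   s390_single_part returns part 0, the MEM is narrowed to that byte,
   and the immediate becomes 0x00 -- turning an SS-type NC into an
   SI-type NI instruction.  */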
2577 /* How to allocate a 'struct machine_function'. */
2579 static struct machine_function *
2580 s390_init_machine_status (void)
2582 return ggc_cleared_alloc<machine_function> ();
2585 /* Map each hard register number to the smallest class containing it. */
2587 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2588 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2589 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2590 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2591 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2592 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2593 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2594 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2595 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2596 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2597 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2598 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2599 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2600 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2601 VEC_REGS, VEC_REGS /* 52 */
2604 /* Return the attribute type of INSN. */
2606 static enum attr_type
2607 s390_safe_attr_type (rtx_insn *insn)
2609 if (recog_memoized (insn) >= 0)
2610 return get_attr_type (insn);
2615 /* Return true if DISP is a valid short displacement. */
2618 s390_short_displacement (rtx disp)
2620 /* No displacement is OK. */
2624 /* Without the long displacement facility we don't need to
2625 distinguish between long and short displacement. */
2626 if (!TARGET_LONG_DISPLACEMENT)
2629 /* Integer displacement in range. */
2630 if (GET_CODE (disp) == CONST_INT)
2631 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2633 /* GOT offset is not OK, the GOT can be large. */
2634 if (GET_CODE (disp) == CONST
2635 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2636 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2637 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2640 /* All other symbolic constants are literal pool references,
2641 which are OK as the literal pool must be small. */
2642 if (GET_CODE (disp) == CONST)
2648 /* Decompose an RTL expression ADDR for a memory address into
2649 its components, returned in OUT.
2651 Returns false if ADDR is not a valid memory address, true
2652 otherwise. If OUT is NULL, don't return the components,
2653 but check for validity only.
2655 Note: Only addresses in canonical form are recognized.
2656 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2657 canonical form so that they will be recognized. */
2660 s390_decompose_address (rtx addr, struct s390_address *out)
2662 HOST_WIDE_INT offset = 0;
2663 rtx base = NULL_RTX;
2664 rtx indx = NULL_RTX;
2665 rtx disp = NULL_RTX;
2667 bool pointer = false;
2668 bool base_ptr = false;
2669 bool indx_ptr = false;
2670 bool literal_pool = false;
2672 /* We may need to substitute the literal pool base register into the address
2673 below. However, at this point we do not know which register is going to
2674 be used as base, so we substitute the arg pointer register. This is going
2675 to be treated as holding a pointer below -- it shouldn't be used for any other purpose. */
2677 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2679 /* Decompose address into base + index + displacement. */
2681 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2684 else if (GET_CODE (addr) == PLUS)
2686 rtx op0 = XEXP (addr, 0);
2687 rtx op1 = XEXP (addr, 1);
2688 enum rtx_code code0 = GET_CODE (op0);
2689 enum rtx_code code1 = GET_CODE (op1);
2691 if (code0 == REG || code0 == UNSPEC)
2693 if (code1 == REG || code1 == UNSPEC)
2695 indx = op0; /* index + base */
2701 base = op0; /* base + displacement */
2706 else if (code0 == PLUS)
2708 indx = XEXP (op0, 0); /* index + base + disp */
2709 base = XEXP (op0, 1);
2720 disp = addr; /* displacement */
2722 /* Extract integer part of displacement. */
2726 if (GET_CODE (disp) == CONST_INT)
2728 offset = INTVAL (disp);
2731 else if (GET_CODE (disp) == CONST
2732 && GET_CODE (XEXP (disp, 0)) == PLUS
2733 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2735 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2736 disp = XEXP (XEXP (disp, 0), 0);
2740 /* Strip off CONST here to avoid special case tests later. */
2741 if (disp && GET_CODE (disp) == CONST)
2742 disp = XEXP (disp, 0);
2744 /* We can convert literal pool addresses to
2745 displacements by basing them off the base register. */
2746 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2751 base = fake_pool_base, literal_pool = true;
2753 /* Mark up the displacement. */
2754 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2755 UNSPEC_LTREL_OFFSET);
2758 /* Validate base register. */
2761 if (GET_CODE (base) == UNSPEC)
2762 switch (XINT (base, 1))
2766 disp = gen_rtx_UNSPEC (Pmode,
2767 gen_rtvec (1, XVECEXP (base, 0, 0)),
2768 UNSPEC_LTREL_OFFSET);
2772 base = XVECEXP (base, 0, 1);
2775 case UNSPEC_LTREL_BASE:
2776 if (XVECLEN (base, 0) == 1)
2777 base = fake_pool_base, literal_pool = true;
2779 base = XVECEXP (base, 0, 1);
2786 if (!REG_P (base) || GET_MODE (base) != Pmode)
2789 if (REGNO (base) == STACK_POINTER_REGNUM
2790 || REGNO (base) == FRAME_POINTER_REGNUM
2791 || ((reload_completed || reload_in_progress)
2792 && frame_pointer_needed
2793 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2794 || REGNO (base) == ARG_POINTER_REGNUM
2796 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2797 pointer = base_ptr = true;
2799 if ((reload_completed || reload_in_progress)
2800 && base == cfun->machine->base_reg)
2801 pointer = base_ptr = literal_pool = true;
2804 /* Validate index register. */
2807 if (GET_CODE (indx) == UNSPEC)
2808 switch (XINT (indx, 1))
2812 disp = gen_rtx_UNSPEC (Pmode,
2813 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2814 UNSPEC_LTREL_OFFSET);
2818 indx = XVECEXP (indx, 0, 1);
2821 case UNSPEC_LTREL_BASE:
2822 if (XVECLEN (indx, 0) == 1)
2823 indx = fake_pool_base, literal_pool = true;
2825 indx = XVECEXP (indx, 0, 1);
2832 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2835 if (REGNO (indx) == STACK_POINTER_REGNUM
2836 || REGNO (indx) == FRAME_POINTER_REGNUM
2837 || ((reload_completed || reload_in_progress)
2838 && frame_pointer_needed
2839 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2840 || REGNO (indx) == ARG_POINTER_REGNUM
2842 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2843 pointer = indx_ptr = true;
2845 if ((reload_completed || reload_in_progress)
2846 && indx == cfun->machine->base_reg)
2847 pointer = indx_ptr = literal_pool = true;
2850 /* Prefer to use pointer as base, not index. */
2851 if (base && indx && !base_ptr
2852 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2859 /* Validate displacement. */
2862 /* If virtual registers are involved, the displacement will change later
2863 anyway as the virtual registers get eliminated. This could make a
2864 valid displacement invalid, but it is more likely to make an invalid
2865 displacement valid, because we sometimes access the register save area
2866 via negative offsets to one of those registers.
2867 Thus we don't check the displacement for validity here. If after
2868 elimination the displacement turns out to be invalid after all,
2869 this is fixed up by reload in any case. */
2870 /* LRA always keeps displacements up to date and we need to
2871 know that the displacement is right during all of LRA, not only
2872 at the final elimination. */
2874 || (base != arg_pointer_rtx
2875 && indx != arg_pointer_rtx
2876 && base != return_address_pointer_rtx
2877 && indx != return_address_pointer_rtx
2878 && base != frame_pointer_rtx
2879 && indx != frame_pointer_rtx
2880 && base != virtual_stack_vars_rtx
2881 && indx != virtual_stack_vars_rtx))
2882 if (!DISP_IN_RANGE (offset))
2887 /* All the special cases are pointers. */
2890 /* In the small-PIC case, the linker converts @GOT
2891 and @GOTNTPOFF offsets to possible displacements. */
2892 if (GET_CODE (disp) == UNSPEC
2893 && (XINT (disp, 1) == UNSPEC_GOT
2894 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2900 /* Accept pool label offsets. */
2901 else if (GET_CODE (disp) == UNSPEC
2902 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2905 /* Accept literal pool references. */
2906 else if (GET_CODE (disp) == UNSPEC
2907 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2909 /* In case CSE pulled a non-literal-pool reference out of
2910 the pool we have to reject the address. This is
2911 especially important when loading the GOT pointer on non
2912 zarch CPUs. In this case the literal pool contains an lt
2913 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2914 will most likely exceed the displacement range. */
2915 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2916 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2919 orig_disp = gen_rtx_CONST (Pmode, disp);
2922 /* If we have an offset, make sure it does not
2923 exceed the size of the constant pool entry. */
2924 rtx sym = XVECEXP (disp, 0, 0);
2925 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2928 orig_disp = plus_constant (Pmode, orig_disp, offset);
2943 out->disp = orig_disp;
2944 out->pointer = pointer;
2945 out->literal_pool = literal_pool;
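/* Editorial illustration of the decomposition: the canonical address
   (plus (plus (reg r1) (reg r2)) (const_int 40)) yields indx == r1,
   base == r2 and offset == 40 (subject to the "prefer pointer as
   base" swap above), while a lone (const_int 42) is a valid
   displacement-only address with neither base nor index.  */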
2951 /* Decompose an RTL expression OP for an address style operand into its
2952 components, and return the base register in BASE and the offset in
2953 OFFSET. While OP looks like an address, it is never supposed to be used as such.
2956 Return true if OP is a valid address operand, false if not. */
2959 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
2960 HOST_WIDE_INT *offset)
2964 /* We can have an integer constant, an address register,
2965 or a sum of the two. */
2966 if (CONST_SCALAR_INT_P (op))
2971 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
2976 while (op && GET_CODE (op) == SUBREG)
2977 op = SUBREG_REG (op);
2979 if (op && GET_CODE (op) != REG)
2984 if (off == NULL_RTX)
2986 else if (CONST_INT_P (off))
2987 *offset = INTVAL (off);
2988 else if (CONST_WIDE_INT_P (off))
2989 /* The offset will be truncated to 12 bits anyway, so take just
2990 the lowest-order chunk of the wide int. */
2991 *offset = CONST_WIDE_INT_ELT (off, 0);
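/* Editorial example: for OP == (plus (reg r3) (const_int 7)) the
   function stores r3 in *BASE and 7 in *OFFSET; for a plain
   (const_int 15), *BASE becomes NULL_RTX and *OFFSET is 15.  This is
   the shape shift counts and other address-style operands take.  */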
3002 /* Return true if OP is a valid address without index. */
3005 s390_legitimate_address_without_index_p (rtx op)
3007 struct s390_address addr;
3009 if (!s390_decompose_address (XEXP (op, 0), &addr))
3018 /* Return TRUE if ADDR is an operand valid for a load/store relative
3019 instruction. Be aware that the alignment of the operand needs to
3020 be checked separately.
3021 Valid addresses are single references or a sum of a reference and a
3022 constant integer. Return these parts in SYMREF and ADDEND. You can
3023 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3024 values. Literal pool references are *not* considered symbol references. */
3028 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3030 HOST_WIDE_INT tmpaddend = 0;
3032 if (GET_CODE (addr) == CONST)
3033 addr = XEXP (addr, 0);
3035 if (GET_CODE (addr) == PLUS)
3037 if (!CONST_INT_P (XEXP (addr, 1)))
3040 tmpaddend = INTVAL (XEXP (addr, 1));
3041 addr = XEXP (addr, 0);
3044 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3045 || (GET_CODE (addr) == UNSPEC
3046 && (XINT (addr, 1) == UNSPEC_GOTENT
3047 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3052 *addend = tmpaddend;
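/* Editorial example: ADDR == (const (plus (symbol_ref "foo")
   (const_int 16))) is accepted with *SYMREF set to the SYMBOL_REF and
   *ADDEND == 16, provided "foo" is not a literal pool entry; a bare
   (symbol_ref "foo") yields an addend of 0.  */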
3059 /* Return true if the address in OP is valid for constraint letter C
3060 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3061 pool MEMs should be accepted. Only the Q, R, S, T constraint
3062 letters are allowed for C. */
3065 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3067 struct s390_address addr;
3068 bool decomposed = false;
3070 /* This check makes sure that no symbolic addresses (except literal
3071 pool references) are accepted by the R or T constraints. */
3072 if (s390_loadrelative_operand_p (op, NULL, NULL))
3075 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3078 if (!s390_decompose_address (op, &addr))
3080 if (addr.literal_pool)
3085 /* With reload, we sometimes get intermediate address forms that are
3086 actually invalid as-is, but we need to accept them in the most
3087 generic cases below ('R' or 'T'), since reload will in fact fix
3088 them up. LRA behaves differently here; we never see such forms,
3089 but on the other hand, we need to strictly reject every invalid
3090 address form. Perform this check right up front. */
3091 if (lra_in_progress)
3093 if (!decomposed && !s390_decompose_address (op, &addr))
3100 case 'Q': /* no index short displacement */
3101 if (!decomposed && !s390_decompose_address (op, &addr))
3105 if (!s390_short_displacement (addr.disp))
3109 case 'R': /* with index short displacement */
3110 if (TARGET_LONG_DISPLACEMENT)
3112 if (!decomposed && !s390_decompose_address (op, &addr))
3114 if (!s390_short_displacement (addr.disp))
3117 /* Any invalid address here will be fixed up by reload,
3118 so accept it for the most generic constraint. */
3121 case 'S': /* no index long displacement */
3122 if (!decomposed && !s390_decompose_address (op, &addr))
3128 case 'T': /* with index long displacement */
3129 /* Any invalid address here will be fixed up by reload,
3130 so accept it for the most generic constraint. */
3140 /* Evaluates constraint strings described by the regular expression
3141 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3142 the constraint given in STR, or 0 otherwise. */
3145 s390_mem_constraint (const char *str, rtx op)
3152 /* Check for offsettable variants of memory constraints. */
3153 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3155 if ((reload_completed || reload_in_progress)
3156 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3158 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3160 /* Check for non-literal-pool variants of memory constraints. */
3163 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3168 if (GET_CODE (op) != MEM)
3170 return s390_check_qrst_address (c, XEXP (op, 0), true);
3172 /* Simply check for the basic form of a shift count. Reload will
3173 take care of making sure we have a proper base register. */
3174 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3178 return s390_check_qrst_address (str[1], op, true);
3186 /* Evaluates constraint strings starting with letter O. Input
3187 parameter C is the letter following the "O" in the constraint
3188 string. Returns 1 if VALUE meets the respective constraint and 0 otherwise. */
3192 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3200 return trunc_int_for_mode (value, SImode) == value;
3204 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3207 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3215 /* Evaluates constraint strings starting with letter N. Parameter STR
3216 contains the letters following letter "N" in the constraint string.
3217 Returns true if VALUE matches the constraint. */
3220 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3222 machine_mode mode, part_mode;
3224 int part, part_goal;
3230 part_goal = str[0] - '0';
3274 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3277 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3280 if (part_goal != -1 && part_goal != part)
3287 /* Returns true if the input parameter VALUE is a float zero. */
3290 s390_float_const_zero_p (rtx value)
3292 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3293 && value == CONST0_RTX (GET_MODE (value)));
3296 /* Implement TARGET_REGISTER_MOVE_COST. */
3299 s390_register_move_cost (machine_mode mode,
3300 reg_class_t from, reg_class_t to)
3302 /* On s390, copying between FPRs and GPRs is expensive. */
3304 /* It becomes somewhat faster with ldgr/lgdr. */
3305 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3307 /* ldgr is single cycle. */
3308 if (reg_classes_intersect_p (from, GENERAL_REGS)
3309 && reg_classes_intersect_p (to, FP_REGS))
3311 /* lgdr needs 3 cycles. */
3312 if (reg_classes_intersect_p (to, GENERAL_REGS)
3313 && reg_classes_intersect_p (from, FP_REGS))
3317 /* Otherwise copying is done via memory. */
3318 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3319 && reg_classes_intersect_p (to, FP_REGS))
3320 || (reg_classes_intersect_p (from, FP_REGS)
3321 && reg_classes_intersect_p (to, GENERAL_REGS)))
3327 /* Implement TARGET_MEMORY_MOVE_COST. */
3330 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3331 reg_class_t rclass ATTRIBUTE_UNUSED,
3332 bool in ATTRIBUTE_UNUSED)
3337 /* Compute a (partial) cost for rtx X. Return true if the complete
3338 cost has been computed, and false if subexpressions should be
3339 scanned. In either case, *TOTAL contains the cost result. The
3340 initial value of *TOTAL is the default value computed by
3341 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3342 code of the superexpression of x. */
3345 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3346 int opno ATTRIBUTE_UNUSED,
3347 int *total, bool speed ATTRIBUTE_UNUSED)
3349 int code = GET_CODE (x);
3357 case CONST_WIDE_INT:
3364 if (GET_CODE (XEXP (x, 0)) == AND
3365 && GET_CODE (XEXP (x, 1)) == ASHIFT
3366 && REG_P (XEXP (XEXP (x, 0), 0))
3367 && REG_P (XEXP (XEXP (x, 1), 0))
3368 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3369 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3370 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3371 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3373 *total = COSTS_N_INSNS (2);
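/* Editorial note: the pattern matched above is, e.g.,
   (ior (and (reg a) (const_int 255)) (ashift (reg b) (const_int 8))),
   where the AND mask equals (1 << shift count) - 1.  Such an IOR can
   be implemented by a rotate-and-insert (risbg-style) sequence, hence
   the fixed two-instruction cost.  */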
3377 /* ~AND on a 128-bit mode. This can be done using a vector instruction. */
3380 && GET_CODE (XEXP (x, 0)) == NOT
3381 && GET_CODE (XEXP (x, 1)) == NOT
3382 && REG_P (XEXP (XEXP (x, 0), 0))
3383 && REG_P (XEXP (XEXP (x, 1), 0))
3384 && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) == 16
3385 && s390_hard_regno_mode_ok (VR0_REGNUM,
3386 GET_MODE (XEXP (XEXP (x, 0), 0))))
3388 *total = COSTS_N_INSNS (1);
3401 *total = COSTS_N_INSNS (1);
3406 *total = COSTS_N_INSNS (1);
3414 rtx left = XEXP (x, 0);
3415 rtx right = XEXP (x, 1);
3416 if (GET_CODE (right) == CONST_INT
3417 && CONST_OK_FOR_K (INTVAL (right)))
3418 *total = s390_cost->mhi;
3419 else if (GET_CODE (left) == SIGN_EXTEND)
3420 *total = s390_cost->mh;
3422 *total = s390_cost->ms; /* msr, ms, msy */
3427 rtx left = XEXP (x, 0);
3428 rtx right = XEXP (x, 1);
3431 if (GET_CODE (right) == CONST_INT
3432 && CONST_OK_FOR_K (INTVAL (right)))
3433 *total = s390_cost->mghi;
3434 else if (GET_CODE (left) == SIGN_EXTEND)
3435 *total = s390_cost->msgf;
3437 *total = s390_cost->msg; /* msgr, msg */
3439 else /* TARGET_31BIT */
3441 if (GET_CODE (left) == SIGN_EXTEND
3442 && GET_CODE (right) == SIGN_EXTEND)
3443 /* mulsidi case: mr, m */
3444 *total = s390_cost->m;
3445 else if (GET_CODE (left) == ZERO_EXTEND
3446 && GET_CODE (right) == ZERO_EXTEND
3447 && TARGET_CPU_ZARCH)
3448 /* umulsidi case: ml, mlr */
3449 *total = s390_cost->ml;
3451 /* Complex calculation is required. */
3452 *total = COSTS_N_INSNS (40);
3458 *total = s390_cost->mult_df;
3461 *total = s390_cost->mxbr;
3472 *total = s390_cost->madbr;
3475 *total = s390_cost->maebr;
3480 /* A negation in the third argument is free: FMSUB. */
3481 if (GET_CODE (XEXP (x, 2)) == NEG)
3483 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3484 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3485 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3492 if (mode == TImode) /* 128 bit division */
3493 *total = s390_cost->dlgr;
3494 else if (mode == DImode)
3496 rtx right = XEXP (x, 1);
3497 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3498 *total = s390_cost->dlr;
3499 else /* 64 by 64 bit division */
3500 *total = s390_cost->dlgr;
3502 else if (mode == SImode) /* 32 bit division */
3503 *total = s390_cost->dlr;
3510 rtx right = XEXP (x, 1);
3511 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3513 *total = s390_cost->dsgfr;
3515 *total = s390_cost->dr;
3516 else /* 64 by 64 bit division */
3517 *total = s390_cost->dsgr;
3519 else if (mode == SImode) /* 32 bit division */
3520 *total = s390_cost->dlr;
3521 else if (mode == SFmode)
3523 *total = s390_cost->debr;
3525 else if (mode == DFmode)
3527 *total = s390_cost->ddbr;
3529 else if (mode == TFmode)
3531 *total = s390_cost->dxbr;
3537 *total = s390_cost->sqebr;
3538 else if (mode == DFmode)
3539 *total = s390_cost->sqdbr;
3541 *total = s390_cost->sqxbr;
3546 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3547 || outer_code == PLUS || outer_code == MINUS
3548 || outer_code == COMPARE)
3553 *total = COSTS_N_INSNS (1);
3554 if (GET_CODE (XEXP (x, 0)) == AND
3555 && GET_CODE (XEXP (x, 1)) == CONST_INT
3556 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3558 rtx op0 = XEXP (XEXP (x, 0), 0);
3559 rtx op1 = XEXP (XEXP (x, 0), 1);
3560 rtx op2 = XEXP (x, 1);
3562 if (memory_operand (op0, GET_MODE (op0))
3563 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3565 if (register_operand (op0, GET_MODE (op0))
3566 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3576 /* Return the cost of an address rtx ADDR. */
3579 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3580 addr_space_t as ATTRIBUTE_UNUSED,
3581 bool speed ATTRIBUTE_UNUSED)
3583 struct s390_address ad;
3584 if (!s390_decompose_address (addr, &ad))
3587 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3590 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3592 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3594 int misalign ATTRIBUTE_UNUSED)
3596 switch (type_of_cost)
3606 case cond_branch_not_taken:
3608 case vec_promote_demote:
3609 case unaligned_load:
3610 case unaligned_store:
3613 case cond_branch_taken:
3617 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3624 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3625 otherwise return 0. */
3628 tls_symbolic_operand (rtx op)
3630 if (GET_CODE (op) != SYMBOL_REF)
3632 return SYMBOL_REF_TLS_MODEL (op);
3635 /* Split DImode access register reference REG (on 64-bit) into its constituent
3636 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3637 gen_highpart cannot be used as they assume all registers are word-sized,
3638 while our access registers have only half that size. */
3641 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3643 gcc_assert (TARGET_64BIT);
3644 gcc_assert (ACCESS_REG_P (reg));
3645 gcc_assert (GET_MODE (reg) == DImode);
3646 gcc_assert (!(REGNO (reg) & 1));
3648 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3649 *hi = gen_rtx_REG (SImode, REGNO (reg));
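/* Editorial example: for a DImode access-register pair such as a0/a1
   the code above returns the SImode high half (a0) in *HI and the low
   half (a1) in *LO; REGNO + 1 holds the less significant word since
   access registers are only 32 bits wide.  */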
3652 /* Return true if OP contains a symbol reference. */
3655 symbolic_reference_mentioned_p (rtx op)
3660 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3663 fmt = GET_RTX_FORMAT (GET_CODE (op));
3664 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3670 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3671 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3675 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3682 /* Return true if OP contains a reference to a thread-local symbol. */
3685 tls_symbolic_reference_mentioned_p (rtx op)
3690 if (GET_CODE (op) == SYMBOL_REF)
3691 return tls_symbolic_operand (op);
3693 fmt = GET_RTX_FORMAT (GET_CODE (op));
3694 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3700 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3701 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3705 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3713 /* Return true if OP is a legitimate general operand when
3714 generating PIC code. It is given that flag_pic is on
3715 and that OP satisfies CONSTANT_P. */
3718 legitimate_pic_operand_p (rtx op)
3720 /* Accept all non-symbolic constants. */
3721 if (!SYMBOLIC_CONST (op))
3724 /* Reject everything else; must be handled
3725 via emit_symbolic_move. */
3729 /* Returns true if the constant value OP is a legitimate general operand.
3730 It is given that OP satisfies CONSTANT_P. */
3733 s390_legitimate_constant_p (machine_mode mode, rtx op)
3735 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3737 if (GET_MODE_SIZE (mode) != 16)
3740 if (!satisfies_constraint_j00 (op)
3741 && !satisfies_constraint_jm1 (op)
3742 && !satisfies_constraint_jKK (op)
3743 && !satisfies_constraint_jxx (op)
3744 && !satisfies_constraint_jyy (op))
3748 /* Accept all non-symbolic constants. */
3749 if (!SYMBOLIC_CONST (op))
3752 /* Accept immediate LARL operands. */
3753 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3756 /* Thread-local symbols are never legal constants. This is
3757 so that emit_call knows that computing such addresses
3758 might require a function call. */
3759 if (TLS_SYMBOLIC_CONST (op))
3762 /* In the PIC case, symbolic constants must *not* be
3763 forced into the literal pool. We accept them here,
3764 so that they will be handled by emit_symbolic_move. */
3768 /* All remaining non-PIC symbolic constants are
3769 forced into the literal pool. */
3773 /* Determine if it's legal to put X into the constant pool. This
3774 is not possible if X contains the address of a symbol that is
3775 not constant (TLS) or not known at final link time (PIC). */
3778 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3780 switch (GET_CODE (x))
3784 case CONST_WIDE_INT:
3786 /* Accept all non-symbolic constants. */
3790 /* Labels are OK iff we are non-PIC. */
3791 return flag_pic != 0;
3794 /* 'Naked' TLS symbol references are never OK,
3795 non-TLS symbols are OK iff we are non-PIC. */
3796 if (tls_symbolic_operand (x))
3799 return flag_pic != 0;
3802 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3805 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3806 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3809 switch (XINT (x, 1))
3811 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3812 case UNSPEC_LTREL_OFFSET:
3820 case UNSPEC_GOTNTPOFF:
3821 case UNSPEC_INDNTPOFF:
3824 /* If the literal pool shares the code section, we put execute
3825 template placeholders into the pool as well. */
3827 return TARGET_CPU_ZARCH;
3839 /* Returns true if the constant value OP is a legitimate general
3840 operand during and after reload. The difference to
3841 legitimate_constant_p is that this function will not accept
3842 a constant that would need to be forced to the literal pool
3843 before it can be used as operand.
3844 This function accepts all constants which can be loaded directly into a GPR. */
3848 legitimate_reload_constant_p (rtx op)
3850 /* Accept la(y) operands. */
3851 if (GET_CODE (op) == CONST_INT
3852 && DISP_IN_RANGE (INTVAL (op)))
3855 /* Accept l(g)hi/l(g)fi operands. */
3856 if (GET_CODE (op) == CONST_INT
3857 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3860 /* Accept lliXX operands. */
3862 && GET_CODE (op) == CONST_INT
3863 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3864 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3868 && GET_CODE (op) == CONST_INT
3869 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3870 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3873 /* Accept larl operands. */
3874 if (TARGET_CPU_ZARCH
3875 && larl_operand (op, VOIDmode))
3878 /* Accept floating-point zero operands that fit into a single GPR. */
3879 if (GET_CODE (op) == CONST_DOUBLE
3880 && s390_float_const_zero_p (op)
3881 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3884 /* Accept double-word operands that can be split. */
3885 if (GET_CODE (op) == CONST_WIDE_INT
3886 || (GET_CODE (op) == CONST_INT
3887 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3889 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3890 rtx hi = operand_subword (op, 0, 0, dword_mode);
3891 rtx lo = operand_subword (op, 1, 0, dword_mode);
3892 return legitimate_reload_constant_p (hi)
3893 && legitimate_reload_constant_p (lo);
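/* Editorial example: on 31-bit (word_mode == SImode) the DImode
   constant 0x100000002 fails the trunc_int_for_mode test above and is
   split into the subwords 1 (high) and 2 (low); both satisfy
   CONST_OK_FOR_K, so the double-word constant is reloadable without
   the literal pool.  */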
3896 /* Everything else cannot be handled without reload. */
3900 /* Returns true if the constant value OP is a legitimate fp operand
3901 during and after reload.
3902 This function accepts all constants which can be loaded directly into an FPR. */
3906 legitimate_reload_fp_constant_p (rtx op)
3908 /* Accept floating-point zero operands if the load zero instruction
3909 can be used. Prior to z196 the load fp zero instruction caused a
3910 performance penalty if the result is used as a BFP number. */
3912 && GET_CODE (op) == CONST_DOUBLE
3913 && s390_float_const_zero_p (op))
3919 /* Returns true if the constant value OP is a legitimate vector operand
3920 during and after reload.
3921 This function accepts all constants which can be loaded directly into a VR. */
3925 legitimate_reload_vector_constant_p (rtx op)
3927 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3928 && (satisfies_constraint_j00 (op)
3929 || satisfies_constraint_jm1 (op)
3930 || satisfies_constraint_jKK (op)
3931 || satisfies_constraint_jxx (op)
3932 || satisfies_constraint_jyy (op)))
3938 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3939 return the class of reg to actually use. */
3942 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3944 switch (GET_CODE (op))
3946 /* Constants we cannot reload into general registers
3947 must be forced into the literal pool. */
3951 case CONST_WIDE_INT:
3952 if (reg_class_subset_p (GENERAL_REGS, rclass)
3953 && legitimate_reload_constant_p (op))
3954 return GENERAL_REGS;
3955 else if (reg_class_subset_p (ADDR_REGS, rclass)
3956 && legitimate_reload_constant_p (op))
3958 else if (reg_class_subset_p (FP_REGS, rclass)
3959 && legitimate_reload_fp_constant_p (op))
3961 else if (reg_class_subset_p (VEC_REGS, rclass)
3962 && legitimate_reload_vector_constant_p (op))
3967 /* If a symbolic constant or a PLUS is reloaded,
3968 it is most likely being used as an address, so
3969 prefer ADDR_REGS. If RCLASS is not a superset
3970 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3972 /* Symrefs cannot be pushed into the literal pool with -fPIC
3973 so we *MUST NOT* return NO_REGS for these cases
3974 (s390_cannot_force_const_mem will return true).
3976 On the other hand we MUST return NO_REGS for symrefs with
3977 invalid addend which might have been pushed to the literal
3978 pool (no -fPIC). Usually we would expect them to be
3979 handled via secondary reload but this does not happen if
3980 they are used as literal pool slot replacement in reload
3981 inheritance (see emit_input_reload_insns). */
3982 if (TARGET_CPU_ZARCH
3983 && GET_CODE (XEXP (op, 0)) == PLUS
3984 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
3985 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
3987 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3995 if (!legitimate_reload_constant_p (op))
3999 /* load address will be used. */
4000 if (reg_class_subset_p (ADDR_REGS, rclass))
4012 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4013 multiple of ALIGNMENT and the SYMBOL_REF being naturally aligned. Otherwise return false. */
4017 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4019 HOST_WIDE_INT addend;
4022 /* The "required alignment" might be 0 (e.g. for certain structs
4023 accessed via BLKmode). Early abort in this case, as well as when
4024 an alignment > 8 is required. */
4025 if (alignment < 2 || alignment > 8)
4028 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4031 if (addend & (alignment - 1))
4034 if (GET_CODE (symref) == SYMBOL_REF)
4036 /* We have load-relative instructions for 2-byte, 4-byte, and
4037 8-byte alignment so allow only these. */
4040 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4041 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4042 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4043 default: return false;
4047 if (GET_CODE (symref) == UNSPEC
4048 && alignment <= UNITS_PER_LONG)
4054 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4055 operand SCRATCH is used to reload the even part of the address and
4059 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4061 HOST_WIDE_INT addend;
4064 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4068 /* Easy case. The addend is even so larl will do fine. */
4069 emit_move_insn (reg, addr);
4072 /* We can leave the scratch register untouched if the target
4073 register is a valid base register. */
4074 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4075 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4078 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4079 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4082 emit_move_insn (scratch,
4083 gen_rtx_CONST (Pmode,
4084 gen_rtx_PLUS (Pmode, symref,
4085 GEN_INT (addend - 1))));
4087 emit_move_insn (scratch, symref);
4089 /* Increment the address using la in order to avoid clobbering cc. */
4090 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
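/* Editorial sketch: reloading ADDR == (const (plus (symbol_ref "x")
   (const_int 3))) emits, in effect,

     larl  %scratch, x+2
     la    %reg, 1(%scratch)

   which keeps the larl addend even and leaves the condition code
   untouched.  */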
4094 /* Generate what is necessary to move between REG and MEM using
4095 SCRATCH. The direction is given by TOMEM. */
4098 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4100 /* Reload might have pulled a constant out of the literal pool.
4101 Force it back in. */
4102 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4103 || GET_CODE (mem) == CONST_WIDE_INT
4104 || GET_CODE (mem) == CONST_VECTOR
4105 || GET_CODE (mem) == CONST)
4106 mem = force_const_mem (GET_MODE (reg), mem);
4108 gcc_assert (MEM_P (mem));
4110 /* For a load from memory we can leave the scratch register
4111 untouched if the target register is a valid base register. */
4113 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4114 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4115 && GET_MODE (reg) == GET_MODE (scratch))
4118 /* Load address into scratch register. Since we can't have a
4119 secondary reload for a secondary reload we have to cover the case
4120 where larl would need a secondary reload here as well. */
4121 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4123 /* Now we can use a standard load/store to do the move. */
4125 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4127 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4130 /* Inform reload about cases where moving X with a mode MODE to a register in
4131 RCLASS requires an extra scratch or immediate register. Return the class
4132 needed for the immediate register. */
4135 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4136 machine_mode mode, secondary_reload_info *sri)
4138 enum reg_class rclass = (enum reg_class) rclass_i;
4140 /* Intermediate register needed. */
4141 if (reg_classes_intersect_p (CC_REGS, rclass))
4142 return GENERAL_REGS;
4146 /* The vst/vl vector move instructions allow only for short displacements. */
4149 && GET_CODE (XEXP (x, 0)) == PLUS
4150 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4151 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4152 && reg_class_subset_p (rclass, VEC_REGS)
4153 && (!reg_class_subset_p (rclass, FP_REGS)
4154 || (GET_MODE_SIZE (mode) > 8
4155 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4158 sri->icode = (TARGET_64BIT ?
4159 CODE_FOR_reloaddi_la_in :
4160 CODE_FOR_reloadsi_la_in);
4162 sri->icode = (TARGET_64BIT ?
4163 CODE_FOR_reloaddi_la_out :
4164 CODE_FOR_reloadsi_la_out);
4170 HOST_WIDE_INT offset;
4173 /* On z10 several optimizer steps may generate larl operands with an odd addend. */
4176 && s390_loadrelative_operand_p (x, &symref, &offset)
4178 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4179 && (offset & 1) == 1)
4180 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4181 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4183 /* Handle all the (mem (symref)) accesses we cannot use the z10
4184 instructions for. */
4186 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4188 || !reg_class_subset_p (rclass, GENERAL_REGS)
4189 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4190 || !s390_check_symref_alignment (XEXP (x, 0),
4191 GET_MODE_SIZE (mode))))
4193 #define __SECONDARY_RELOAD_CASE(M,m) \
4196 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4197 CODE_FOR_reload##m##di_tomem_z10; \
4199 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4200 CODE_FOR_reload##m##si_tomem_z10; \
4203 switch (GET_MODE (x))
4205 __SECONDARY_RELOAD_CASE (QI, qi);
4206 __SECONDARY_RELOAD_CASE (HI, hi);
4207 __SECONDARY_RELOAD_CASE (SI, si);
4208 __SECONDARY_RELOAD_CASE (DI, di);
4209 __SECONDARY_RELOAD_CASE (TI, ti);
4210 __SECONDARY_RELOAD_CASE (SF, sf);
4211 __SECONDARY_RELOAD_CASE (DF, df);
4212 __SECONDARY_RELOAD_CASE (TF, tf);
4213 __SECONDARY_RELOAD_CASE (SD, sd);
4214 __SECONDARY_RELOAD_CASE (DD, dd);
4215 __SECONDARY_RELOAD_CASE (TD, td);
4216 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4217 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4218 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4219 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4220 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4221 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4222 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4223 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4224 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4225 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4226 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4227 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4228 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4229 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4230 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4231 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4232 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4233 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4234 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4235 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4236 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4240 #undef __SECONDARY_RELOAD_CASE
4244 /* We need a scratch register when loading a PLUS expression which
4245 is not a legitimate operand of the LOAD ADDRESS instruction. */
4246 /* LRA can deal with the transformation of a PLUS operand on its
4247 own, so we don't need to prompt LRA in this case. */
4248 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4249 sri->icode = (TARGET_64BIT ?
4250 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4252 /* When performing a multiword move from or to memory, we have to make sure the
4253 second chunk in memory is addressable without causing a displacement
4254 overflow. If that would be the case we calculate the address in
4255 a scratch register. */
4257 && GET_CODE (XEXP (x, 0)) == PLUS
4258 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4259 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4260 + GET_MODE_SIZE (mode) - 1))
4262 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
4263 in an s_operand address, since we may fall back to lm/stm. So we only
4264 have to care about overflows in the b+i+d case. */
4265 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4266 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4267 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4268 /* For FP_REGS no lm/stm is available so this check is triggered
4269 for displacement overflows in b+i+d and b+d like addresses. */
4270 || (reg_classes_intersect_p (FP_REGS, rclass)
4271 && s390_class_max_nregs (FP_REGS, mode) > 1))
4274 sri->icode = (TARGET_64BIT ?
4275 CODE_FOR_reloaddi_la_in :
4276 CODE_FOR_reloadsi_la_in);
4278 sri->icode = (TARGET_64BIT ?
4279 CODE_FOR_reloaddi_la_out :
4280 CODE_FOR_reloadsi_la_out);
4284 /* A scratch address register is needed when a symbolic constant is
4285 copied to r0 when compiling with -fPIC. In other cases the target
4286 register might be used as temporary (see legitimize_pic_address). */
4287 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4288 sri->icode = (TARGET_64BIT ?
4289 CODE_FOR_reloaddi_PIC_addr :
4290 CODE_FOR_reloadsi_PIC_addr);
4292 /* Either scratch or no register needed. */
4296 /* Generate code to load SRC, which is a PLUS that is not a
4297 legitimate operand for the LA instruction, into TARGET.
4298 SCRATCH may be used as scratch register. */
4301 s390_expand_plus_operand (rtx target, rtx src,
4305 struct s390_address ad;
4307 /* src must be a PLUS; get its two operands. */
4308 gcc_assert (GET_CODE (src) == PLUS);
4309 gcc_assert (GET_MODE (src) == Pmode);
4311 /* Check if any of the two operands is already scheduled
4312 for replacement by reload. This can happen e.g. when
4313 float registers occur in an address. */
4314 sum1 = find_replacement (&XEXP (src, 0));
4315 sum2 = find_replacement (&XEXP (src, 1));
4316 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4318 /* If the address is already strictly valid, there's nothing to do. */
4319 if (!s390_decompose_address (src, &ad)
4320 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4321 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4323 /* Otherwise, one of the operands cannot be an address register;
4324 we reload its value into the scratch register. */
4325 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4327 emit_move_insn (scratch, sum1);
4330 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4332 emit_move_insn (scratch, sum2);
4336 /* According to the way these invalid addresses are generated
4337 in reload.c, it should never happen (at least on s390) that
4338 *neither* of the PLUS components, after find_replacements
4339 was applied, is an address register. */
4340 if (sum1 == scratch && sum2 == scratch)
4346 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4349 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4350 is only ever performed on addresses, so we can mark the
4351 sum as legitimate for LA in any case. */
4352 s390_load_address (target, src);
4356 /* Return true if ADDR is a valid memory address.
4357 STRICT specifies whether strict register checking applies. */
4360 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4362 struct s390_address ad;
4365 && larl_operand (addr, VOIDmode)
4366 && (mode == VOIDmode
4367 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4370 if (!s390_decompose_address (addr, &ad))
4375 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4378 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4384 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4385 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4389 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4390 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4396 /* Return true if OP is a valid operand for the LA instruction.
4397 In 31-bit, we need to prove that the result is used as an
4398 address, as LA performs only a 31-bit addition. */
4401 legitimate_la_operand_p (rtx op)
4403 struct s390_address addr;
4404 if (!s390_decompose_address (op, &addr))
4407 return (TARGET_64BIT || addr.pointer);
4410 /* Return true if it is valid *and* preferable to use LA to
4411 compute the sum of OP1 and OP2. */
4414 preferred_la_operand_p (rtx op1, rtx op2)
4416 struct s390_address addr;
4418 if (op2 != const0_rtx)
4419 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4421 if (!s390_decompose_address (op1, &addr))
4423 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4425 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4428 /* Avoid LA instructions with index register on z196; it is
4429 preferable to use regular add instructions when possible.
4430 Starting with zEC12 the la with index register is "uncracked" again. */
4432 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4435 if (!TARGET_64BIT && !addr.pointer)
4441 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4442 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4448 /* Emit a forced load-address operation to load SRC into DST.
4449 This will use the LOAD ADDRESS instruction even in situations
4450 where legitimate_la_operand_p (SRC) returns false. */
4453 s390_load_address (rtx dst, rtx src)
4456 emit_move_insn (dst, src);
4458 emit_insn (gen_force_la_31 (dst, src));
4461 /* Return a legitimate reference for ORIG (an address) using the
4462 register REG. If REG is 0, a new pseudo is generated.
4464 There are two types of references that must be handled:
4466 1. Global data references must load the address from the GOT, via
4467 the PIC reg. An insn is emitted to do this load, and the reg is
4470 2. Static data references, constant pool addresses, and code labels
4471 compute the address as an offset from the GOT, whose base is in
4472 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4473 differentiate them from global data objects. The returned
4474 address is the PIC reg + an unspec constant.
4476 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4477 reg also appears in the address. */
4480 legitimize_pic_address (rtx orig, rtx reg)
4483 rtx addend = const0_rtx;
4486 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4488 if (GET_CODE (addr) == CONST)
4489 addr = XEXP (addr, 0);
4491 if (GET_CODE (addr) == PLUS)
4493 addend = XEXP (addr, 1);
4494 addr = XEXP (addr, 0);
4497 if ((GET_CODE (addr) == LABEL_REF
4498 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4499 || (GET_CODE (addr) == UNSPEC &&
4500 (XINT (addr, 1) == UNSPEC_GOTENT
4501 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4502 && GET_CODE (addend) == CONST_INT)
4504 /* This can be locally addressed. */
4506 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4507 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4508 gen_rtx_CONST (Pmode, addr) : addr);
4510 if (TARGET_CPU_ZARCH
4511 && larl_operand (const_addr, VOIDmode)
4512 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4513 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
4515 if (INTVAL (addend) & 1)
4517 /* LARL can't handle odd offsets, so emit a pair of LARL and LA. */
4519 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4521 if (!DISP_IN_RANGE (INTVAL (addend)))
4523 HOST_WIDE_INT even = INTVAL (addend) - 1;
4524 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4525 addr = gen_rtx_CONST (Pmode, addr);
4526 addend = const1_rtx;
4529 emit_move_insn (temp, addr);
4530 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4534 s390_load_address (reg, new_rtx);
4540 /* If the offset is even, we can just use LARL. This
4541 will happen automatically. */
4546 /* No larl available -- access local symbols relative to the GOT. */
4548 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4550 if (reload_in_progress || reload_completed)
4551 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4553 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4554 if (addend != const0_rtx)
4555 addr = gen_rtx_PLUS (Pmode, addr, addend);
4556 addr = gen_rtx_CONST (Pmode, addr);
4557 addr = force_const_mem (Pmode, addr);
4558 emit_move_insn (temp, addr);
4560 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4563 s390_load_address (reg, new_rtx);
4568 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4570 /* A non-local symbol reference without addend.
4572 The symbol ref is wrapped into an UNSPEC to make sure the
4573 proper operand modifier (@GOT or @GOTENT) will be emitted.
4574 This will tell the linker to put the symbol into the GOT.
4576 Additionally the code dereferencing the GOT slot is emitted here.
4578 An addend to the symref needs to be added afterwards.
4579 legitimize_pic_address calls itself recursively to handle
4580 that case. So no need to do it here. */
4583 reg = gen_reg_rtx (Pmode);
4587 /* Use load relative if possible.
4588 lgrl <target>, sym@GOTENT */
4589 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4590 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4591 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4593 emit_move_insn (reg, new_rtx);
4596 else if (flag_pic == 1)
4598 /* Assume GOT offset is a valid displacement operand (< 4k
4599 or < 512k with z990). This is handled the same way in
4600 both 31- and 64-bit code (@GOT).
4601 lg <target>, sym@GOT(r12) */
4603 if (reload_in_progress || reload_completed)
4604 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4606 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4607 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4608 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4609 new_rtx = gen_const_mem (Pmode, new_rtx);
4610 emit_move_insn (reg, new_rtx);
4613 else if (TARGET_CPU_ZARCH)
4615 /* If the GOT offset might be >= 4k, we determine the position
4616 of the GOT entry via a PC-relative LARL (@GOTENT).
4617 larl temp, sym@GOTENT
4618 lg <target>, 0(temp) */
4620 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4622 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4623 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4625 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4626 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4627 emit_move_insn (temp, new_rtx);
4629 new_rtx = gen_const_mem (Pmode, temp);
4630 emit_move_insn (reg, new_rtx);
4636 /* If the GOT offset might be >= 4k, we have to load it
4637 from the literal pool (@GOT).
4639 lg temp, lit-litbase(r13)
4640 lg <target>, 0(temp)
4641 lit: .long sym@GOT */
4643 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4645 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4646 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4648 if (reload_in_progress || reload_completed)
4649 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4651 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4652 addr = gen_rtx_CONST (Pmode, addr);
4653 addr = force_const_mem (Pmode, addr);
4654 emit_move_insn (temp, addr);
4656 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4657 new_rtx = gen_const_mem (Pmode, new_rtx);
4658 emit_move_insn (reg, new_rtx);
4662 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4664 gcc_assert (XVECLEN (addr, 0) == 1);
4665 switch (XINT (addr, 1))
4667 /* These UNSPECs address symbols (or PLT slots) relative to the GOT
4668 (not GOT slots!). In general this will exceed the
4669 displacement range, so these values belong in the literal pool. */
4673 new_rtx = force_const_mem (Pmode, orig);
4676 /* For -fPIC the GOT size might exceed the displacement
4677 range so make sure the value is in the literal pool. */
4680 new_rtx = force_const_mem (Pmode, orig);
4683 /* For @GOTENT larl is used. This is handled like local symbol refs. */
4689 /* @PLT is OK as is on 64-bit, but must be converted to
4690 GOT-relative @PLTOFF on 31-bit. */
4692 if (!TARGET_CPU_ZARCH)
4694 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4696 if (reload_in_progress || reload_completed)
4697 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4699 addr = XVECEXP (addr, 0, 0);
4700 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4702 if (addend != const0_rtx)
4703 addr = gen_rtx_PLUS (Pmode, addr, addend);
4704 addr = gen_rtx_CONST (Pmode, addr);
4705 addr = force_const_mem (Pmode, addr);
4706 emit_move_insn (temp, addr);
4708 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4711 s390_load_address (reg, new_rtx);
4716 /* On 64-bit, larl can be used. This case is handled like
4717 local symbol refs. */
4721 /* Everything else cannot happen. */
4726 else if (addend != const0_rtx)
4728 /* Otherwise, compute the sum. */
4730 rtx base = legitimize_pic_address (addr, reg);
4731 new_rtx = legitimize_pic_address (addend,
4732 base == reg ? NULL_RTX : reg);
4733 if (GET_CODE (new_rtx) == CONST_INT)
4734 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4737 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4739 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4740 new_rtx = XEXP (new_rtx, 1);
4742 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4745 if (GET_CODE (new_rtx) == CONST)
4746 new_rtx = XEXP (new_rtx, 0);
4747 new_rtx = force_operand (new_rtx, 0);
4753 /* Load the thread pointer into a register. */
4756 s390_get_thread_pointer (void)
4758 rtx tp = gen_reg_rtx (Pmode);
4760 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4761 mark_reg_pointer (tp, BITS_PER_WORD);
4766 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4767 in s390_tls_symbol which always refers to __tls_get_offset.
4768 The returned offset is written to RESULT_REG and a USE rtx is
4769 generated for TLS_CALL. */
4771 static GTY(()) rtx s390_tls_symbol;
4774 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4779 emit_insn (s390_load_got ());
4781 if (!s390_tls_symbol)
4782 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4784 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4785 gen_rtx_REG (Pmode, RETURN_REGNUM));
4787 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4788 RTL_CONST_CALL_P (insn) = 1;
4791 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4792 this (thread-local) address. REG may be used as temporary. */
4795 legitimize_tls_address (rtx addr, rtx reg)
4797 rtx new_rtx, tls_call, temp, base, r2;
4800 if (GET_CODE (addr) == SYMBOL_REF)
4801 switch (tls_symbolic_operand (addr))
4803 case TLS_MODEL_GLOBAL_DYNAMIC:
4805 r2 = gen_rtx_REG (Pmode, 2);
4806 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4807 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4808 new_rtx = force_const_mem (Pmode, new_rtx);
4809 emit_move_insn (r2, new_rtx);
4810 s390_emit_tls_call_insn (r2, tls_call);
4811 insn = get_insns ();
4814 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4815 temp = gen_reg_rtx (Pmode);
4816 emit_libcall_block (insn, temp, r2, new_rtx);
4818 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4821 s390_load_address (reg, new_rtx);
4826 case TLS_MODEL_LOCAL_DYNAMIC:
4828 r2 = gen_rtx_REG (Pmode, 2);
4829 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4830 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4831 new_rtx = force_const_mem (Pmode, new_rtx);
4832 emit_move_insn (r2, new_rtx);
4833 s390_emit_tls_call_insn (r2, tls_call);
4834 insn = get_insns ();
4837 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4838 temp = gen_reg_rtx (Pmode);
4839 emit_libcall_block (insn, temp, r2, new_rtx);
4841 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4842 base = gen_reg_rtx (Pmode);
4843 s390_load_address (base, new_rtx);
4845 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4846 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4847 new_rtx = force_const_mem (Pmode, new_rtx);
4848 temp = gen_reg_rtx (Pmode);
4849 emit_move_insn (temp, new_rtx);
4851 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4854 s390_load_address (reg, new_rtx);
4859 case TLS_MODEL_INITIAL_EXEC:
4862 /* Assume GOT offset < 4k. This is handled the same way
4863 in both 31- and 64-bit code. */
4865 if (reload_in_progress || reload_completed)
4866 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4868 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4869 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4870 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4871 new_rtx = gen_const_mem (Pmode, new_rtx);
4872 temp = gen_reg_rtx (Pmode);
4873 emit_move_insn (temp, new_rtx);
4875 else if (TARGET_CPU_ZARCH)
4877 /* If the GOT offset might be >= 4k, we determine the position
4878 of the GOT entry via a PC-relative LARL. */
4880 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4881 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4882 temp = gen_reg_rtx (Pmode);
4883 emit_move_insn (temp, new_rtx);
4885 new_rtx = gen_const_mem (Pmode, temp);
4886 temp = gen_reg_rtx (Pmode);
4887 emit_move_insn (temp, new_rtx);
4891 /* If the GOT offset might be >= 4k, we have to load it
4892 from the literal pool. */
4894 if (reload_in_progress || reload_completed)
4895 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4897 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4898 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4899 new_rtx = force_const_mem (Pmode, new_rtx);
4900 temp = gen_reg_rtx (Pmode);
4901 emit_move_insn (temp, new_rtx);
4903 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4904 new_rtx = gen_const_mem (Pmode, new_rtx);
4906 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4907 temp = gen_reg_rtx (Pmode);
4908 emit_insn (gen_rtx_SET (temp, new_rtx));
4912 /* In position-dependent code, load the absolute address of
4913 the GOT entry from the literal pool. */
4915 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4916 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4917 new_rtx = force_const_mem (Pmode, new_rtx);
4918 temp = gen_reg_rtx (Pmode);
4919 emit_move_insn (temp, new_rtx);
4922 new_rtx = gen_const_mem (Pmode, new_rtx);
4923 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4924 temp = gen_reg_rtx (Pmode);
4925 emit_insn (gen_rtx_SET (temp, new_rtx));
4928 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4931 s390_load_address (reg, new_rtx);
4936 case TLS_MODEL_LOCAL_EXEC:
4937 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4938 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4939 new_rtx = force_const_mem (Pmode, new_rtx);
4940 temp = gen_reg_rtx (Pmode);
4941 emit_move_insn (temp, new_rtx);
4943 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4946 s390_load_address (reg, new_rtx);
4955 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
4957 switch (XINT (XEXP (addr, 0), 1))
4959 case UNSPEC_INDNTPOFF:
4960 gcc_assert (TARGET_CPU_ZARCH);
4969 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4970 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4972 new_rtx = XEXP (XEXP (addr, 0), 0);
4973 if (GET_CODE (new_rtx) != SYMBOL_REF)
4974 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4976 new_rtx = legitimize_tls_address (new_rtx, reg);
4977 new_rtx = plus_constant (Pmode, new_rtx,
4978 INTVAL (XEXP (XEXP (addr, 0), 1)));
4979 new_rtx = force_operand (new_rtx, 0);
4983 gcc_unreachable (); /* for now ... */
4988 /* Emit insns making the address in operands[1] valid for a standard
4989 move to operands[0]. operands[1] is replaced by an address which
4990 should be used instead of the former RTX to emit the move pattern. */
4994 emit_symbolic_move (rtx *operands)
4996 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4998 if (GET_CODE (operands[0]) == MEM)
4999 operands[1] = force_reg (Pmode, operands[1]);
5000 else if (TLS_SYMBOLIC_CONST (operands[1]))
5001 operands[1] = legitimize_tls_address (operands[1], temp);
5003 operands[1] = legitimize_pic_address (operands[1], temp);
5006 /* Try machine-dependent ways of modifying an illegitimate address X
5007 to be legitimate. If we find one, return the new, valid address.
5009 OLDX is the address as it was before break_out_memory_refs was called.
5010 In some cases it is useful to look at this to decide what needs to be done.
5012 MODE is the mode of the operand pointed to by X.
5014 When -fpic is used, special handling is needed for symbolic references.
5015 See comments by legitimize_pic_address for details. */
5018 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5019 machine_mode mode ATTRIBUTE_UNUSED)
5021 rtx constant_term = const0_rtx;
5023 if (TLS_SYMBOLIC_CONST (x))
5025 x = legitimize_tls_address (x, 0);
5027 if (s390_legitimate_address_p (mode, x, FALSE))
5030 else if (GET_CODE (x) == PLUS
5031 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5032 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5038 if (SYMBOLIC_CONST (x)
5039 || (GET_CODE (x) == PLUS
5040 && (SYMBOLIC_CONST (XEXP (x, 0))
5041 || SYMBOLIC_CONST (XEXP (x, 1)))))
5042 x = legitimize_pic_address (x, 0);
5044 if (s390_legitimate_address_p (mode, x, FALSE))
5048 x = eliminate_constant_term (x, &constant_term);
5050 /* Optimize loading of large displacements by splitting them
5051 into the multiple of 4K and the rest; this allows the
5052 former to be CSE'd if possible.
5054 Don't do this if the displacement is added to a register
5055 pointing into the stack frame, as the offsets will
5056 change later anyway. */
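/* Illustrative example (mine, not from the original sources): for a
   displacement of 0x12345 the code below computes
     lower = 0x12345 & 0xfff = 0x345
     upper = 0x12345 ^ 0x345 = 0x12000
   leaving the in-range 0x345 as displacement while the 4K-aligned
   0x12000 goes into a register and can be CSE'd across several such
   addresses.  */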
5058 if (GET_CODE (constant_term) == CONST_INT
5059 && !TARGET_LONG_DISPLACEMENT
5060 && !DISP_IN_RANGE (INTVAL (constant_term))
5061 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5063 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5064 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5066 rtx temp = gen_reg_rtx (Pmode);
5067 rtx val = force_operand (GEN_INT (upper), temp);
5069 emit_move_insn (temp, val);
5071 x = gen_rtx_PLUS (Pmode, x, temp);
5072 constant_term = GEN_INT (lower);
5075 if (GET_CODE (x) == PLUS)
5077 if (GET_CODE (XEXP (x, 0)) == REG)
5079 rtx temp = gen_reg_rtx (Pmode);
5080 rtx val = force_operand (XEXP (x, 1), temp);
5082 emit_move_insn (temp, val);
5084 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5087 else if (GET_CODE (XEXP (x, 1)) == REG)
5089 rtx temp = gen_reg_rtx (Pmode);
5090 rtx val = force_operand (XEXP (x, 0), temp);
5092 emit_move_insn (temp, val);
5094 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5098 if (constant_term != const0_rtx)
5099 x = gen_rtx_PLUS (Pmode, x, constant_term);
5104 /* Try a machine-dependent way of reloading an illegitimate address AD
5105 operand. If we find one, push the reload and return the new address.
5107 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5108 and TYPE is the reload type of the current reload. */
5111 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5112 int opnum, int type)
5114 if (!optimize || TARGET_LONG_DISPLACEMENT)
5117 if (GET_CODE (ad) == PLUS)
5119 rtx tem = simplify_binary_operation (PLUS, Pmode,
5120 XEXP (ad, 0), XEXP (ad, 1));
5125 if (GET_CODE (ad) == PLUS
5126 && GET_CODE (XEXP (ad, 0)) == REG
5127 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5128 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5130 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5131 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5132 rtx cst, tem, new_rtx;
5134 cst = GEN_INT (upper);
5135 if (!legitimate_reload_constant_p (cst))
5136 cst = force_const_mem (Pmode, cst);
5138 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5139 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5141 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5142 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5143 opnum, (enum reload_type) type);
5150 /* Emit code to move LEN bytes from SRC to DST. */
5153 s390_expand_movmem (rtx dst, rtx src, rtx len)
5155 /* When tuning for z10 or higher we rely on the Glibc functions to
5156 do the right thing. Only for constant lengths below 64k do we
5157 generate inline code. */
5158 if (s390_tune >= PROCESSOR_2097_Z10
5159 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5162 /* Expand memcpy for constant length operands without a loop if it
5163 is shorter that way.
5165 With a constant length argument a
5166 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
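/* Illustrative sketch (not from the original sources): a constant
   memcpy of 600 bytes is expanded by the loop below into three MVCs
   covering 256 + 256 + 88 bytes; the machine operand encodes
   length - 1, hence the GEN_INT (l > 256 ? 255 : l - 1):

	mvc	0(256,%rD),0(%rS)
	mvc	256(256,%rD),256(%rS)
	mvc	512(88,%rD),512(%rS)

   with %rD/%rS standing for the dst and src base registers.  */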
5167 if (GET_CODE (len) == CONST_INT
5168 && INTVAL (len) >= 0
5169 && INTVAL (len) <= 256 * 6
5170 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5174 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5176 rtx newdst = adjust_address (dst, BLKmode, o);
5177 rtx newsrc = adjust_address (src, BLKmode, o);
5178 emit_insn (gen_movmem_short (newdst, newsrc,
5179 GEN_INT (l > 256 ? 255 : l - 1)));
5183 else if (TARGET_MVCLE)
5185 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5190 rtx dst_addr, src_addr, count, blocks, temp;
5191 rtx_code_label *loop_start_label = gen_label_rtx ();
5192 rtx_code_label *loop_end_label = gen_label_rtx ();
5193 rtx_code_label *end_label = gen_label_rtx ();
5196 mode = GET_MODE (len);
5197 if (mode == VOIDmode)
5200 dst_addr = gen_reg_rtx (Pmode);
5201 src_addr = gen_reg_rtx (Pmode);
5202 count = gen_reg_rtx (mode);
5203 blocks = gen_reg_rtx (mode);
5205 convert_move (count, len, 1);
5206 emit_cmp_and_jump_insns (count, const0_rtx,
5207 EQ, NULL_RTX, mode, 1, end_label);
5209 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5210 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5211 dst = change_address (dst, VOIDmode, dst_addr);
5212 src = change_address (src, VOIDmode, src_addr);
5214 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5217 emit_move_insn (count, temp);
5219 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5222 emit_move_insn (blocks, temp);
5224 emit_cmp_and_jump_insns (blocks, const0_rtx,
5225 EQ, NULL_RTX, mode, 1, loop_end_label);
5227 emit_label (loop_start_label);
5230 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5234 /* Issue a read prefetch for the +3 cache line. */
5235 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5236 const0_rtx, const0_rtx);
5237 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5238 emit_insn (prefetch);
5240 /* Issue a write prefetch for the +3 cache line. */
5241 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5242 const1_rtx, const0_rtx);
5243 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5244 emit_insn (prefetch);
5247 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5248 s390_load_address (dst_addr,
5249 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5250 s390_load_address (src_addr,
5251 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5253 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5256 emit_move_insn (blocks, temp);
5258 emit_cmp_and_jump_insns (blocks, const0_rtx,
5259 EQ, NULL_RTX, mode, 1, loop_end_label);
5261 emit_jump (loop_start_label);
5262 emit_label (loop_end_label);
5264 emit_insn (gen_movmem_short (dst, src,
5265 convert_to_mode (Pmode, count, 1)));
5266 emit_label (end_label);
5271 /* Emit code to set LEN bytes at DST to VAL.
5272 Make use of clrmem if VAL is zero. */
5275 s390_expand_setmem (rtx dst, rtx len, rtx val)
5277 const int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5279 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
5282 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5284 /* Expand setmem/clrmem for a constant length operand without a
5285 loop if it will be shorter that way.
5286 With a constant length and without pfd argument a
5287 clrmem loop is 32 bytes -> 5.3 * xc
5288 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
5289 if (GET_CODE (len) == CONST_INT
5290 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5291 || INTVAL (len) <= 257 * 3)
5292 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5296 if (val == const0_rtx)
5297 /* clrmem: emit 256 byte blockwise XCs. */
5298 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5300 rtx newdst = adjust_address (dst, BLKmode, o);
5301 emit_insn (gen_clrmem_short (newdst,
5302 GEN_INT (l > 256 ? 255 : l - 1)));
5305 /* setmem: emit 1 (mvi) + 256 (mvc) byte blockwise memsets by
5306 setting the first byte to val and using a 256-byte mvc with one
5307 byte overlap to propagate the byte. */
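/* Illustrative note (mine): mvc copies strictly left to right, one
   byte at a time, so after `mvi 0(dst),val' an overlapping
   `mvc 1(255,dst),0(dst)' makes every byte a copy of its predecessor
   and thereby replicates val over the whole 256-byte block.  */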
5308 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5310 rtx newdst = adjust_address (dst, BLKmode, o);
5311 emit_move_insn (adjust_address (dst, QImode, o), val);
5314 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5315 emit_insn (gen_movmem_short (newdstp1, newdst,
5316 GEN_INT (l > 257 ? 255 : l - 2)));
5321 else if (TARGET_MVCLE)
5323 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5325 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5328 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5334 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5335 rtx_code_label *loop_start_label = gen_label_rtx ();
5336 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5337 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5338 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5341 mode = GET_MODE (len);
5342 if (mode == VOIDmode)
5345 dst_addr = gen_reg_rtx (Pmode);
5346 count = gen_reg_rtx (mode);
5347 blocks = gen_reg_rtx (mode);
5349 convert_move (count, len, 1);
5350 emit_cmp_and_jump_insns (count, const0_rtx,
5351 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5354 /* We need to make a copy of the target address since memset is
5355 supposed to return it unmodified. We have to do this here
5356 already since the new reg is used at onebyte_end_label. */
5357 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5358 dst = change_address (dst, VOIDmode, dst_addr);
5360 if (val != const0_rtx)
5362 /* When using the overlapping mvc the original target
5363 address is only accessed as a single-byte entity (even by
5364 the mvc reading this value). */
5365 set_mem_size (dst, 1);
5366 dstp1 = adjust_address (dst, VOIDmode, 1);
5367 emit_cmp_and_jump_insns (count,
5368 const1_rtx, EQ, NULL_RTX, mode, 1,
5369 onebyte_end_label, very_unlikely);
5372 /* There is one unconditional (mvi+mvc)/xc after the loop
5373 dealing with the rest of the bytes; subtracting two (mvi+mvc)
5374 or one (xc) here leaves this number of bytes to be handled by it. */
5376 temp = expand_binop (mode, add_optab, count,
5377 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5378 count, 1, OPTAB_DIRECT);
5380 emit_move_insn (count, temp);
5382 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5385 emit_move_insn (blocks, temp);
5387 emit_cmp_and_jump_insns (blocks, const0_rtx,
5388 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5390 emit_jump (loop_start_label);
5392 if (val != const0_rtx)
5394 /* The 1 byte != 0 special case. Not handled efficiently
5395 since we require two jumps for that. However, this
5396 should be very rare. */
5397 emit_label (onebyte_end_label);
5398 emit_move_insn (adjust_address (dst, QImode, 0), val);
5399 emit_jump (zerobyte_end_label);
5402 emit_label (loop_start_label);
5405 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5407 /* Issue a write prefetch for the +4 cache line. */
5408 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5409 GEN_INT (1024)),
5410 const1_rtx, const0_rtx);
5411 emit_insn (prefetch);
5412 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5415 if (val == const0_rtx)
5416 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5419 /* Set the first byte in the block to the value and use an
5420 overlapping mvc for the block. */
5421 emit_move_insn (adjust_address (dst, QImode, 0), val);
5422 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5424 s390_load_address (dst_addr,
5425 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5427 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5430 emit_move_insn (blocks, temp);
5432 emit_cmp_and_jump_insns (blocks, const0_rtx,
5433 NE, NULL_RTX, mode, 1, loop_start_label);
5435 emit_label (restbyte_end_label);
5437 if (val == const0_rtx)
5438 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5441 /* Set the first byte in the block to the value and use an
5442 overlapping mvc for the block. */
5443 emit_move_insn (adjust_address (dst, QImode, 0), val);
5444 /* execute only uses the lowest 8 bits of count, which is
5445 exactly what we need here. */
5446 emit_insn (gen_movmem_short (dstp1, dst,
5447 convert_to_mode (Pmode, count, 1)));
5450 emit_label (zerobyte_end_label);
5454 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5455 and return the result in TARGET. */
5458 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5460 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5463 /* When tuning for z10 or higher we rely on the Glibc functions to
5464 do the right thing. Only for constant lengths below 64k do we
5465 generate inline code. */
5466 if (s390_tune >= PROCESSOR_2097_Z10
5467 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5470 /* As the result of CMPINT is inverted compared to what we need,
5471 we have to swap the operands. */
5472 tmp = op0; op0 = op1; op1 = tmp;
5474 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5476 if (INTVAL (len) > 0)
5478 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5479 emit_insn (gen_cmpint (target, ccreg));
5482 emit_move_insn (target, const0_rtx);
5484 else if (TARGET_MVCLE)
5486 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5487 emit_insn (gen_cmpint (target, ccreg));
5491 rtx addr0, addr1, count, blocks, temp;
5492 rtx_code_label *loop_start_label = gen_label_rtx ();
5493 rtx_code_label *loop_end_label = gen_label_rtx ();
5494 rtx_code_label *end_label = gen_label_rtx ();
5497 mode = GET_MODE (len);
5498 if (mode == VOIDmode)
5501 addr0 = gen_reg_rtx (Pmode);
5502 addr1 = gen_reg_rtx (Pmode);
5503 count = gen_reg_rtx (mode);
5504 blocks = gen_reg_rtx (mode);
5506 convert_move (count, len, 1);
5507 emit_cmp_and_jump_insns (count, const0_rtx,
5508 EQ, NULL_RTX, mode, 1, end_label);
5510 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5511 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5512 op0 = change_address (op0, VOIDmode, addr0);
5513 op1 = change_address (op1, VOIDmode, addr1);
5515 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5518 emit_move_insn (count, temp);
5520 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5523 emit_move_insn (blocks, temp);
5525 emit_cmp_and_jump_insns (blocks, const0_rtx,
5526 EQ, NULL_RTX, mode, 1, loop_end_label);
5528 emit_label (loop_start_label);
5531 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5535 /* Issue a read prefetch for the +2 cache line of operand 1. */
5536 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5537 const0_rtx, const0_rtx);
5538 emit_insn (prefetch);
5539 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5541 /* Issue a read prefetch for the +2 cache line of operand 2. */
5542 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5543 const0_rtx, const0_rtx);
5544 emit_insn (prefetch);
5545 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5548 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5549 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5550 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5551 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5552 temp = gen_rtx_SET (pc_rtx, temp);
5553 emit_jump_insn (temp);
5555 s390_load_address (addr0,
5556 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5557 s390_load_address (addr1,
5558 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5560 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5563 emit_move_insn (blocks, temp);
5565 emit_cmp_and_jump_insns (blocks, const0_rtx,
5566 EQ, NULL_RTX, mode, 1, loop_end_label);
5568 emit_jump (loop_start_label);
5569 emit_label (loop_end_label);
5571 emit_insn (gen_cmpmem_short (op0, op1,
5572 convert_to_mode (Pmode, count, 1)));
5573 emit_label (end_label);
5575 emit_insn (gen_cmpint (target, ccreg));
5580 /* Emit a conditional jump to LABEL for condition code mask MASK using
5581 comparison operator COMPARISON. Return the emitted jump insn. */
5584 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5588 gcc_assert (comparison == EQ || comparison == NE);
5589 gcc_assert (mask > 0 && mask < 15);
5591 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5592 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5593 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5594 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5595 temp = gen_rtx_SET (pc_rtx, temp);
5596 return emit_jump_insn (temp);
5599 /* Emit the instructions to implement strlen of STRING and store the
5600 result in TARGET. The string has the known ALIGNMENT. This
5601 version uses vector instructions and is therefore not appropriate
5602 for targets prior to z13. */
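/* A rough sketch of the expansion below (my paraphrase, not part of
   the original sources):

     idx = 0;
     if (string & 15)	// unaligned start: vll-load only the bytes up
       ...		// to the next 16-byte boundary, zero-filled
     loop:
       load 16 (aligned) bytes; idx += 16;
       vfene: len = index of first zero byte in the chunk, or 16;
       if (len == 16) goto loop;
     result = (idx - 16) + len;  */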
5605 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5607 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5608 int very_likely = REG_BR_PROB_BASE - 1;
5609 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5610 rtx str_reg = gen_reg_rtx (V16QImode);
5611 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5612 rtx str_idx_reg = gen_reg_rtx (Pmode);
5613 rtx result_reg = gen_reg_rtx (V16QImode);
5614 rtx is_aligned_label = gen_label_rtx ();
5615 rtx into_loop_label = NULL_RTX;
5616 rtx loop_start_label = gen_label_rtx ();
5618 rtx len = gen_reg_rtx (QImode);
5621 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5622 emit_move_insn (str_idx_reg, const0_rtx);
5624 if (INTVAL (alignment) < 16)
5626 /* Check whether the address happens to be aligned properly so
5627 jump directly to the aligned loop. */
5628 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5629 str_addr_base_reg, GEN_INT (15)),
5630 const0_rtx, EQ, NULL_RTX,
5631 Pmode, 1, is_aligned_label);
5633 temp = gen_reg_rtx (Pmode);
5634 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5635 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5636 gcc_assert (REG_P (temp));
5637 highest_index_to_load_reg =
5638 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5639 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5640 gcc_assert (REG_P (highest_index_to_load_reg));
5641 emit_insn (gen_vllv16qi (str_reg,
5642 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5643 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5645 into_loop_label = gen_label_rtx ();
5646 s390_emit_jump (into_loop_label, NULL_RTX);
5650 emit_label (is_aligned_label);
5651 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5653 /* Reaching this point we are only performing 16-byte aligned loads. */
5655 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5657 emit_label (loop_start_label);
5658 LABEL_NUSES (loop_start_label) = 1;
5660 /* Load 16 bytes of the string into VR. */
5661 emit_move_insn (str_reg,
5662 gen_rtx_MEM (V16QImode,
5663 gen_rtx_PLUS (Pmode, str_idx_reg,
5664 str_addr_base_reg)));
5665 if (into_loop_label != NULL_RTX)
5667 emit_label (into_loop_label);
5668 LABEL_NUSES (into_loop_label) = 1;
5671 /* Increment string index by 16 bytes. */
5672 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5673 str_idx_reg, 1, OPTAB_DIRECT);
5675 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5676 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5678 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5679 REG_BR_PROB, very_likely);
5680 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5682 /* If the string pointer wasn't aligned we have loaded less than 16
5683 bytes and the remaining bytes got filled with zeros (by vll).
5684 Now we have to check whether the resulting index lies within the
5685 bytes that are actually part of the string. */
5687 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5688 highest_index_to_load_reg);
5689 s390_load_address (highest_index_to_load_reg,
5690 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5693 emit_insn (gen_movdicc (str_idx_reg, cond,
5694 highest_index_to_load_reg, str_idx_reg));
5696 emit_insn (gen_movsicc (str_idx_reg, cond,
5697 highest_index_to_load_reg, str_idx_reg));
5699 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5702 expand_binop (Pmode, add_optab, str_idx_reg,
5703 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5704 /* FIXME: len is already zero extended - so avoid the llgcr emitted by the conversion below. */
5706 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5707 convert_to_mode (Pmode, len, 1),
5708 target, 1, OPTAB_DIRECT);
5710 emit_move_insn (target, temp);
5714 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5716 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5717 rtx temp = gen_reg_rtx (Pmode);
5718 rtx src_addr = XEXP (src, 0);
5719 rtx dst_addr = XEXP (dst, 0);
5720 rtx src_addr_reg = gen_reg_rtx (Pmode);
5721 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5722 rtx offset = gen_reg_rtx (Pmode);
5723 rtx vsrc = gen_reg_rtx (V16QImode);
5724 rtx vpos = gen_reg_rtx (V16QImode);
5725 rtx loadlen = gen_reg_rtx (SImode);
5726 rtx gpos_qi = gen_reg_rtx (QImode);
5727 rtx gpos = gen_reg_rtx (SImode);
5728 rtx done_label = gen_label_rtx ();
5729 rtx loop_label = gen_label_rtx ();
5730 rtx exit_label = gen_label_rtx ();
5731 rtx full_label = gen_label_rtx ();
5733 /* Perform a quick check for a string ending within the first
5734 (up to) 16 bytes and exit early if successful. */
5736 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5737 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5738 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5739 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5740 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5741 /* gpos is the byte index if a zero was found and 16 otherwise.
5742 So if it is lower than the number of loaded bytes we have a hit. */
5743 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5745 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5747 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5749 emit_jump (exit_label);
5752 emit_label (full_label);
5753 LABEL_NUSES (full_label) = 1;
5755 /* Calculate `offset' so that src + offset points to the last byte
5756 before 16 byte alignment. */
5758 /* temp = src_addr & 0xf */
5759 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5762 /* offset = 0xf - temp */
5763 emit_move_insn (offset, GEN_INT (15));
5764 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5767 /* Store `offset' bytes in the destination string. The quick check
5768 has loaded at least `offset' bytes into vsrc. */
5770 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5772 /* Advance to the next byte to be loaded. */
5773 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5776 /* Make sure the addresses are single regs which can be used as a base address. */
5778 emit_move_insn (src_addr_reg, src_addr);
5779 emit_move_insn (dst_addr_reg, dst_addr);
5783 emit_label (loop_label);
5784 LABEL_NUSES (loop_label) = 1;
5786 emit_move_insn (vsrc,
5787 gen_rtx_MEM (V16QImode,
5788 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5790 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5791 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5792 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5793 REG_BR_PROB, very_unlikely);
5795 emit_move_insn (gen_rtx_MEM (V16QImode,
5796 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5799 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5800 offset, 1, OPTAB_DIRECT);
5802 emit_jump (loop_label);
5807 /* We are done. Add the offset of the zero character to the dst_addr
5808 pointer to get the result. */
5810 emit_label (done_label);
5811 LABEL_NUSES (done_label) = 1;
5813 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5816 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5817 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5819 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5821 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5826 emit_label (exit_label);
5827 LABEL_NUSES (exit_label) = 1;
5831 /* Expand conditional increment or decrement using alc/slb instructions.
5832 Should generate code setting DST to either SRC or SRC + INCREMENT,
5833 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5834 Returns true if successful, false otherwise.
5836 That makes it possible to implement some if-constructs without jumps e.g.:
5837 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5838 unsigned int a, b, c;
5839 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5840 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5841 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5842 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5844 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5845 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5846 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5847 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5848 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
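/* The ALC case below, for example, emits RTL of roughly the following
   shape (illustrative only; the actual comparison code and CC mode
   depend on CMP_CODE, and 33 is the CC register):

     (set (reg:CCU 33) (compare:CCU (reg cmp_op0) (reg cmp_op1)))
     (parallel
       [(set (reg dst) (plus (plus (gtu (reg:CCU 33) (const_int 0))
				   (reg src))
			     (const_int 0)))
	(clobber (reg:CC 33))])  */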
5851 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5852 rtx dst, rtx src, rtx increment)
5854 machine_mode cmp_mode;
5855 machine_mode cc_mode;
5861 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5862 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5864 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5865 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5870 /* Try ADD LOGICAL WITH CARRY. */
5871 if (increment == const1_rtx)
5873 /* Determine CC mode to use. */
5874 if (cmp_code == EQ || cmp_code == NE)
5876 if (cmp_op1 != const0_rtx)
5878 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5879 NULL_RTX, 0, OPTAB_WIDEN);
5880 cmp_op1 = const0_rtx;
5883 cmp_code = cmp_code == EQ ? LEU : GTU;
5886 if (cmp_code == LTU || cmp_code == LEU)
5891 cmp_code = swap_condition (cmp_code);
5908 /* Emit comparison instruction pattern. */
5909 if (!register_operand (cmp_op0, cmp_mode))
5910 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5912 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5913 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5914 /* We use insn_invalid_p here to add clobbers if required. */
5915 ret = insn_invalid_p (emit_insn (insn), false);
5918 /* Emit ALC instruction pattern. */
5919 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5920 gen_rtx_REG (cc_mode, CC_REGNUM),
5923 if (src != const0_rtx)
5925 if (!register_operand (src, GET_MODE (dst)))
5926 src = force_reg (GET_MODE (dst), src);
5928 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5929 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5932 p = rtvec_alloc (2);
5934 RTVEC_ELT (p, 0) = gen_rtx_SET (dst, op_res);
5936 RTVEC_ELT (p, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5937 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5942 /* Try SUBTRACT LOGICAL WITH BORROW. */
5943 if (increment == constm1_rtx)
5945 /* Determine CC mode to use. */
5946 if (cmp_code == EQ || cmp_code == NE)
5948 if (cmp_op1 != const0_rtx)
5950 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5951 NULL_RTX, 0, OPTAB_WIDEN);
5952 cmp_op1 = const0_rtx;
5955 cmp_code = cmp_code == EQ ? LEU : GTU;
5958 if (cmp_code == GTU || cmp_code == GEU)
5963 cmp_code = swap_condition (cmp_code);
5980 /* Emit comparison instruction pattern. */
5981 if (!register_operand (cmp_op0, cmp_mode))
5982 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5984 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5985 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5986 /* We use insn_invalid_p here to add clobbers if required. */
5987 ret = insn_invalid_p (emit_insn (insn), false);
5990 /* Emit SLB instruction pattern. */
5991 if (!register_operand (src, GET_MODE (dst)))
5992 src = force_reg (GET_MODE (dst), src);
5994 op_res = gen_rtx_MINUS (GET_MODE (dst),
5995 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5996 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5997 gen_rtx_REG (cc_mode, CC_REGNUM),
5999 p = rtvec_alloc (2);
6001 RTVEC_ELT (p, 0) = gen_rtx_SET (dst, op_res);
6003 RTVEC_ELT (p, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6004 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6012 /* Expand code for the insv template. Return true if successful. */
6015 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6017 int bitsize = INTVAL (op1);
6018 int bitpos = INTVAL (op2);
6019 machine_mode mode = GET_MODE (dest);
6021 int smode_bsize, mode_bsize;
6024 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6027 /* Generate INSERT IMMEDIATE (IILL et al). */
6028 /* (set (ze (reg)) (const_int)). */
6030 && register_operand (dest, word_mode)
6031 && (bitpos % 16) == 0
6032 && (bitsize % 16) == 0
6033 && const_int_operand (src, VOIDmode))
6035 HOST_WIDE_INT val = INTVAL (src);
6036 int regpos = bitpos + bitsize;
6038 while (regpos > bitpos)
6040 machine_mode putmode;
6043 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6048 putsize = GET_MODE_BITSIZE (putmode);
6050 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6053 gen_int_mode (val, putmode));
6056 gcc_assert (regpos == bitpos);
6060 smode = smallest_mode_for_size (bitsize, MODE_INT);
6061 smode_bsize = GET_MODE_BITSIZE (smode);
6062 mode_bsize = GET_MODE_BITSIZE (mode);
6064 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6066 && (bitsize % BITS_PER_UNIT) == 0
6068 && (register_operand (src, word_mode)
6069 || const_int_operand (src, VOIDmode)))
6071 /* Emit standard pattern if possible. */
6072 if (smode_bsize == bitsize)
6074 emit_move_insn (adjust_address (dest, smode, 0),
6075 gen_lowpart (smode, src));
6079 /* (set (ze (mem)) (const_int)). */
6080 else if (const_int_operand (src, VOIDmode))
6082 int size = bitsize / BITS_PER_UNIT;
6083 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6084 BLKmode,
6085 UNITS_PER_WORD - size);
6087 dest = adjust_address (dest, BLKmode, 0);
6088 set_mem_size (dest, size);
6089 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6093 /* (set (ze (mem)) (reg)). */
6094 else if (register_operand (src, word_mode))
6097 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6101 /* Emit st,stcmh sequence. */
6102 int stcmh_width = bitsize - 32;
6103 int size = stcmh_width / BITS_PER_UNIT;
6105 emit_move_insn (adjust_address (dest, SImode, size),
6106 gen_lowpart (SImode, src));
6107 set_mem_size (dest, size);
6108 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6109 GEN_INT (stcmh_width),
6111 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6117 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6118 if ((bitpos % BITS_PER_UNIT) == 0
6119 && (bitsize % BITS_PER_UNIT) == 0
6120 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6122 && (mode == DImode || mode == SImode)
6123 && register_operand (dest, mode))
6125 /* Emit a strict_low_part pattern if possible. */
6126 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6128 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6129 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6130 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6131 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6135 /* ??? There are more powerful versions of ICM that are not
6136 completely represented in the md file. */
6139 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6140 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6142 machine_mode mode_s = GET_MODE (src);
6144 if (CONSTANT_P (src))
6146 /* For constant zero values the representation with AND
6147 appears to be folded in more situations than the (set
6148 (zero_extract) ...).
6149 We only do this when the start and end of the bitfield
6150 remain in the same SImode chunk. That way nihf or nilf can be used.
6152 The AND patterns might still generate a risbg for this. */
6153 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6156 src = force_reg (mode, src);
6158 else if (mode_s != mode)
6160 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6161 src = force_reg (mode_s, src);
6162 src = gen_lowpart (mode, src);
6165 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
6166 op = gen_rtx_SET (op, src);
6170 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6171 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6181 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6182 register that holds VAL of mode MODE shifted by COUNT bits. */
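/* E.g. (illustrative) for QImode VAL = 0x1234abcd and COUNT = 16 this
   returns (0x1234abcd & 0xff) << 16 = 0x00cd0000, i.e. the low byte
   of VAL placed at the byte position selected by COUNT.  */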
6185 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6187 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6188 NULL_RTX, 1, OPTAB_DIRECT);
6189 return expand_simple_binop (SImode, ASHIFT, val, count,
6190 NULL_RTX, 1, OPTAB_DIRECT);
6193 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6194 the result in TARGET. */
6197 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6198 rtx cmp_op1, rtx cmp_op2)
6200 machine_mode mode = GET_MODE (target);
6201 bool neg_p = false, swap_p = false;
6204 if (GET_MODE (cmp_op1) == V2DFmode)
6208 /* NE a != b -> !(a == b) */
6209 case NE: cond = EQ; neg_p = true; break;
6210 /* UNGT a u> b -> !(b >= a) */
6211 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6212 /* UNGE a u>= b -> !(b > a) */
6213 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6214 /* LE: a <= b -> b >= a */
6215 case LE: cond = GE; swap_p = true; break;
6216 /* UNLE: a u<= b -> !(a > b) */
6217 case UNLE: cond = GT; neg_p = true; break;
6218 /* LT: a < b -> b > a */
6219 case LT: cond = GT; swap_p = true; break;
6220 /* UNLT: a u< b -> !(a >= b) */
6221 case UNLT: cond = GE; neg_p = true; break;
6223 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6226 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6229 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6232 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6241 /* NE: a != b -> !(a == b) */
6242 case NE: cond = EQ; neg_p = true; break;
6243 /* GE: a >= b -> !(b > a) */
6244 case GE: cond = GT; neg_p = true; swap_p = true; break;
6245 /* GEU: a >= b -> !(b > a) */
6246 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6247 /* LE: a <= b -> !(a > b) */
6248 case LE: cond = GT; neg_p = true; break;
6249 /* LEU: a <= b -> !(a > b) */
6250 case LEU: cond = GTU; neg_p = true; break;
6251 /* LT: a < b -> b > a */
6252 case LT: cond = GT; swap_p = true; break;
6253 /* LTU: a < b -> b > a */
6254 case LTU: cond = GTU; swap_p = true; break;
6261 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6264 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6266 cmp_op1, cmp_op2)));
6268 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6271 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6272 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6273 elements in CMP1 and CMP2 fulfill the comparison.
6274 This function is only used to emit patterns for the vx builtins and
6275 therefore only handles comparison codes required by the builtins. */
6278 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6279 rtx cmp1, rtx cmp2, bool all_p)
6281 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6282 rtx tmp_reg = gen_reg_rtx (SImode);
6283 bool swap_p = false;
6285 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6291 cc_producer_mode = CCVEQmode;
6295 code = swap_condition (code);
6300 cc_producer_mode = CCVIHmode;
6304 code = swap_condition (code);
6309 cc_producer_mode = CCVIHUmode;
6315 scratch_mode = GET_MODE (cmp1);
6316 /* These codes represent inverted CC interpretations. Inverting
6317 an ALL CC mode results in an ANY CC mode and the other way
6318 around. Invert the all_p flag here to compensate for that. */
6320 if (code == NE || code == LE || code == LEU)
6323 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6325 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6331 case EQ: cc_producer_mode = CCVEQmode; break;
6332 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6333 case GT: cc_producer_mode = CCVFHmode; break;
6334 case GE: cc_producer_mode = CCVFHEmode; break;
6335 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6336 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6337 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6338 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6339 default: gcc_unreachable ();
6341 scratch_mode = mode_for_vector (
6342 int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))),
6343 GET_MODE_NUNITS (GET_MODE (cmp1)));
6344 gcc_assert (scratch_mode != BLKmode);
6349 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6361 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6362 gen_rtvec (2, gen_rtx_SET (
6363 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6364 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6365 gen_rtx_CLOBBER (VOIDmode,
6366 gen_rtx_SCRATCH (scratch_mode)))));
6367 emit_move_insn (target, const0_rtx);
6368 emit_move_insn (tmp_reg, const1_rtx);
6370 emit_move_insn (target,
6371 gen_rtx_IF_THEN_ELSE (SImode,
6372 gen_rtx_fmt_ee (code, VOIDmode,
6373 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
6378 /* Invert the comparison CODE applied to a CC mode. This is only safe
6379 if we know whether the result was created by a floating point
6380 compare or not. For the CCV modes this is encoded as part of the mode. */
6383 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6385 /* Reversal of FP compares takes ordering into account -- an ordered
6386 compare becomes an unordered compare and vice versa. */
6387 if (mode == CCVFALLmode || mode == CCVFANYmode)
6388 return reverse_condition_maybe_unordered (code);
6389 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6390 return reverse_condition (code);
6395 /* Generate a vector comparison expression loading either elements of
6396 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1 and CMP_OP2. */
6400 s390_expand_vcond (rtx target, rtx then, rtx els,
6401 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6404 machine_mode result_mode;
6407 machine_mode target_mode = GET_MODE (target);
6408 machine_mode cmp_mode = GET_MODE (cmp_op1);
6409 rtx op = (cond == LT) ? els : then;
6411 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6412 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6413 for short and byte (x >> 15 and x >> 7 respectively). */
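/* E.g. (illustrative) for a V4SI element x = -5 the arithmetic shift
   -5 >> 31 yields all ones (-1) while the logical shift
   0xfffffffb >> 31 yields 1 -- exactly the two result patterns
   handled below.  */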
6414 if ((cond == LT || cond == GE)
6415 && target_mode == cmp_mode
6416 && cmp_op2 == CONST0_RTX (cmp_mode)
6417 && op == CONST0_RTX (target_mode)
6418 && s390_vector_mode_supported_p (target_mode)
6419 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6421 rtx negop = (cond == LT) ? then : els;
6423 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6425 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6426 if (negop == CONST1_RTX (target_mode))
6428 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6429 GEN_INT (shift), target,
6432 emit_move_insn (target, res);
6436 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6437 else if (all_ones_operand (negop, target_mode))
6439 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6440 GEN_INT (shift), target,
6443 emit_move_insn (target, res);
6448 /* We always use an integral type vector to hold the comparison result. */
6450 result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
6451 result_target = gen_reg_rtx (result_mode);
6453 /* We allow vector immediates as comparison operands that
6454 can be handled by the optimization above but not by the
6455 following code. Hence, force them into registers here. */
6456 if (!REG_P (cmp_op1))
6457 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6459 if (!REG_P (cmp_op2))
6460 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6462 s390_expand_vec_compare (result_target, cond,
6465 /* If the results are supposed to be either -1 or 0 we are done
6466 since this is what our compare instructions generate anyway. */
6467 if (all_ones_operand (then, GET_MODE (then))
6468 && const0_operand (els, GET_MODE (els)))
6470 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6475 /* Otherwise we will do a vsel afterwards. */
6476 /* This gets triggered e.g.
6477 with gcc.c-torture/compile/pr53410-1.c */
6479 then = force_reg (target_mode, then);
6482 els = force_reg (target_mode, els);
6484 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6486 CONST0_RTX (result_mode));
6488 /* We compared the result against zero above so we have to swap the then and els operands. */
6490 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6492 gcc_assert (target_mode == GET_MODE (then));
6493 emit_insn (gen_rtx_SET (target, tmp));
6496 /* Emit the RTX necessary to initialize the vector TARGET with values in VALS. */
6499 s390_expand_vec_init (rtx target, rtx vals)
6501 machine_mode mode = GET_MODE (target);
6502 machine_mode inner_mode = GET_MODE_INNER (mode);
6503 int n_elts = GET_MODE_NUNITS (mode);
6504 bool all_same = true, all_regs = true, all_const_int = true;
6508 for (i = 0; i < n_elts; ++i)
6510 x = XVECEXP (vals, 0, i);
6512 if (!CONST_INT_P (x))
6513 all_const_int = false;
6515 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6522 /* Use vector gen mask or vector gen byte mask if possible. */
6523 if (all_same && all_const_int
6524 && (XVECEXP (vals, 0, 0) == const0_rtx
6525 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6527 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6529 emit_insn (gen_rtx_SET (target,
6530 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6536 emit_insn (gen_rtx_SET (target,
6537 gen_rtx_VEC_DUPLICATE (mode,
6538 XVECEXP (vals, 0, 0))));
6545 && GET_MODE_SIZE (inner_mode) == 8)
6547 /* Use vector load pair. */
6548 emit_insn (gen_rtx_SET (target,
6549 gen_rtx_VEC_CONCAT (mode,
6550 XVECEXP (vals, 0, 0),
6551 XVECEXP (vals, 0, 1))));
6555 /* Use vector load logical element and zero. */
6556 if (TARGET_VXE && (mode == V4SImode || mode == V4SFmode))
6560 x = XVECEXP (vals, 0, 0);
6561 if (memory_operand (x, inner_mode))
6563 for (i = 1; i < n_elts; ++i)
6564 found = found && XVECEXP (vals, 0, i) == const0_rtx;
6568 machine_mode half_mode = (inner_mode == SFmode
6569 ? V2SFmode : V2SImode);
6570 emit_insn (gen_rtx_SET (target,
6571 gen_rtx_VEC_CONCAT (mode,
6572 gen_rtx_VEC_CONCAT (half_mode,
6575 gen_rtx_VEC_CONCAT (half_mode,
6583 /* We are about to set the vector elements one by one. Zero out the
6584 full register first in order to help the data flow framework to
6585 detect it as a full VR set. */
6586 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6588 /* Unfortunately the vec_init expander is not allowed to fail. So
6589 we have to implement the fallback ourselves. */
6590 for (i = 0; i < n_elts; i++)
6592 rtx elem = XVECEXP (vals, 0, i);
6593 if (!general_operand (elem, GET_MODE (elem)))
6594 elem = force_reg (inner_mode, elem);
6596 emit_insn (gen_rtx_SET (target,
6597 gen_rtx_UNSPEC (mode,
6599 GEN_INT (i), target),
6604 /* Structure to hold the initial parameters for a compare_and_swap operation
6605 in HImode and QImode. */
6607 struct alignment_context
6609 rtx memsi; /* SI aligned memory location. */
6610 rtx shift; /* Bit offset with regard to lsb. */
6611 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6612 rtx modemaski; /* ~modemask */
6613 bool aligned; /* True if memory is aligned, false otherwise. */
6616 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6617 the structure AC for transparent simplification, if the memory alignment
6618 is known to be at least 32 bits. MEM is the memory location for the actual operation
6619 and MODE its mode. */
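/* Worked example (mine, not from the original sources): for a QImode
   MEM at address 0x1001 with unknown alignment the code computes

     align      = 0x1001 & -4 = 0x1000	     (SImode-aligned base)
     byteoffset = 0x1001 & 3  = 1
     shift      = (3 - 1) * 8 = 16	     (bits; big-endian layout)
     modemask   = 0xff << 16  = 0x00ff0000

   i.e. the byte lives in bits 16..23 of the containing SImode word.  */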
6622 init_alignment_context (struct alignment_context *ac, rtx mem,
6625 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6626 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6629 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6632 /* Alignment is unknown. */
6633 rtx byteoffset, addr, align;
6635 /* Force the address into a register. */
6636 addr = force_reg (Pmode, XEXP (mem, 0));
6638 /* Align it to SImode. */
6639 align = expand_simple_binop (Pmode, AND, addr,
6640 GEN_INT (-GET_MODE_SIZE (SImode)),
6641 NULL_RTX, 1, OPTAB_DIRECT);
6643 ac->memsi = gen_rtx_MEM (SImode, align);
6644 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6645 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6646 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6648 /* Calculate shiftcount. */
6649 byteoffset = expand_simple_binop (Pmode, AND, addr,
6650 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6651 NULL_RTX, 1, OPTAB_DIRECT);
6652 /* As we already have some offset, evaluate the remaining distance. */
6653 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6654 NULL_RTX, 1, OPTAB_DIRECT);
6657 /* Shift is the byte count, but we need the bitcount. */
6658 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6659 NULL_RTX, 1, OPTAB_DIRECT);
6661 /* Calculate masks. */
6662 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6663 GEN_INT (GET_MODE_MASK (mode)),
6664 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6665 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6669 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6670 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6671 perform the merge in SEQ2. */
6674 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6675 machine_mode mode, rtx val, rtx ins)
6682 tmp = copy_to_mode_reg (SImode, val);
6683 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6687 *seq2 = get_insns ();
6694 /* Failed to use insv. Generate a two part shift and mask. */
6696 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6697 *seq1 = get_insns ();
6701 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6702 *seq2 = get_insns ();
6708 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6709 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6710 value to set if CMP == MEM. */
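/* A rough sketch of the strategy used below (my paraphrase): the
   HImode/QImode access is emulated with a full-word compare-and-swap.
   The surrounding bytes are loaded once, CMP and NEW_RTX are inserted
   at the right byte position within that word, and the CS loop
   retries only when the neighbouring bytes -- not the field itself --
   changed concurrently.  */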
6713 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6714 rtx cmp, rtx new_rtx, bool is_weak)
6716 struct alignment_context ac;
6717 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6718 rtx res = gen_reg_rtx (SImode);
6719 rtx_code_label *csloop = NULL, *csend = NULL;
6721 gcc_assert (MEM_P (mem));
6723 init_alignment_context (&ac, mem, mode);
6725 /* Load full word. Subsequent loads are performed by CS. */
6726 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6727 NULL_RTX, 1, OPTAB_DIRECT);
6729 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6730 possible, we try to use insv to make this happen efficiently. If
6731 that fails we'll generate code both inside and outside the loop. */
6732 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6733 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6740 /* Start CS loop. */
6743 /* Begin assuming success. */
6744 emit_move_insn (btarget, const1_rtx);
6746 csloop = gen_label_rtx ();
6747 csend = gen_label_rtx ();
6748 emit_label (csloop);
6751 /* val = "<mem>00..0<mem>"
6752 * cmp = "00..0<cmp>00..0"
6753 * new = "00..0<new>00..0"
6759 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6761 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6766 /* Jump to end if we're done (likely?). */
6767 s390_emit_jump (csend, cc);
6769 /* Check for changes outside mode, and loop internally if so.
6770 Arrange the moves so that the compare is adjacent to the
6771 branch so that we can generate CRJ. */
6772 tmp = copy_to_reg (val);
6773 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6775 cc = s390_emit_compare (NE, val, tmp);
6776 s390_emit_jump (csloop, cc);
6779 emit_move_insn (btarget, const0_rtx);
6783 /* Return the correct part of the bitfield. */
6784 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6785 NULL_RTX, 1, OPTAB_DIRECT), 1);
6788 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6789 and VAL the value to play with. If AFTER is true then store the value
6790 MEM holds after the operation, if AFTER is false then store the value MEM
6791 holds before the operation. If TARGET is zero then discard that value, else
6792 store it to TARGET. */
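/* Illustrative example (mine): an atomic QImode AND on a field at bit
   position 16 (modemask 0x00ff0000) first widens VAL to the pattern
   "11..1<val>11..1" by XORing the shifted value with ~modemask, e.g.
   val 0x0f becomes 0xff0fffff; ANDing the full word with that then
   updates only the field and leaves the surrounding bytes intact.  */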
6795 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6796 rtx target, rtx mem, rtx val, bool after)
6798 struct alignment_context ac;
6800 rtx new_rtx = gen_reg_rtx (SImode);
6801 rtx orig = gen_reg_rtx (SImode);
6802 rtx_code_label *csloop = gen_label_rtx ();
6804 gcc_assert (!target || register_operand (target, VOIDmode));
6805 gcc_assert (MEM_P (mem));
6807 init_alignment_context (&ac, mem, mode);
6809 /* Shift val to the correct bit positions.
6810 Preserve "icm", but prevent "ex icm". */
6811 if (!(ac.aligned && code == SET && MEM_P (val)))
6812 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6814 /* Further preparation insns. */
6815 if (code == PLUS || code == MINUS)
6816 emit_move_insn (orig, val);
6817 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6818 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6819 NULL_RTX, 1, OPTAB_DIRECT);
6821 /* Load full word. Subsequent loads are performed by CS. */
6822 cmp = force_reg (SImode, ac.memsi);
6824 /* Start CS loop. */
6825 emit_label (csloop);
6826 emit_move_insn (new_rtx, cmp);
6828 /* Patch new with val at correct position. */
6833 val = expand_simple_binop (SImode, code, new_rtx, orig,
6834 NULL_RTX, 1, OPTAB_DIRECT);
6835 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6836 NULL_RTX, 1, OPTAB_DIRECT);
6839 if (ac.aligned && MEM_P (val))
6840 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6841 0, 0, SImode, val, false);
6844 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6845 NULL_RTX, 1, OPTAB_DIRECT);
6846 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6847 NULL_RTX, 1, OPTAB_DIRECT);
6853 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6854 NULL_RTX, 1, OPTAB_DIRECT);
6856 case MULT: /* NAND */
6857 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6858 NULL_RTX, 1, OPTAB_DIRECT);
6859 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6860 NULL_RTX, 1, OPTAB_DIRECT);
6866 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6867 ac.memsi, cmp, new_rtx));
6869 /* Return the correct part of the bitfield. */
6871 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6872 after ? new_rtx : cmp, ac.shift,
6873 NULL_RTX, 1, OPTAB_DIRECT), 1);
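/* A sketch (not part of GCC) of the loop just expanded: a subword
   atomic read-modify-write built from a full-word compare-and-swap.
   APPLY_OP is a hypothetical stand-in for the PLUS/MINUS/AND/IOR/
   XOR/NAND field patching performed above.

     uint32_t cmp = *word;                      // initial load; CS reloads
     for (;;)
       {
         uint32_t new_word = apply_op (cmp, val); // patch operand field only
         uint32_t res = __sync_val_compare_and_swap (word, cmp, new_word);
         if (res == cmp)
           break;                               // CS stored new_word
         cmp = res;                             // lost the race: retry
       }
     // AFTER selects the operand field of new_word, otherwise of cmp.
*/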
6876 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6877 We need to emit DTP-relative relocations. */
6879 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6882 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6887 fputs ("\t.long\t", file);
6890 fputs ("\t.quad\t", file);
6895 output_addr_const (file, x);
6896 fputs ("@DTPOFF", file);
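/* For example (illustrative, assuming a symbol "foo"): a 64-bit
   DTP-relative reference is emitted as
       .quad   foo@DTPOFF
   while the 31-bit case uses a ".long" directive instead.  */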
6899 /* Return the proper mode for REGNO being represented in the dwarf unwind table. */
6902 s390_dwarf_frame_reg_mode (int regno)
6904 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6906 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6907 if (GENERAL_REGNO_P (regno))
6910 /* The rightmost 64 bits of vector registers are call-clobbered. */
6911 if (GET_MODE_SIZE (save_mode) > 8)
6917 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6918 /* Implement TARGET_MANGLE_TYPE. */
6921 s390_mangle_type (const_tree type)
6923 type = TYPE_MAIN_VARIANT (type);
6925 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6926 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6929 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6930 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6931 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6932 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6934 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6935 && TARGET_LONG_DOUBLE_128)
6938 /* For all other types, use normal C++ mangling. */
6943 /* In the name of slightly smaller debug output, and to cater to
6944 general assembler lossage, recognize various UNSPEC sequences
6945 and turn them back into a direct symbol reference. */
6948 s390_delegitimize_address (rtx orig_x)
6952 orig_x = delegitimize_mem_from_attrs (orig_x);
6955 /* Extract the symbol ref from:
6956 (plus:SI (reg:SI 12 %r12)
6957 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6958 UNSPEC_GOTOFF/PLTOFF)))
6960 (plus:SI (reg:SI 12 %r12)
6961 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6962 UNSPEC_GOTOFF/PLTOFF)
6963 (const_int 4 [0x4])))) */
6964 if (GET_CODE (x) == PLUS
6965 && REG_P (XEXP (x, 0))
6966 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6967 && GET_CODE (XEXP (x, 1)) == CONST)
6969 HOST_WIDE_INT offset = 0;
6971 /* The const operand. */
6972 y = XEXP (XEXP (x, 1), 0);
6974 if (GET_CODE (y) == PLUS
6975 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6977 offset = INTVAL (XEXP (y, 1));
6981 if (GET_CODE (y) == UNSPEC
6982 && (XINT (y, 1) == UNSPEC_GOTOFF
6983 || XINT (y, 1) == UNSPEC_PLTOFF))
6984 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6987 if (GET_CODE (x) != MEM)
6991 if (GET_CODE (x) == PLUS
6992 && GET_CODE (XEXP (x, 1)) == CONST
6993 && GET_CODE (XEXP (x, 0)) == REG
6994 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6996 y = XEXP (XEXP (x, 1), 0);
6997 if (GET_CODE (y) == UNSPEC
6998 && XINT (y, 1) == UNSPEC_GOT)
6999 y = XVECEXP (y, 0, 0);
7003 else if (GET_CODE (x) == CONST)
7005 /* Extract the symbol ref from:
7006 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7007 UNSPEC_PLT/GOTENT))) */
7010 if (GET_CODE (y) == UNSPEC
7011 && (XINT (y, 1) == UNSPEC_GOTENT
7012 || XINT (y, 1) == UNSPEC_PLT))
7013 y = XVECEXP (y, 0, 0);
7020 if (GET_MODE (orig_x) != Pmode)
7022 if (GET_MODE (orig_x) == BLKmode)
7024 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7031 /* Output operand OP to stdio stream FILE.
7032 OP is an address (register + offset) which is not used to address data;
7033 instead the rightmost bits are interpreted as the value. */
7036 print_addrstyle_operand (FILE *file, rtx op)
7038 HOST_WIDE_INT offset;
7041 /* Extract base register and offset. */
7042 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7048 gcc_assert (GET_CODE (base) == REG);
7049 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7050 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
7053 /* Offsets are restricted to twelve bits. */
7054 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7056 fprintf (file, "(%s)", reg_names[REGNO (base)]);
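/* Example (illustrative): for (plus (reg %r3) (const_int 4100)) this
   prints "4(%r3)" -- only the rightmost twelve bits of the offset
   survive (4100 & 0xfff == 4), matching how the hardware interprets
   shift counts.  */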
7059 /* Assigns the number of NOP halfwords to be emitted before and after the
7060 function label to *HW_BEFORE and *HW_AFTER. Neither pointer may be NULL.
7061 If hotpatching is disabled for the function, the values are set to zero.
7065 s390_function_num_hotpatch_hw (tree decl,
7071 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7073 /* Handle the arguments of the hotpatch attribute. The values
7074 specified via attribute might override the cmdline argument
7078 tree args = TREE_VALUE (attr);
7080 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7081 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7085 /* Use the values specified by the cmdline arguments. */
7086 *hw_before = s390_hotpatch_hw_before_label;
7087 *hw_after = s390_hotpatch_hw_after_label;
7091 /* Write the current .machine and .machinemode specification to the assembler file. */
7094 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7096 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7098 fprintf (asm_out_file, "\t.machinemode %s\n",
7099 (TARGET_ZARCH) ? "zarch" : "esa");
7100 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
7101 if (S390_USE_ARCHITECTURE_MODIFIERS)
7105 cpu_flags = processor_flags_table[(int) s390_arch];
7106 if (TARGET_HTM && !(cpu_flags & PF_TX))
7107 fprintf (asm_out_file, "+htm");
7108 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7109 fprintf (asm_out_file, "+nohtm");
7110 if (TARGET_VX && !(cpu_flags & PF_VX))
7111 fprintf (asm_out_file, "+vx");
7112 else if (!TARGET_VX && (cpu_flags & PF_VX))
7113 fprintf (asm_out_file, "+novx");
7115 fprintf (asm_out_file, "\"\n");
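/* Example (illustrative): compiling with -march=zEC12 -mzarch -mvx
   would emit roughly
       .machinemode zarch
       .machine "zEC12+vx"
   since the vector facility is not part of the zEC12 feature flags.  */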
7118 /* Write an extra function header before the very start of the function. */
7121 s390_asm_output_function_prefix (FILE *asm_out_file,
7122 const char *fnname ATTRIBUTE_UNUSED)
7124 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7126 /* Since only the function-specific options are saved, but not an indication
7127 of which options were explicitly set, it's too much work here to figure out
7128 which options have actually changed. Thus, generate .machine and
7129 .machinemode whenever a function has the target attribute or pragma. */
7130 fprintf (asm_out_file, "\t.machinemode push\n");
7131 fprintf (asm_out_file, "\t.machine push\n");
7132 s390_asm_output_machine_for_arch (asm_out_file);
7135 /* Write an extra function footer after the very end of the function. */
7138 s390_asm_declare_function_size (FILE *asm_out_file,
7139 const char *fnname, tree decl)
7141 if (!flag_inhibit_size_directive)
7142 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7143 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7145 fprintf (asm_out_file, "\t.machine pop\n");
7146 fprintf (asm_out_file, "\t.machinemode pop\n");
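/* Putting the prefix and footer hooks together, a function carrying a
   target attribute is bracketed roughly like this (sketch):
       .machinemode push
       .machine push
       .machinemode zarch
       .machine "z13"
     foo:
       ...
       .size foo, .-foo
       .machine pop
       .machinemode pop
*/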
7150 /* Write the extra assembler code needed to declare a function properly. */
7153 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7156 int hw_before, hw_after;
7158 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7161 unsigned int function_alignment;
7164 /* Add a trampoline code area before the function label and initialize it
7165 with two-byte nop instructions. This area can be overwritten with code
7166 that jumps to a patched version of the function. */
7167 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
7168 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7170 for (i = 1; i < hw_before; i++)
7171 fputs ("\tnopr\t%r0\n", asm_out_file);
7173 /* Note: The function label must be aligned so that (a) the bytes of the
7174 following nop do not cross a cacheline boundary, and (b) a jump address
7175 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7176 stored directly before the label without crossing a cacheline
7177 boundary. All this is necessary to make sure the trampoline code can
7178 be changed atomically.
7179 This alignment is done automatically using the FUNCTION_BOUNDARY, but
7180 if there are NOPs before the function label, the alignment is placed
7181 before them. So it is necessary to duplicate the alignment after the NOPs. */
7183 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7184 if (! DECL_USER_ALIGN (decl))
7185 function_alignment = MAX (function_alignment,
7186 (unsigned int) align_functions);
7187 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7188 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7191 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7193 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7194 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7195 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7196 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7197 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7198 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7199 s390_warn_framesize);
7200 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7201 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7202 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7203 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7204 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7205 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7206 TARGET_PACKED_STACK);
7207 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7208 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7209 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7210 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7211 s390_warn_dynamicstack_p);
7213 ASM_OUTPUT_LABEL (asm_out_file, fname);
7215 asm_fprintf (asm_out_file,
7216 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7220 /* Output machine-dependent UNSPECs occurring in address constant X
7221 in assembler syntax to stdio stream FILE. Returns true if the
7222 constant X could be recognized, false otherwise. */
7225 s390_output_addr_const_extra (FILE *file, rtx x)
7227 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7228 switch (XINT (x, 1))
7231 output_addr_const (file, XVECEXP (x, 0, 0));
7232 fprintf (file, "@GOTENT");
7235 output_addr_const (file, XVECEXP (x, 0, 0));
7236 fprintf (file, "@GOT");
7239 output_addr_const (file, XVECEXP (x, 0, 0));
7240 fprintf (file, "@GOTOFF");
7243 output_addr_const (file, XVECEXP (x, 0, 0));
7244 fprintf (file, "@PLT");
7247 output_addr_const (file, XVECEXP (x, 0, 0));
7248 fprintf (file, "@PLTOFF");
7251 output_addr_const (file, XVECEXP (x, 0, 0));
7252 fprintf (file, "@TLSGD");
7255 assemble_name (file, get_some_local_dynamic_name ());
7256 fprintf (file, "@TLSLDM");
7259 output_addr_const (file, XVECEXP (x, 0, 0));
7260 fprintf (file, "@DTPOFF");
7263 output_addr_const (file, XVECEXP (x, 0, 0));
7264 fprintf (file, "@NTPOFF");
7266 case UNSPEC_GOTNTPOFF:
7267 output_addr_const (file, XVECEXP (x, 0, 0));
7268 fprintf (file, "@GOTNTPOFF");
7270 case UNSPEC_INDNTPOFF:
7271 output_addr_const (file, XVECEXP (x, 0, 0));
7272 fprintf (file, "@INDNTPOFF");
7276 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7277 switch (XINT (x, 1))
7279 case UNSPEC_POOL_OFFSET:
7280 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7281 output_addr_const (file, x);
7287 /* Output address operand ADDR in assembler syntax to
7288 stdio stream FILE. */
7291 print_operand_address (FILE *file, rtx addr)
7293 struct s390_address ad;
7294 memset (&ad, 0, sizeof (s390_address));
7296 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7300 output_operand_lossage ("symbolic memory references are "
7301 "only supported on z10 or later");
7304 output_addr_const (file, addr);
7308 if (!s390_decompose_address (addr, &ad)
7309 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7310 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7311 output_operand_lossage ("cannot decompose address");
7314 output_addr_const (file, ad.disp);
7316 fprintf (file, "0");
7318 if (ad.base && ad.indx)
7319 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7320 reg_names[REGNO (ad.base)]);
7322 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7325 /* Output operand X in assembler syntax to stdio stream FILE.
7326 CODE specified the format flag. The following format flags
7329 'C': print opcode suffix for branch condition.
7330 'D': print opcode suffix for inverse branch condition.
7331 'E': print opcode suffix for branch on index instruction.
7332 'G': print the size of the operand in bytes.
7333 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7334 'M': print the second word of a TImode operand.
7335 'N': print the second word of a DImode operand.
7336 'O': print only the displacement of a memory reference or address.
7337 'R': print only the base register of a memory reference or address.
7338 'S': print S-type memory reference (base+displacement).
7339 'Y': print address style operand without index (e.g. shift count or setmem operand).
7342 'b': print integer X as if it's an unsigned byte.
7343 'c': print integer X as if it's a signed byte.
7344 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7345 'f': "end" contiguous bitmask X in SImode.
7346 'h': print integer X as if it's a signed halfword.
7347 'i': print the first nonzero HImode part of X.
7348 'j': print the first HImode part unequal to -1 of X.
7349 'k': print the first nonzero SImode part of X.
7350 'm': print the first SImode part unequal to -1 of X.
7351 'o': print integer X as if it's an unsigned 32-bit word.
7352 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7353 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7354 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7355 'x': print integer X as if it's an unsigned halfword.
7356 'v': print register number as vector register (v1 instead of f1).
7360 print_operand (FILE *file, rtx x, int code)
7367 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7371 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7375 if (GET_CODE (x) == LE)
7376 fprintf (file, "l");
7377 else if (GET_CODE (x) == GT)
7378 fprintf (file, "h");
7380 output_operand_lossage ("invalid comparison operator "
7381 "for 'E' output modifier");
7385 if (GET_CODE (x) == SYMBOL_REF)
7387 fprintf (file, "%s", ":tls_load:");
7388 output_addr_const (file, x);
7390 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7392 fprintf (file, "%s", ":tls_gdcall:");
7393 output_addr_const (file, XVECEXP (x, 0, 0));
7395 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7397 fprintf (file, "%s", ":tls_ldcall:");
7398 const char *name = get_some_local_dynamic_name ();
7400 assemble_name (file, name);
7403 output_operand_lossage ("invalid reference for 'J' output modifier");
7407 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7412 struct s390_address ad;
7415 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7418 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7421 output_operand_lossage ("invalid address for 'O' output modifier");
7426 output_addr_const (file, ad.disp);
7428 fprintf (file, "0");
7434 struct s390_address ad;
7437 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7440 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7443 output_operand_lossage ("invalid address for 'R' output modifier");
7448 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7450 fprintf (file, "0");
7456 struct s390_address ad;
7461 output_operand_lossage ("memory reference expected for "
7462 "'S' output modifier");
7465 ret = s390_decompose_address (XEXP (x, 0), &ad);
7468 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7471 output_operand_lossage ("invalid address for 'S' output modifier");
7476 output_addr_const (file, ad.disp);
7478 fprintf (file, "0");
7481 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7486 if (GET_CODE (x) == REG)
7487 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7488 else if (GET_CODE (x) == MEM)
7489 x = change_address (x, VOIDmode,
7490 plus_constant (Pmode, XEXP (x, 0), 4));
7492 output_operand_lossage ("register or memory expression expected "
7493 "for 'N' output modifier");
7497 if (GET_CODE (x) == REG)
7498 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7499 else if (GET_CODE (x) == MEM)
7500 x = change_address (x, VOIDmode,
7501 plus_constant (Pmode, XEXP (x, 0), 8));
7503 output_operand_lossage ("register or memory expression expected "
7504 "for 'M' output modifier");
7508 print_addrstyle_operand (file, x);
7512 switch (GET_CODE (x))
7515 /* Print FP regs as fx instead of vx when they are accessed
7516 through non-vector mode. */
7518 || VECTOR_NOFP_REG_P (x)
7519 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7520 || (VECTOR_REG_P (x)
7521 && (GET_MODE_SIZE (GET_MODE (x)) /
7522 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7523 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7525 fprintf (file, "%s", reg_names[REGNO (x)]);
7529 output_address (GET_MODE (x), XEXP (x, 0));
7536 output_addr_const (file, x);
7549 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7555 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7558 ival = s390_extract_part (x, HImode, 0);
7561 ival = s390_extract_part (x, HImode, -1);
7564 ival = s390_extract_part (x, SImode, 0);
7567 ival = s390_extract_part (x, SImode, -1);
7579 len = (code == 's' || code == 'e' ? 64 : 32);
7580 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7582 if (code == 's' || code == 't')
7589 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7591 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7594 case CONST_WIDE_INT:
7596 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7597 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7598 else if (code == 'x')
7599 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7600 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7601 else if (code == 'h')
7602 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7603 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7607 output_operand_lossage ("invalid constant - try using "
7608 "an output modifier");
7610 output_operand_lossage ("invalid constant for output modifier '%c'",
7618 gcc_assert (const_vec_duplicate_p (x));
7619 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7620 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7628 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7630 ival = (code == 's') ? start : end;
7631 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7637 bool ok = s390_bytemask_vector_p (x, &mask);
7639 fprintf (file, "%u", mask);
7644 output_operand_lossage ("invalid constant vector for output "
7645 "modifier '%c'", code);
7651 output_operand_lossage ("invalid expression - try using "
7652 "an output modifier");
7654 output_operand_lossage ("invalid expression for output "
7655 "modifier '%c'", code);
7660 /* Target hook for assembling integer objects. We need to define it
7661 here to work around a bug in some versions of GAS, which couldn't
7662 handle values smaller than INT_MIN when printed in decimal. */
7665 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7667 if (size == 8 && aligned_p
7668 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7670 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7674 return default_assemble_integer (x, size, aligned_p);
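/* Example (illustrative): for the CONST_INT -2147483649 (one below
   INT_MIN) this prints
       .quad 0xffffffff7fffffff
   instead of the decimal form that affected GAS versions reject.  */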
7677 /* Returns true if register REGNO is used for forming
7678 a memory address in expression X. */
7681 reg_used_in_mem_p (int regno, rtx x)
7683 enum rtx_code code = GET_CODE (x);
7689 if (refers_to_regno_p (regno, XEXP (x, 0)))
7692 else if (code == SET
7693 && GET_CODE (SET_DEST (x)) == PC)
7695 if (refers_to_regno_p (regno, SET_SRC (x)))
7699 fmt = GET_RTX_FORMAT (code);
7700 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7703 && reg_used_in_mem_p (regno, XEXP (x, i)))
7706 else if (fmt[i] == 'E')
7707 for (j = 0; j < XVECLEN (x, i); j++)
7708 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7714 /* Returns true if expression DEP_RTX sets an address register
7715 used by instruction INSN to address memory. */
7718 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7722 if (NONJUMP_INSN_P (dep_rtx))
7723 dep_rtx = PATTERN (dep_rtx);
7725 if (GET_CODE (dep_rtx) == SET)
7727 target = SET_DEST (dep_rtx);
7728 if (GET_CODE (target) == STRICT_LOW_PART)
7729 target = XEXP (target, 0);
7730 while (GET_CODE (target) == SUBREG)
7731 target = SUBREG_REG (target);
7733 if (GET_CODE (target) == REG)
7735 int regno = REGNO (target);
7737 if (s390_safe_attr_type (insn) == TYPE_LA)
7739 pat = PATTERN (insn);
7740 if (GET_CODE (pat) == PARALLEL)
7742 gcc_assert (XVECLEN (pat, 0) == 2);
7743 pat = XVECEXP (pat, 0, 0);
7745 gcc_assert (GET_CODE (pat) == SET);
7746 return refers_to_regno_p (regno, SET_SRC (pat));
7748 else if (get_attr_atype (insn) == ATYPE_AGEN)
7749 return reg_used_in_mem_p (regno, PATTERN (insn));
7755 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
7758 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7760 rtx dep_rtx = PATTERN (dep_insn);
7763 if (GET_CODE (dep_rtx) == SET
7764 && addr_generation_dependency_p (dep_rtx, insn))
7766 else if (GET_CODE (dep_rtx) == PARALLEL)
7768 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7770 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
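/* Example (illustrative): in the sequence
       lr   %r1,%r2        # dep_insn sets %r1
       l    %r3,0(%r1)     # insn uses %r1 to form its address
   s390_agen_dep_p returns 1, letting the scheduler account for the
   address-generation latency between the two insns.  */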
7778 /* A C statement (sans semicolon) to update the integer scheduling priority
7779 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7780 reduce the priority to execute INSN later. Do not define this macro if
7781 you do not need to adjust the scheduling priorities of insns.
7783 A STD instruction should be scheduled earlier,
7784 in order to use the bypass. */
7786 s390_adjust_priority (rtx_insn *insn, int priority)
7788 if (! INSN_P (insn))
7791 if (s390_tune <= PROCESSOR_2064_Z900)
7794 switch (s390_safe_attr_type (insn))
7798 priority = priority << 3;
7802 priority = priority << 1;
7811 /* The number of instructions that can be issued per cycle. */
7814 s390_issue_rate (void)
7818 case PROCESSOR_2084_Z990:
7819 case PROCESSOR_2094_Z9_109:
7820 case PROCESSOR_2094_Z9_EC:
7821 case PROCESSOR_2817_Z196:
7823 case PROCESSOR_2097_Z10:
7825 case PROCESSOR_9672_G5:
7826 case PROCESSOR_9672_G6:
7827 case PROCESSOR_2064_Z900:
7828 /* Starting with EC12 we use the sched_reorder hook to take care
7829 of instruction dispatch constraints. The algorithm only
7830 picks the best instruction and assumes only a single
7831 instruction gets issued per cycle. */
7832 case PROCESSOR_2827_ZEC12:
7833 case PROCESSOR_2964_Z13:
7834 case PROCESSOR_ARCH12:
7841 s390_first_cycle_multipass_dfa_lookahead (void)
7846 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7847 Fix up MEMs as required. */
7850 annotate_constant_pool_refs (rtx *x)
7855 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7856 || !CONSTANT_POOL_ADDRESS_P (*x));
7858 /* Literal pool references can only occur inside a MEM ... */
7859 if (GET_CODE (*x) == MEM)
7861 rtx memref = XEXP (*x, 0);
7863 if (GET_CODE (memref) == SYMBOL_REF
7864 && CONSTANT_POOL_ADDRESS_P (memref))
7866 rtx base = cfun->machine->base_reg;
7867 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7870 *x = replace_equiv_address (*x, addr);
7874 if (GET_CODE (memref) == CONST
7875 && GET_CODE (XEXP (memref, 0)) == PLUS
7876 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7877 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7878 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7880 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7881 rtx sym = XEXP (XEXP (memref, 0), 0);
7882 rtx base = cfun->machine->base_reg;
7883 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7886 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7891 /* ... or a load-address type pattern. */
7892 if (GET_CODE (*x) == SET)
7894 rtx addrref = SET_SRC (*x);
7896 if (GET_CODE (addrref) == SYMBOL_REF
7897 && CONSTANT_POOL_ADDRESS_P (addrref))
7899 rtx base = cfun->machine->base_reg;
7900 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7903 SET_SRC (*x) = addr;
7907 if (GET_CODE (addrref) == CONST
7908 && GET_CODE (XEXP (addrref, 0)) == PLUS
7909 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7910 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7911 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7913 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7914 rtx sym = XEXP (XEXP (addrref, 0), 0);
7915 rtx base = cfun->machine->base_reg;
7916 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7919 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7924 /* Annotate LTREL_BASE as well. */
7925 if (GET_CODE (*x) == UNSPEC
7926 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7928 rtx base = cfun->machine->base_reg;
7929 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7934 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7935 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7939 annotate_constant_pool_refs (&XEXP (*x, i));
7941 else if (fmt[i] == 'E')
7943 for (j = 0; j < XVECLEN (*x, i); j++)
7944 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
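/* Example (illustrative): a literal pool load such as
       (mem:SI (symbol_ref ("*.LC0")))
   is rewritten into
       (mem:SI (unspec [(symbol_ref ("*.LC0"))
                        (reg %r13)] UNSPEC_LTREF))
   making the implicit use of the literal pool base register
   explicit (%r13 assuming the usual base register choice).  */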
7949 /* Split all branches that exceed the maximum distance.
7950 Returns true if this created a new literal pool entry. */
7953 s390_split_branches (void)
7955 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7956 int new_literal = 0, ret;
7961 /* We need correct insn addresses. */
7963 shorten_branches (get_insns ());
7965 /* Find all branches that exceed 64KB, and split them. */
7967 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7969 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7972 pat = PATTERN (insn);
7973 if (GET_CODE (pat) == PARALLEL)
7974 pat = XVECEXP (pat, 0, 0);
7975 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7978 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7980 label = &SET_SRC (pat);
7982 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7984 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7985 label = &XEXP (SET_SRC (pat), 1);
7986 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7987 label = &XEXP (SET_SRC (pat), 2);
7994 if (get_attr_length (insn) <= 4)
7997 /* We are going to use the return register as a scratch register,
7998 so make sure it will be saved/restored by the prologue/epilogue. */
7999 cfun_frame_layout.save_return_addr_p = 1;
8004 rtx mem = force_const_mem (Pmode, *label);
8005 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8007 INSN_ADDRESSES_NEW (set_insn, -1);
8008 annotate_constant_pool_refs (&PATTERN (set_insn));
8015 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8016 UNSPEC_LTREL_OFFSET);
8017 target = gen_rtx_CONST (Pmode, target);
8018 target = force_const_mem (Pmode, target);
8019 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8021 INSN_ADDRESSES_NEW (set_insn, -1);
8022 annotate_constant_pool_refs (&PATTERN (set_insn));
8024 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8025 cfun->machine->base_reg),
8027 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8030 ret = validate_change (insn, label, target, 0);
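/* Sketch (illustrative) of the resulting code on z architectures: the
   target address is loaded from the literal pool into the return
   register and the branch goes indirect, roughly
       lg    %r14,<pool slot holding the label address>
       bcr   <cond>,%r14
   which is why save_return_addr_p is forced above.  */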
8038 /* Find an annotated literal pool symbol referenced in RTX X,
8039 and store it at REF. Will abort if X contains references to
8040 more than one such pool symbol; multiple references to the same
8041 symbol are allowed, however.
8043 The rtx pointed to by REF must be initialized to NULL_RTX
8044 by the caller before calling this routine. */
8047 find_constant_pool_ref (rtx x, rtx *ref)
8052 /* Ignore LTREL_BASE references. */
8053 if (GET_CODE (x) == UNSPEC
8054 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8056 /* Likewise POOL_ENTRY insns. */
8057 if (GET_CODE (x) == UNSPEC_VOLATILE
8058 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8061 gcc_assert (GET_CODE (x) != SYMBOL_REF
8062 || !CONSTANT_POOL_ADDRESS_P (x));
8064 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8066 rtx sym = XVECEXP (x, 0, 0);
8067 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8068 && CONSTANT_POOL_ADDRESS_P (sym));
8070 if (*ref == NULL_RTX)
8073 gcc_assert (*ref == sym);
8078 fmt = GET_RTX_FORMAT (GET_CODE (x));
8079 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8083 find_constant_pool_ref (XEXP (x, i), ref);
8085 else if (fmt[i] == 'E')
8087 for (j = 0; j < XVECLEN (x, i); j++)
8088 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8093 /* Replace every reference to the annotated literal pool
8094 symbol REF in X by its base plus OFFSET. */
8097 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8102 gcc_assert (*x != ref);
8104 if (GET_CODE (*x) == UNSPEC
8105 && XINT (*x, 1) == UNSPEC_LTREF
8106 && XVECEXP (*x, 0, 0) == ref)
8108 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8112 if (GET_CODE (*x) == PLUS
8113 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8114 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8115 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8116 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8118 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8119 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8123 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8124 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8128 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8130 else if (fmt[i] == 'E')
8132 for (j = 0; j < XVECLEN (*x, i); j++)
8133 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8138 /* Check whether X contains an UNSPEC_LTREL_BASE.
8139 Return its constant pool symbol if found, NULL_RTX otherwise. */
8142 find_ltrel_base (rtx x)
8147 if (GET_CODE (x) == UNSPEC
8148 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8149 return XVECEXP (x, 0, 0);
8151 fmt = GET_RTX_FORMAT (GET_CODE (x));
8152 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8156 rtx fnd = find_ltrel_base (XEXP (x, i));
8160 else if (fmt[i] == 'E')
8162 for (j = 0; j < XVECLEN (x, i); j++)
8164 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8174 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8177 replace_ltrel_base (rtx *x)
8182 if (GET_CODE (*x) == UNSPEC
8183 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8185 *x = XVECEXP (*x, 0, 1);
8189 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8190 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8194 replace_ltrel_base (&XEXP (*x, i));
8196 else if (fmt[i] == 'E')
8198 for (j = 0; j < XVECLEN (*x, i); j++)
8199 replace_ltrel_base (&XVECEXP (*x, i, j));
8205 /* We keep a list of constants which we have to add to internal
8206 constant tables in the middle of large functions. */
8208 #define NR_C_MODES 32
8209 machine_mode constant_modes[NR_C_MODES] =
8211 TFmode, TImode, TDmode,
8212 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8213 V4SFmode, V2DFmode, V1TFmode,
8214 DFmode, DImode, DDmode,
8215 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8216 SFmode, SImode, SDmode,
8217 V4QImode, V2HImode, V1SImode, V1SFmode,
8226 struct constant *next;
8228 rtx_code_label *label;
8231 struct constant_pool
8233 struct constant_pool *next;
8234 rtx_insn *first_insn;
8235 rtx_insn *pool_insn;
8237 rtx_insn *emit_pool_after;
8239 struct constant *constants[NR_C_MODES];
8240 struct constant *execute;
8241 rtx_code_label *label;
8245 /* Allocate new constant_pool structure. */
8247 static struct constant_pool *
8248 s390_alloc_pool (void)
8250 struct constant_pool *pool;
8253 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8255 for (i = 0; i < NR_C_MODES; i++)
8256 pool->constants[i] = NULL;
8258 pool->execute = NULL;
8259 pool->label = gen_label_rtx ();
8260 pool->first_insn = NULL;
8261 pool->pool_insn = NULL;
8262 pool->insns = BITMAP_ALLOC (NULL);
8264 pool->emit_pool_after = NULL;
8269 /* Create new constant pool covering instructions starting at INSN
8270 and chain it to the end of POOL_LIST. */
8272 static struct constant_pool *
8273 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8275 struct constant_pool *pool, **prev;
8277 pool = s390_alloc_pool ();
8278 pool->first_insn = insn;
8280 for (prev = pool_list; *prev; prev = &(*prev)->next)
8287 /* End range of instructions covered by POOL at INSN and emit
8288 placeholder insn representing the pool. */
8291 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8293 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8296 insn = get_last_insn ();
8298 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8299 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8302 /* Add INSN to the list of insns covered by POOL. */
8305 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8307 bitmap_set_bit (pool->insns, INSN_UID (insn));
8310 /* Return pool out of POOL_LIST that covers INSN. */
8312 static struct constant_pool *
8313 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8315 struct constant_pool *pool;
8317 for (pool = pool_list; pool; pool = pool->next)
8318 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8324 /* Add constant VAL of mode MODE to the constant pool POOL. */
8327 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8332 for (i = 0; i < NR_C_MODES; i++)
8333 if (constant_modes[i] == mode)
8335 gcc_assert (i != NR_C_MODES);
8337 for (c = pool->constants[i]; c != NULL; c = c->next)
8338 if (rtx_equal_p (val, c->value))
8343 c = (struct constant *) xmalloc (sizeof *c);
8345 c->label = gen_label_rtx ();
8346 c->next = pool->constants[i];
8347 pool->constants[i] = c;
8348 pool->size += GET_MODE_SIZE (mode);
8352 /* Return an rtx that represents the offset of X from the start of
8356 s390_pool_offset (struct constant_pool *pool, rtx x)
8360 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8361 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8362 UNSPEC_POOL_OFFSET);
8363 return gen_rtx_CONST (GET_MODE (x), x);
8366 /* Find constant VAL of mode MODE in the constant pool POOL.
8367 Return an RTX describing the distance from the start of
8368 the pool to the location of the new constant. */
8371 s390_find_constant (struct constant_pool *pool, rtx val,
8377 for (i = 0; i < NR_C_MODES; i++)
8378 if (constant_modes[i] == mode)
8380 gcc_assert (i != NR_C_MODES);
8382 for (c = pool->constants[i]; c != NULL; c = c->next)
8383 if (rtx_equal_p (val, c->value))
8388 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8391 /* Check whether INSN is an execute. Return the label_ref to its
8392 execute target template if so, NULL_RTX otherwise. */
8395 s390_execute_label (rtx insn)
8397 if (NONJUMP_INSN_P (insn)
8398 && GET_CODE (PATTERN (insn)) == PARALLEL
8399 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8400 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8401 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8406 /* Add execute target for INSN to the constant pool POOL. */
8409 s390_add_execute (struct constant_pool *pool, rtx insn)
8413 for (c = pool->execute; c != NULL; c = c->next)
8414 if (INSN_UID (insn) == INSN_UID (c->value))
8419 c = (struct constant *) xmalloc (sizeof *c);
8421 c->label = gen_label_rtx ();
8422 c->next = pool->execute;
8428 /* Find execute target for INSN in the constant pool POOL.
8429 Return an RTX describing the distance from the start of
8430 the pool to the location of the execute target. */
8433 s390_find_execute (struct constant_pool *pool, rtx insn)
8437 for (c = pool->execute; c != NULL; c = c->next)
8438 if (INSN_UID (insn) == INSN_UID (c->value))
8443 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8446 /* For an execute INSN, extract the execute target template. */
8449 s390_execute_target (rtx insn)
8451 rtx pattern = PATTERN (insn);
8452 gcc_assert (s390_execute_label (insn));
8454 if (XVECLEN (pattern, 0) == 2)
8456 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8460 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8463 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8464 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8466 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8472 /* Indicate that INSN cannot be duplicated. This is the case for
8473 execute insns that carry a unique label. */
8476 s390_cannot_copy_insn_p (rtx_insn *insn)
8478 rtx label = s390_execute_label (insn);
8479 return label && label != const0_rtx;
8482 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8483 do not emit the pool base label. */
8486 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8489 rtx_insn *insn = pool->pool_insn;
8492 /* Switch to rodata section. */
8493 if (TARGET_CPU_ZARCH)
8495 insn = emit_insn_after (gen_pool_section_start (), insn);
8496 INSN_ADDRESSES_NEW (insn, -1);
8499 /* Ensure minimum pool alignment. */
8500 if (TARGET_CPU_ZARCH)
8501 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8503 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8504 INSN_ADDRESSES_NEW (insn, -1);
8506 /* Emit pool base label. */
8509 insn = emit_label_after (pool->label, insn);
8510 INSN_ADDRESSES_NEW (insn, -1);
8513 /* Dump constants in descending alignment requirement order,
8514 ensuring proper alignment for every constant. */
8515 for (i = 0; i < NR_C_MODES; i++)
8516 for (c = pool->constants[i]; c; c = c->next)
8518 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8519 rtx value = copy_rtx (c->value);
8520 if (GET_CODE (value) == CONST
8521 && GET_CODE (XEXP (value, 0)) == UNSPEC
8522 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8523 && XVECLEN (XEXP (value, 0), 0) == 1)
8524 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8526 insn = emit_label_after (c->label, insn);
8527 INSN_ADDRESSES_NEW (insn, -1);
8529 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8530 gen_rtvec (1, value),
8531 UNSPECV_POOL_ENTRY);
8532 insn = emit_insn_after (value, insn);
8533 INSN_ADDRESSES_NEW (insn, -1);
8536 /* Ensure minimum alignment for instructions. */
8537 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8538 INSN_ADDRESSES_NEW (insn, -1);
8540 /* Output in-pool execute template insns. */
8541 for (c = pool->execute; c; c = c->next)
8543 insn = emit_label_after (c->label, insn);
8544 INSN_ADDRESSES_NEW (insn, -1);
8546 insn = emit_insn_after (s390_execute_target (c->value), insn);
8547 INSN_ADDRESSES_NEW (insn, -1);
8550 /* Switch back to previous section. */
8551 if (TARGET_CPU_ZARCH)
8553 insn = emit_insn_after (gen_pool_section_end (), insn);
8554 INSN_ADDRESSES_NEW (insn, -1);
8557 insn = emit_barrier_after (insn);
8558 INSN_ADDRESSES_NEW (insn, -1);
8560 /* Remove placeholder insn. */
8561 remove_insn (pool->pool_insn);
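/* Illustrative shape of the emitted pool (zarch case, sketch):
       .section .rodata
       .align  8
     .Lpool:
       <constants, in descending alignment order>
       .align  2
     .Lexec0:
       <execute target template insns>
       .previous
   followed by a barrier; the placeholder "pool" insn is gone.  */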
8564 /* Free all memory used by POOL. */
8567 s390_free_pool (struct constant_pool *pool)
8569 struct constant *c, *next;
8572 for (i = 0; i < NR_C_MODES; i++)
8573 for (c = pool->constants[i]; c; c = next)
8579 for (c = pool->execute; c; c = next)
8585 BITMAP_FREE (pool->insns);
8590 /* Collect main literal pool. Return NULL on overflow. */
8592 static struct constant_pool *
8593 s390_mainpool_start (void)
8595 struct constant_pool *pool;
8598 pool = s390_alloc_pool ();
8600 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8602 if (NONJUMP_INSN_P (insn)
8603 && GET_CODE (PATTERN (insn)) == SET
8604 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8605 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8607 /* There might be two main_pool instructions if base_reg
8608 is call-clobbered; one for shrink-wrapped code and one
8609 for the rest. We want to keep the first. */
8610 if (pool->pool_insn)
8612 insn = PREV_INSN (insn);
8613 delete_insn (NEXT_INSN (insn));
8616 pool->pool_insn = insn;
8619 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8621 s390_add_execute (pool, insn);
8623 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8625 rtx pool_ref = NULL_RTX;
8626 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8629 rtx constant = get_pool_constant (pool_ref);
8630 machine_mode mode = get_pool_mode (pool_ref);
8631 s390_add_constant (pool, constant, mode);
8635 /* If hot/cold partitioning is enabled we have to make sure that
8636 the literal pool is emitted in the same section where the
8637 initialization of the literal pool base pointer takes place.
8638 emit_pool_after is only used in the non-overflow case on non-Z
8639 CPUs where we can emit the literal pool at the end of the
8640 function body within the text section. */
8642 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8643 && !pool->emit_pool_after)
8644 pool->emit_pool_after = PREV_INSN (insn);
8647 gcc_assert (pool->pool_insn || pool->size == 0);
8649 if (pool->size >= 4096)
8651 /* We're going to chunkify the pool, so remove the main
8652 pool placeholder insn. */
8653 remove_insn (pool->pool_insn);
8655 s390_free_pool (pool);
8659 /* If the function ends with the section where the literal pool
8660 should be emitted, set the marker to its end. */
8661 if (pool && !pool->emit_pool_after)
8662 pool->emit_pool_after = get_last_insn ();
8667 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8668 Modify the current function to output the pool constants as well as
8669 the pool register setup instruction. */
8672 s390_mainpool_finish (struct constant_pool *pool)
8674 rtx base_reg = cfun->machine->base_reg;
8676 /* If the pool is empty, we're done. */
8677 if (pool->size == 0)
8679 /* We don't actually need a base register after all. */
8680 cfun->machine->base_reg = NULL_RTX;
8682 if (pool->pool_insn)
8683 remove_insn (pool->pool_insn);
8684 s390_free_pool (pool);
8688 /* We need correct insn addresses. */
8689 shorten_branches (get_insns ());
8691 /* On zSeries, we use a LARL to load the pool register. The pool is
8692 located in the .rodata section, so we emit it after the function. */
8693 if (TARGET_CPU_ZARCH)
8695 rtx set = gen_main_base_64 (base_reg, pool->label);
8696 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8697 INSN_ADDRESSES_NEW (insn, -1);
8698 remove_insn (pool->pool_insn);
8700 insn = get_last_insn ();
8701 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8702 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8704 s390_dump_pool (pool, 0);
8707 /* On S/390, if the total size of the function's code plus literal pool
8708 does not exceed 4096 bytes, we use BASR to set up a function base
8709 pointer, and emit the literal pool at the end of the function. */
8710 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8711 + pool->size + 8 /* alignment slop */ < 4096)
8713 rtx set = gen_main_base_31_small (base_reg, pool->label);
8714 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8715 INSN_ADDRESSES_NEW (insn, -1);
8716 remove_insn (pool->pool_insn);
8718 insn = emit_label_after (pool->label, insn);
8719 INSN_ADDRESSES_NEW (insn, -1);
8721 /* emit_pool_after will be set by s390_mainpool_start to the
8722 last insn of the section where the literal pool should be
8724 insn = pool->emit_pool_after;
8726 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8727 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8729 s390_dump_pool (pool, 1);
8732 /* Otherwise, we emit an inline literal pool and use BASR to branch
8733 over it, setting up the pool register at the same time. */
8736 rtx_code_label *pool_end = gen_label_rtx ();
8738 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8739 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8740 JUMP_LABEL (insn) = pool_end;
8741 INSN_ADDRESSES_NEW (insn, -1);
8742 remove_insn (pool->pool_insn);
8744 insn = emit_label_after (pool->label, insn);
8745 INSN_ADDRESSES_NEW (insn, -1);
8747 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8748 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8750 insn = emit_label_after (pool_end, pool->pool_insn);
8751 INSN_ADDRESSES_NEW (insn, -1);
8753 s390_dump_pool (pool, 1);
8757 /* Replace all literal pool references. */
8759 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8762 replace_ltrel_base (&PATTERN (insn));
8764 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8766 rtx addr, pool_ref = NULL_RTX;
8767 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8770 if (s390_execute_label (insn))
8771 addr = s390_find_execute (pool, insn);
8773 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8774 get_pool_mode (pool_ref));
8776 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8777 INSN_CODE (insn) = -1;
8783 /* Free the pool. */
8784 s390_free_pool (pool);
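/* Summary of the three base setup strategies chosen above (sketch):
     zarch:            larl  %r13,.Lpool    # pool lives in .rodata
     31 bit, small fn: basr  %r13,0         # pool follows the code
     31 bit, large fn: basr plus a branch around an inline pool
                       emitted in the middle of the text section.  */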
8787 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8788 We have decided we cannot use this pool, so revert all changes
8789 to the current function that were done by s390_mainpool_start. */
8791 s390_mainpool_cancel (struct constant_pool *pool)
8793 /* We didn't actually change the instruction stream, so simply
8794 free the pool memory. */
8795 s390_free_pool (pool);
8799 /* Chunkify the literal pool. */
8801 #define S390_POOL_CHUNK_MIN 0xc00
8802 #define S390_POOL_CHUNK_MAX 0xe00
8804 static struct constant_pool *
8805 s390_chunkify_start (void)
8807 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8810 rtx pending_ltrel = NULL_RTX;
8813 rtx (*gen_reload_base) (rtx, rtx) =
8814 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8817 /* We need correct insn addresses. */
8819 shorten_branches (get_insns ());
8821 /* Scan all insns and move literals to pool chunks. */
8823 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8825 bool section_switch_p = false;
8827 /* Check for pending LTREL_BASE. */
8830 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8833 gcc_assert (ltrel_base == pending_ltrel);
8834 pending_ltrel = NULL_RTX;
8838 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8841 curr_pool = s390_start_pool (&pool_list, insn);
8843 s390_add_execute (curr_pool, insn);
8844 s390_add_pool_insn (curr_pool, insn);
8846 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8848 rtx pool_ref = NULL_RTX;
8849 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8852 rtx constant = get_pool_constant (pool_ref);
8853 machine_mode mode = get_pool_mode (pool_ref);
8856 curr_pool = s390_start_pool (&pool_list, insn);
8858 s390_add_constant (curr_pool, constant, mode);
8859 s390_add_pool_insn (curr_pool, insn);
8861 /* Don't split the pool chunk between a LTREL_OFFSET load
8862 and the corresponding LTREL_BASE. */
8863 if (GET_CODE (constant) == CONST
8864 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8865 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8867 gcc_assert (!pending_ltrel);
8868 pending_ltrel = pool_ref;
8873 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8876 s390_add_pool_insn (curr_pool, insn);
8877 /* An LTREL_BASE must follow within the same basic block. */
8878 gcc_assert (!pending_ltrel);
8882 switch (NOTE_KIND (insn))
8884 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8885 section_switch_p = true;
8887 case NOTE_INSN_VAR_LOCATION:
8888 case NOTE_INSN_CALL_ARG_LOCATION:
8895 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8896 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8899 if (TARGET_CPU_ZARCH)
8901 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8904 s390_end_pool (curr_pool, NULL);
8909 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8910 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8913 /* We will later have to insert base register reload insns.
8914 Those will have an effect on code size, which we need to
8915 consider here. This calculation makes rather pessimistic
8916 worst-case assumptions. */
8920 if (chunk_size < S390_POOL_CHUNK_MIN
8921 && curr_pool->size < S390_POOL_CHUNK_MIN
8922 && !section_switch_p)
8925 /* Pool chunks can only be inserted after BARRIERs ... */
8926 if (BARRIER_P (insn))
8928 s390_end_pool (curr_pool, insn);
8933 /* ... so if we don't find one in time, create one. */
8934 else if (chunk_size > S390_POOL_CHUNK_MAX
8935 || curr_pool->size > S390_POOL_CHUNK_MAX
8936 || section_switch_p)
8938 rtx_insn *label, *jump, *barrier, *next, *prev;
8940 if (!section_switch_p)
8942 /* We can insert the barrier only after a 'real' insn. */
8943 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8945 if (get_attr_length (insn) == 0)
8947 /* Don't separate LTREL_BASE from the corresponding
8948 LTREL_OFFSET load. */
8955 next = NEXT_INSN (insn);
8959 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8960 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8964 gcc_assert (!pending_ltrel);
8966 /* The old pool has to end before the section switch
8967 note in order to make it part of the current section. */
8969 insn = PREV_INSN (insn);
8972 label = gen_label_rtx ();
8974 if (prev && NOTE_P (prev))
8975 prev = prev_nonnote_insn (prev);
8977 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8978 INSN_LOCATION (prev));
8980 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8981 barrier = emit_barrier_after (jump);
8982 insn = emit_label_after (label, barrier);
8983 JUMP_LABEL (jump) = label;
8984 LABEL_NUSES (label) = 1;
8986 INSN_ADDRESSES_NEW (jump, -1);
8987 INSN_ADDRESSES_NEW (barrier, -1);
8988 INSN_ADDRESSES_NEW (insn, -1);
8990 s390_end_pool (curr_pool, barrier);
8998 s390_end_pool (curr_pool, NULL);
8999 gcc_assert (!pending_ltrel);
9001 /* Find all labels that are branched into
9002 from an insn belonging to a different chunk. */
9004 far_labels = BITMAP_ALLOC (NULL);
9006 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9008 rtx_jump_table_data *table;
9010 /* Labels marked with LABEL_PRESERVE_P can be target
9011 of non-local jumps, so we have to mark them.
9012 The same holds for named labels.
9014 Don't do that, however, if it is the label before a jump table. */
9018 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9020 rtx_insn *vec_insn = NEXT_INSN (insn);
9021 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9022 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9024 /* Check potential targets in a table jump (casesi_jump). */
9025 else if (tablejump_p (insn, NULL, &table))
9027 rtx vec_pat = PATTERN (table);
9028 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9030 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9032 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9034 if (s390_find_pool (pool_list, label)
9035 != s390_find_pool (pool_list, insn))
9036 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9039 /* If we have a direct jump (conditional or unconditional),
9040 check all potential targets. */
9041 else if (JUMP_P (insn))
9043 rtx pat = PATTERN (insn);
9045 if (GET_CODE (pat) == PARALLEL)
9046 pat = XVECEXP (pat, 0, 0);
9048 if (GET_CODE (pat) == SET)
9050 rtx label = JUMP_LABEL (insn);
9051 if (label && !ANY_RETURN_P (label))
9053 if (s390_find_pool (pool_list, label)
9054 != s390_find_pool (pool_list, insn))
9055 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9061 /* Insert base register reload insns before every pool. */
9063 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9065 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9067 rtx_insn *insn = curr_pool->first_insn;
9068 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9071 /* Insert base register reload insns at every far label. */
9073 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9075 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9077 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9080 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9082 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9087 BITMAP_FREE (far_labels);
9090 /* Recompute insn addresses. */
9092 init_insn_lengths ();
9093 shorten_branches (get_insns ());
9098 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9099 After we have decided to use this list, finish implementing
9100 all changes to the current function as required. */
9103 s390_chunkify_finish (struct constant_pool *pool_list)
9105 struct constant_pool *curr_pool = NULL;
9109 /* Replace all literal pool references. */
9111 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9114 replace_ltrel_base (&PATTERN (insn));
9116 curr_pool = s390_find_pool (pool_list, insn);
9120 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9122 rtx addr, pool_ref = NULL_RTX;
9123 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9126 if (s390_execute_label (insn))
9127 addr = s390_find_execute (curr_pool, insn);
9129 addr = s390_find_constant (curr_pool,
9130 get_pool_constant (pool_ref),
9131 get_pool_mode (pool_ref));
9133 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9134 INSN_CODE (insn) = -1;
9139 /* Dump out all literal pools. */
9141 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9142 s390_dump_pool (curr_pool, 0);
9144 /* Free pool list. */
9148 struct constant_pool *next = pool_list->next;
9149 s390_free_pool (pool_list);
9154 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9155 We have decided we cannot use this list, so revert all changes
9156 to the current function that were done by s390_chunkify_start. */
9159 s390_chunkify_cancel (struct constant_pool *pool_list)
9161 struct constant_pool *curr_pool = NULL;
9164 /* Remove all pool placeholder insns. */
9166 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9168 /* Did we insert an extra barrier? Remove it. */
9169 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9170 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9171 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9173 if (jump && JUMP_P (jump)
9174 && barrier && BARRIER_P (barrier)
9175 && label && LABEL_P (label)
9176 && GET_CODE (PATTERN (jump)) == SET
9177 && SET_DEST (PATTERN (jump)) == pc_rtx
9178 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9179 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9182 remove_insn (barrier);
9183 remove_insn (label);
9186 remove_insn (curr_pool->pool_insn);
9189 /* Remove all base register reload insns. */
9191 for (insn = get_insns (); insn; )
9193 rtx_insn *next_insn = NEXT_INSN (insn);
9195 if (NONJUMP_INSN_P (insn)
9196 && GET_CODE (PATTERN (insn)) == SET
9197 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9198 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9204 /* Free pool list. */
9208 struct constant_pool *next = pool_list->next;
9209 s390_free_pool (pool_list);
9214 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9217 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9219 switch (GET_MODE_CLASS (mode))
9222 case MODE_DECIMAL_FLOAT:
9223 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9225 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
9229 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9230 mark_symbol_refs_as_used (exp);
9233 case MODE_VECTOR_INT:
9234 case MODE_VECTOR_FLOAT:
9237 machine_mode inner_mode;
9238 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9240 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9241 for (i = 0; i < XVECLEN (exp, 0); i++)
9242 s390_output_pool_entry (XVECEXP (exp, 0, i),
9246 : GET_MODE_BITSIZE (inner_mode));
9256 /* Return an RTL expression representing the value of the return address
9257 for the frame COUNT steps up from the current frame. FRAME is the
9258 frame pointer of that frame. */
9261 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9266 /* Without backchain, we fail for all but the current frame. */
9268 if (!TARGET_BACKCHAIN && count > 0)
9271 /* For the current frame, we need to make sure the initial
9272 value of RETURN_REGNUM is actually saved. */
9276 /* On non-z architectures branch splitting could overwrite r14. */
9277 if (TARGET_CPU_ZARCH)
9278 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9281 cfun_frame_layout.save_return_addr_p = true;
9282 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9286 if (TARGET_PACKED_STACK)
9287 offset = -2 * UNITS_PER_LONG;
9289 offset = RETURN_REGNUM * UNITS_PER_LONG;
9291 addr = plus_constant (Pmode, frame, offset);
9292 addr = memory_address (Pmode, addr);
9293 return gen_rtx_MEM (Pmode, addr);
9296 /* Return an RTL expression representing the back chain stored in
9297 the current stack frame. */
9300 s390_back_chain_rtx (void)
9304 gcc_assert (TARGET_BACKCHAIN);
9306 if (TARGET_PACKED_STACK)
9307 chain = plus_constant (Pmode, stack_pointer_rtx,
9308 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9310 chain = stack_pointer_rtx;
9312 chain = gen_rtx_MEM (Pmode, chain);
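/* Worked example (editorial): on 64-bit, where STACK_POINTER_OFFSET is
   160 and UNITS_PER_LONG is 8, the packed-stack back chain is read
   from sp + 152, i.e. the last word of the register save area; without
   -mpacked-stack it sits at sp + 0.  */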
9316 /* Find first call clobbered register unused in a function.
9317 This could be used as base register in a leaf function
9318 or for holding the return address before epilogue. */
9321 find_unused_clobbered_reg (void)
9324 for (i = 0; i < 6; i++)
9325 if (!df_regs_ever_live_p (i))
9331 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9332 clobbered hard regs in SETREG. */
9335 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9337 char *regs_ever_clobbered = (char *)data;
9338 unsigned int i, regno;
9339 machine_mode mode = GET_MODE (setreg);
9341 if (GET_CODE (setreg) == SUBREG)
9343 rtx inner = SUBREG_REG (setreg);
9344 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9346 regno = subreg_regno (setreg);
9348 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9349 regno = REGNO (setreg);
9353 for (i = regno;
9354 i < regno + HARD_REGNO_NREGS (regno, mode);
9355 i++)
9356 regs_ever_clobbered[i] = 1;
9359 /* Walks through all basic blocks of the current function looking
9360 for clobbered hard regs using s390_reg_clobbered_rtx. The elements
9361 of the passed array REGS_EVER_CLOBBERED are set to one for
9362 each of those regs. */
9365 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9371 memset (regs_ever_clobbered, 0, 32);
9373 /* For non-leaf functions we have to consider all call clobbered regs to be
9374 clobbered.  */
9377 for (i = 0; i < 32; i++)
9378 regs_ever_clobbered[i] = call_really_used_regs[i];
9381 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9382 this work is done by liveness analysis (mark_regs_live_at_end).
9383 Special care is needed for functions containing landing pads. Landing pads
9384 may use the eh registers, but the code which sets these registers is not
9385 contained in that function. Hence s390_regs_ever_clobbered is not able to
9386 deal with this automatically. */
9387 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9388 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9389 if (crtl->calls_eh_return
9390 || (cfun->machine->has_landing_pad_p
9391 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9392 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9394 /* For nonlocal gotos all call-saved registers have to be saved.
9395 This flag is also set for the unwinding code in libgcc.
9396 See expand_builtin_unwind_init. For regs_ever_live this is done by
9397 reload.  */
9398 if (crtl->saves_all_registers)
9399 for (i = 0; i < 32; i++)
9400 if (!call_really_used_regs[i])
9401 regs_ever_clobbered[i] = 1;
9403 FOR_EACH_BB_FN (cur_bb, cfun)
9405 FOR_BB_INSNS (cur_bb, cur_insn)
9409 if (!INSN_P (cur_insn))
9412 pat = PATTERN (cur_insn);
9414 /* Ignore GPR restore insns. */
9415 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9417 if (GET_CODE (pat) == SET
9418 && GENERAL_REG_P (SET_DEST (pat)))
9421 if (GET_MODE (SET_SRC (pat)) == DImode
9422 && FP_REG_P (SET_SRC (pat)))
9426 if (GET_CODE (SET_SRC (pat)) == MEM)
9431 if (GET_CODE (pat) == PARALLEL
9432 && load_multiple_operation (pat, VOIDmode))
9437 s390_reg_clobbered_rtx,
9438 regs_ever_clobbered);
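/* Editorial sketch of how the routine above is typically consumed;
   note_stores invokes s390_reg_clobbered_rtx once per SET/CLOBBER
   destination of every insn:

       char clobbered[32];
       s390_regs_ever_clobbered (clobbered);
       if (clobbered[RETURN_REGNUM])
         ;  // r14 needs a save slot
*/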
9443 /* Determine the frame area which actually has to be accessed
9444 in the function epilogue. The values are stored at the
9445 given pointers AREA_BOTTOM (address of the lowest used stack
9446 address) and AREA_TOP (address of the first item which does
9447 not belong to the stack frame). */
9450 s390_frame_area (int *area_bottom, int *area_top)
9457 if (cfun_frame_layout.first_restore_gpr != -1)
9459 b = (cfun_frame_layout.gprs_offset
9460 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9461 t = b + (cfun_frame_layout.last_restore_gpr
9462 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9465 if (TARGET_64BIT && cfun_save_high_fprs_p)
9467 b = MIN (b, cfun_frame_layout.f8_offset);
9468 t = MAX (t, (cfun_frame_layout.f8_offset
9469 + cfun_frame_layout.high_fprs * 8));
9474 if (cfun_fpr_save_p (FPR4_REGNUM))
9476 b = MIN (b, cfun_frame_layout.f4_offset);
9477 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9479 if (cfun_fpr_save_p (FPR6_REGNUM))
9481 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9482 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9488 /* Update gpr_save_slots in the frame layout trying to make use of
9489 FPRs as GPR save slots.
9490 This is a helper routine of s390_register_info. */
9493 s390_register_info_gprtofpr ()
9495 int save_reg_slot = FPR0_REGNUM;
9498 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9501 for (i = 15; i >= 6; i--)
9503 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9506 /* Advance to the next FP register which can be used as a
9507 GPR save slot.  */
9508 while ((!call_really_used_regs[save_reg_slot]
9509 || df_regs_ever_live_p (save_reg_slot)
9510 || cfun_fpr_save_p (save_reg_slot))
9511 && FP_REGNO_P (save_reg_slot))
9513 if (!FP_REGNO_P (save_reg_slot))
9515 /* We only want to use ldgr/lgdr if we can get rid of
9516 stm/lm entirely. So undo the gpr slot allocation in
9517 case we ran out of FPR save slots. */
9518 for (j = 6; j <= 15; j++)
9519 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9520 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9523 cfun_gpr_save_slot (i) = save_reg_slot++;
9527 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9528 stdarg.
9529 This is a helper routine for s390_register_info. */
9532 s390_register_info_stdarg_fpr ()
9538 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9539 f0, f2, f4 and f6 for 64 bit.  */
9540 if (!cfun->stdarg
9541 || !TARGET_HARD_FLOAT
9542 || !cfun->va_list_fpr_size
9543 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9546 min_fpr = crtl->args.info.fprs;
9547 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9548 if (max_fpr >= FP_ARG_NUM_REG)
9549 max_fpr = FP_ARG_NUM_REG - 1;
9551 /* FPR argument regs start at f0. */
9552 min_fpr += FPR0_REGNUM;
9553 max_fpr += FPR0_REGNUM;
9555 for (i = min_fpr; i <= max_fpr; i++)
9556 cfun_set_fpr_save (i);
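/* Worked example (editorial): a 64-bit stdarg function whose named
   arguments consumed one FPR (crtl->args.info.fprs == 1), with the
   stdarg pass requesting all FPR save slots, gets min_fpr == 1 and
   max_fpr == FP_ARG_NUM_REG - 1 == 3; after adding FPR0_REGNUM this
   marks the argument FPRs f2, f4 and f6 (hard regnos count f0, f2,
   f4, f6 consecutively).  */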
9559 /* Reserve the GPR save slots for GPRs which need to be saved due to
9560 stdarg.
9561 This is a helper routine for s390_register_info. */
9564 s390_register_info_stdarg_gpr ()
9570 if (!cfun->stdarg
9571 || !cfun->va_list_gpr_size
9572 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9575 min_gpr = crtl->args.info.gprs;
9576 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9577 if (max_gpr >= GP_ARG_NUM_REG)
9578 max_gpr = GP_ARG_NUM_REG - 1;
9580 /* GPR argument regs start at r2. */
9581 min_gpr += GPR2_REGNUM;
9582 max_gpr += GPR2_REGNUM;
9584 /* If r6 was supposed to be saved into an FPR and now needs to go to
9585 the stack for vararg we have to adjust the restore range to make
9586 sure that the restore is done from stack as well. */
9587 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9588 && min_gpr <= GPR6_REGNUM
9589 && max_gpr >= GPR6_REGNUM)
9591 if (cfun_frame_layout.first_restore_gpr == -1
9592 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9593 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9594 if (cfun_frame_layout.last_restore_gpr == -1
9595 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9596 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9599 if (cfun_frame_layout.first_save_gpr == -1
9600 || cfun_frame_layout.first_save_gpr > min_gpr)
9601 cfun_frame_layout.first_save_gpr = min_gpr;
9603 if (cfun_frame_layout.last_save_gpr == -1
9604 || cfun_frame_layout.last_save_gpr < max_gpr)
9605 cfun_frame_layout.last_save_gpr = max_gpr;
9607 for (i = min_gpr; i <= max_gpr; i++)
9608 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9611 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9612 prologue and epilogue. */
9615 s390_register_info_set_ranges ()
9619 /* Find the first and the last save slot supposed to use the stack
9620 to set the restore range.
9621 Vararg regs might be marked as save to stack but only the
9622 call-saved regs really need restoring (i.e. r6). This code
9623 assumes that the vararg regs have not yet been recorded in
9624 cfun_gpr_save_slot. */
9625 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9626 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9627 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9628 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9629 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9630 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
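/* Worked example (editorial): if only r11 and r14 were assigned
   SAVE_SLOT_STACK, the two scans above leave i == 11 and j == 14, so
   a single stm(g)/lm(g) pair covers r11-r14 even though r12 and r13
   did not strictly need saving.  */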
9633 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9634 for registers which need to be saved in function prologue.
9635 This function can be used until the insns emitted for save/restore
9636 of the regs are visible in the RTL stream. */
9639 s390_register_info ()
9642 char clobbered_regs[32];
9644 gcc_assert (!epilogue_completed);
9646 if (reload_completed)
9647 /* After reload we rely on our own routine to determine which
9648 registers need saving. */
9649 s390_regs_ever_clobbered (clobbered_regs);
9651 /* During reload we use regs_ever_live as a base since reload
9652 does changes in there which we otherwise would not be aware
9653 of.  */
9654 for (i = 0; i < 32; i++)
9655 clobbered_regs[i] = df_regs_ever_live_p (i);
9657 for (i = 0; i < 32; i++)
9658 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9660 /* Mark the call-saved FPRs which need to be saved.
9661 This needs to be done before checking the special GPRs since the
9662 stack pointer usage depends on whether high FPRs have to be saved
9663 or not.  */
9664 cfun_frame_layout.fpr_bitmap = 0;
9665 cfun_frame_layout.high_fprs = 0;
9666 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9667 if (clobbered_regs[i] && !call_really_used_regs[i])
9669 cfun_set_fpr_save (i);
9670 if (i >= FPR8_REGNUM)
9671 cfun_frame_layout.high_fprs++;
9674 /* Register 12 is used for GOT address, but also as temp in prologue
9675 for split-stack stdarg functions (unless r14 is available). */
9676 clobbered_regs[12]
9677 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9678 || (flag_split_stack && cfun->stdarg
9679 && (crtl->is_leaf || TARGET_TPF_PROFILING
9680 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9682 clobbered_regs[BASE_REGNUM]
9683 |= (cfun->machine->base_reg
9684 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9686 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9687 |= !!frame_pointer_needed;
9689 /* On pre z900 machines this might take until machine dependent
9690 reorg to decide.
9691 save_return_addr_p will only be set on non-zarch machines so
9692 there is no risk that r14 goes into an FPR instead of a stack
9693 slot.  */
9694 clobbered_regs[RETURN_REGNUM]
9695 |= (!crtl->is_leaf
9696 || TARGET_TPF_PROFILING
9697 || cfun->machine->split_branches_pending_p
9698 || cfun_frame_layout.save_return_addr_p
9699 || crtl->calls_eh_return);
9701 clobbered_regs[STACK_POINTER_REGNUM]
9702 |= (!crtl->is_leaf
9703 || TARGET_TPF_PROFILING
9704 || cfun_save_high_fprs_p
9705 || get_frame_size () > 0
9706 || (reload_completed && cfun_frame_layout.frame_size > 0)
9707 || cfun->calls_alloca);
9709 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9711 for (i = 6; i < 16; i++)
9712 if (clobbered_regs[i])
9713 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9715 s390_register_info_stdarg_fpr ();
9716 s390_register_info_gprtofpr ();
9717 s390_register_info_set_ranges ();
9718 /* stdarg functions might need to save GPRs 2 to 6. This might
9719 override the GPR->FPR save decision made by
9720 s390_register_info_gprtofpr for r6 since vararg regs must go to
9721 the stack.  */
9722 s390_register_info_stdarg_gpr ();
9725 /* This function is called by s390_optimize_prologue in order to get
9726 rid of unnecessary GPR save/restore instructions. The register info
9727 for the GPRs is re-computed and the ranges are re-calculated. */
9730 s390_optimize_register_info ()
9732 char clobbered_regs[32];
9735 gcc_assert (epilogue_completed);
9736 gcc_assert (!cfun->machine->split_branches_pending_p);
9738 s390_regs_ever_clobbered (clobbered_regs);
9740 for (i = 0; i < 32; i++)
9741 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9743 /* There is still special treatment needed for cases invisible to
9744 s390_regs_ever_clobbered. */
9745 clobbered_regs[RETURN_REGNUM]
9746 |= (TARGET_TPF_PROFILING
9747 /* When expanding builtin_return_addr in ESA mode we do not
9748 know whether r14 will later be needed as scratch reg when
9749 doing branch splitting. So the builtin always accesses the
9750 r14 save slot and we need to stick to the save/restore
9751 decision for r14 even if it turns out that it didn't get
9752 clobbered.  */
9753 || cfun_frame_layout.save_return_addr_p
9754 || crtl->calls_eh_return);
9756 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
9758 for (i = 6; i < 16; i++)
9759 if (!clobbered_regs[i])
9760 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
9762 s390_register_info_set_ranges ();
9763 s390_register_info_stdarg_gpr ();
9766 /* Fill cfun->machine with info about frame of current function. */
9769 s390_frame_info (void)
9771 HOST_WIDE_INT lowest_offset;
9773 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9774 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9776 /* The va_arg builtin uses a constant distance of 16 *
9777 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9778 pointer. So even if we are going to save the stack pointer in an
9779 FPR we need the stack space in order to keep the offsets
9780 correct.  */
9781 if (cfun->stdarg && cfun_save_arg_fprs_p)
9783 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9785 if (cfun_frame_layout.first_save_gpr_slot == -1)
9786 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9789 cfun_frame_layout.frame_size = get_frame_size ();
9790 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9791 fatal_error (input_location,
9792 "total size of local variables exceeds architecture limit");
9794 if (!TARGET_PACKED_STACK)
9796 /* Fixed stack layout. */
9797 cfun_frame_layout.backchain_offset = 0;
9798 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9799 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9800 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9801 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9802 * UNITS_PER_LONG);
9804 else if (TARGET_BACKCHAIN)
9806 /* Kernel stack layout - packed stack, backchain, no float.  */
9807 gcc_assert (TARGET_SOFT_FLOAT);
9808 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9811 /* The distance between the backchain and the return address
9812 save slot must not change. So we always need a slot for the
9813 stack pointer which resides in between. */
9814 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9816 cfun_frame_layout.gprs_offset
9817 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9819 /* FPRs will not be saved. Nevertheless pick sane values to
9820 keep area calculations valid. */
9821 cfun_frame_layout.f0_offset =
9822 cfun_frame_layout.f4_offset =
9823 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9829 /* Packed stack layout without backchain. */
9831 /* With stdarg FPRs need their dedicated slots. */
9832 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9833 : (cfun_fpr_save_p (FPR4_REGNUM) +
9834 cfun_fpr_save_p (FPR6_REGNUM)));
9835 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9837 num_fprs = (cfun->stdarg ? 2
9838 : (cfun_fpr_save_p (FPR0_REGNUM)
9839 + cfun_fpr_save_p (FPR2_REGNUM)));
9840 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9842 cfun_frame_layout.gprs_offset
9843 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9845 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9846 - cfun_frame_layout.high_fprs * 8);
9849 if (cfun_save_high_fprs_p)
9850 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9853 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9855 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9856 sized area at the bottom of the stack. This is required also for
9857 leaf functions. When GCC generates a local stack reference it
9858 will always add STACK_POINTER_OFFSET to all these references. */
9859 if (crtl->is_leaf
9860 && !TARGET_TPF_PROFILING
9861 && cfun_frame_layout.frame_size == 0
9862 && !cfun->calls_alloca)
9865 /* Calculate the number of bytes we have used in our own register
9866 save area. With the packed stack layout we can re-use the
9867 remaining bytes for normal stack elements. */
9869 if (TARGET_PACKED_STACK)
9870 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9871 cfun_frame_layout.f4_offset),
9872 cfun_frame_layout.gprs_offset);
9873 else
9874 lowest_offset = 0;
9876 if (TARGET_BACKCHAIN)
9877 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9879 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9881 /* If under 31 bit an odd number of gprs has to be saved we have to
9882 adjust the frame size to sustain 8 byte alignment of stack
9883 frames.  */
9884 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9885 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9886 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
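/* Worked example (editorial): STACK_BOUNDARY is 64 bits, i.e. 8 bytes,
   so a raw frame size of 100 is rounded up by the statement above to
   (100 + 7) & ~7 == 104.  */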
9889 /* Generate frame layout. Fills in register and frame data for the current
9890 function in cfun->machine. This routine can be called multiple times;
9891 it will re-do the complete frame layout every time. */
9894 s390_init_frame_layout (void)
9896 HOST_WIDE_INT frame_size;
9899 /* After LRA the frame layout is supposed to be read-only and should
9900 not be re-computed. */
9901 if (reload_completed)
9904 /* On S/390 machines, we may need to perform branch splitting, which
9905 will require both base and return address register. We have no
9906 choice but to assume we're going to need them until right at the
9907 end of the machine dependent reorg phase. */
9908 if (!TARGET_CPU_ZARCH)
9909 cfun->machine->split_branches_pending_p = true;
9913 frame_size = cfun_frame_layout.frame_size;
9915 /* Try to predict whether we'll need the base register. */
9916 base_used = cfun->machine->split_branches_pending_p
9917 || crtl->uses_const_pool
9918 || (!DISP_IN_RANGE (frame_size)
9919 && !CONST_OK_FOR_K (frame_size));
9921 /* Decide which register to use as literal pool base. In small
9922 leaf functions, try to use an unused call-clobbered register
9923 as base register to avoid save/restore overhead. */
9925 cfun->machine->base_reg = NULL_RTX;
9931 /* Prefer r5 (most likely to be free). */
9932 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9934 cfun->machine->base_reg =
9935 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9938 s390_register_info ();
9941 while (frame_size != cfun_frame_layout.frame_size);
9944 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9945 the TX is nonescaping. A transaction is considered escaping if
9946 there is at least one path from tbegin returning CC0 to the
9947 function exit block without a tend.
9949 The check so far has some limitations:
9950 - only single tbegin/tend BBs are supported
9951 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9952 - when CC is copied to a GPR and the CC0 check is done with the GPR
9953 this is not supported
9957 s390_optimize_nonescaping_tx (void)
9959 const unsigned int CC0 = 1 << 3;
9960 basic_block tbegin_bb = NULL;
9961 basic_block tend_bb = NULL;
9966 rtx_insn *tbegin_insn = NULL;
9968 if (!cfun->machine->tbegin_p)
9971 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9973 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9978 FOR_BB_INSNS (bb, insn)
9980 rtx ite, cc, pat, target;
9981 unsigned HOST_WIDE_INT mask;
9983 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9986 pat = PATTERN (insn);
9988 if (GET_CODE (pat) == PARALLEL)
9989 pat = XVECEXP (pat, 0, 0);
9991 if (GET_CODE (pat) != SET
9992 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9995 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10001 /* Just return if the tbegin doesn't have clobbers. */
10002 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10005 if (tbegin_bb != NULL)
10008 /* Find the next conditional jump. */
10009 for (tmp = NEXT_INSN (insn);
10011 tmp = NEXT_INSN (tmp))
10013 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10018 ite = SET_SRC (PATTERN (tmp));
10019 if (GET_CODE (ite) != IF_THEN_ELSE)
10022 cc = XEXP (XEXP (ite, 0), 0);
10023 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10024 || GET_MODE (cc) != CCRAWmode
10025 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10028 if (bb->succs->length () != 2)
10031 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10032 if (GET_CODE (XEXP (ite, 0)) == NE)
10036 target = XEXP (ite, 1);
10037 else if (mask == (CC0 ^ 0xf))
10038 target = XEXP (ite, 2);
10046 ei = ei_start (bb->succs);
10047 e1 = ei_safe_edge (ei);
10049 e2 = ei_safe_edge (ei);
10051 if (e2->flags & EDGE_FALLTHRU)
10054 e1 = ei_safe_edge (ei);
10057 if (!(e1->flags & EDGE_FALLTHRU))
10060 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10062 if (tmp == BB_END (bb))
10067 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10069 if (tend_bb != NULL)
10076 /* Either we successfully remove the FPR clobbers here or we are not
10077 able to do anything for this TX.  Both cases don't qualify for
10078 retrying.  */
10079 cfun->machine->tbegin_p = false;
10081 if (tbegin_bb == NULL || tend_bb == NULL)
10084 calculate_dominance_info (CDI_POST_DOMINATORS);
10085 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10086 free_dominance_info (CDI_POST_DOMINATORS);
10091 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10093 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10094 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10095 INSN_CODE (tbegin_insn) = -1;
10096 df_insn_rescan (tbegin_insn);
10101 /* Return true if it is legal to put a value with MODE into REGNO. */
10104 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10106 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10109 switch (REGNO_REG_CLASS (regno))
10112 return ((GET_MODE_CLASS (mode) == MODE_INT
10113 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10115 || s390_vector_mode_supported_p (mode));
10119 && ((GET_MODE_CLASS (mode) == MODE_INT
10120 && s390_class_max_nregs (FP_REGS, mode) == 1)
10122 || s390_vector_mode_supported_p (mode)))
10125 if (REGNO_PAIR_OK (regno, mode))
10127 if (mode == SImode || mode == DImode)
10130 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10135 if (FRAME_REGNO_P (regno) && mode == Pmode)
10140 if (REGNO_PAIR_OK (regno, mode))
10143 || (mode != TFmode && mode != TCmode && mode != TDmode))
10148 if (GET_MODE_CLASS (mode) == MODE_CC)
10152 if (REGNO_PAIR_OK (regno, mode))
10154 if (mode == SImode || mode == Pmode)
10165 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10168 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10170 /* Once we've decided upon a register to use as base register, it must
10171 no longer be used for any other purpose. */
10172 if (cfun->machine->base_reg)
10173 if (REGNO (cfun->machine->base_reg) == old_reg
10174 || REGNO (cfun->machine->base_reg) == new_reg)
10177 /* Prevent regrename from using call-saved regs which haven't
10178 actually been saved. This is necessary since regrename assumes
10179 the backend save/restore decisions are based on
10180 df_regs_ever_live. Since we have our own routine we have to tell
10181 regrename manually about it. */
10182 if (GENERAL_REGNO_P (new_reg)
10183 && !call_really_used_regs[new_reg]
10184 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10190 /* Return nonzero if register REGNO can be used as a scratch register
10191 in peephole2.  */
10194 s390_hard_regno_scratch_ok (unsigned int regno)
10196 /* See s390_hard_regno_rename_ok. */
10197 if (GENERAL_REGNO_P (regno)
10198 && !call_really_used_regs[regno]
10199 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10205 /* Maximum number of registers to represent a value of mode MODE
10206 in a register of class RCLASS. */
10209 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10212 bool reg_pair_required_p = false;
10218 reg_size = TARGET_VX ? 16 : 8;
10220 /* TF and TD modes would fit into a VR but we put them into a
10221 register pair since we do not have 128bit FP instructions on
10222 full VRs.  */
10223 if (TARGET_VX
10224 && SCALAR_FLOAT_MODE_P (mode)
10225 && GET_MODE_SIZE (mode) >= 16)
10226 reg_pair_required_p = true;
10228 /* Even if complex types would fit into a single FPR/VR we force
10229 them into a register pair to deal with the parts more easily.
10230 (FIXME: What about complex ints?) */
10231 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10232 reg_pair_required_p = true;
10238 reg_size = UNITS_PER_WORD;
10242 if (reg_pair_required_p)
10243 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10245 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
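/* Worked examples (editorial, assuming 64-bit with the vector
   facility, so reg_size == 16 for FP_REGS/VEC_REGS):

       s390_class_max_nregs (VEC_REGS, V16QImode)   == 1
       s390_class_max_nregs (FP_REGS, TFmode)       == 2  // forced pair
       s390_class_max_nregs (GENERAL_REGS, TImode)  == 2  // 16 / 8
*/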
10248 /* Return TRUE if changing mode from FROM to TO should not be allowed
10249 for register class CLASS. */
10252 s390_cannot_change_mode_class (machine_mode from_mode,
10253 machine_mode to_mode,
10254 enum reg_class rclass)
10256 machine_mode small_mode;
10257 machine_mode big_mode;
10259 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10262 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10264 small_mode = from_mode;
10265 big_mode = to_mode;
10269 small_mode = to_mode;
10270 big_mode = from_mode;
10273 /* Values residing in VRs are little-endian style. All modes are
10274 placed left-aligned in a VR.  This means that we cannot allow
10275 switching between modes with differing sizes. Also if the vector
10276 facility is available we still place TFmode values in VR register
10277 pairs, since the only instructions we have operating on TFmodes
10278 only deal with register pairs. Therefore we have to allow DFmode
10279 subregs of TFmodes to enable the TFmode splitters. */
10280 if (reg_classes_intersect_p (VEC_REGS, rclass)
10281 && (GET_MODE_SIZE (small_mode) < 8
10282 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10285 /* Likewise for access registers, since they have only half the
10286 word size on 64-bit. */
10287 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10293 /* Return true if we use LRA instead of reload pass. */
10297 return s390_lra_flag;
10300 /* Return true if register FROM can be eliminated via register TO. */
10303 s390_can_eliminate (const int from, const int to)
10305 /* On zSeries machines, we have not marked the base register as fixed.
10306 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10307 If a function requires the base register, we say here that this
10308 elimination cannot be performed. This will cause reload to free
10309 up the base register (as if it were fixed). On the other hand,
10310 if the current function does *not* require the base register, we
10311 say here the elimination succeeds, which in turn allows reload
10312 to allocate the base register for any other purpose. */
10313 if (from == BASE_REGNUM && to == BASE_REGNUM)
10315 if (TARGET_CPU_ZARCH)
10317 s390_init_frame_layout ();
10318 return cfun->machine->base_reg == NULL_RTX;
10324 /* Everything else must point into the stack frame. */
10325 gcc_assert (to == STACK_POINTER_REGNUM
10326 || to == HARD_FRAME_POINTER_REGNUM);
10328 gcc_assert (from == FRAME_POINTER_REGNUM
10329 || from == ARG_POINTER_REGNUM
10330 || from == RETURN_ADDRESS_POINTER_REGNUM);
10332 /* Make sure we actually saved the return address. */
10333 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10334 if (!crtl->calls_eh_return
10336 && !cfun_frame_layout.save_return_addr_p)
10342 /* Return offset between register FROM and TO initially after prolog. */
10345 s390_initial_elimination_offset (int from, int to)
10347 HOST_WIDE_INT offset;
10349 /* ??? Why are we called for non-eliminable pairs? */
10350 if (!s390_can_eliminate (from, to))
10355 case FRAME_POINTER_REGNUM:
10356 offset = (get_frame_size()
10357 + STACK_POINTER_OFFSET
10358 + crtl->outgoing_args_size);
10361 case ARG_POINTER_REGNUM:
10362 s390_init_frame_layout ();
10363 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10366 case RETURN_ADDRESS_POINTER_REGNUM:
10367 s390_init_frame_layout ();
10369 if (cfun_frame_layout.first_save_gpr_slot == -1)
10371 /* If it turns out that for stdarg nothing went into the reg
10372 save area we also do not need the return address
10373 save slot.
10374 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10377 gcc_unreachable ();
10380 /* In order to make the following work it is not necessary for
10381 r14 to have a save slot. It is sufficient if one other GPR
10382 got one. Since the GPRs are always stored without gaps we
10383 are able to calculate where the r14 save slot would
10384 reside.  */
10385 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10386 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10395 gcc_unreachable ();
10401 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10402 to register BASE. Return generated insn. */
10405 save_fpr (rtx base, int offset, int regnum)
10408 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10410 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10411 set_mem_alias_set (addr, get_varargs_alias_set ());
10413 set_mem_alias_set (addr, get_frame_alias_set ());
10415 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10418 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10419 to register BASE. Return generated insn. */
10422 restore_fpr (rtx base, int offset, int regnum)
10425 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10426 set_mem_alias_set (addr, get_frame_alias_set ());
10428 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
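/* Editorial note: save_fpr and restore_fpr are the building blocks
   used below, e.g. in s390_emit_prologue

       save_fpr (stack_pointer_rtx, offset, i);

   with the matching call in s390_emit_epilogue

       restore_fpr (frame_pointer, offset + next_offset, i);
*/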
10431 /* Return true if REGNO is a global register, but not one
10432 of the special ones that need to be saved/restored anyway.  */
10435 global_not_special_regno_p (int regno)
10437 return (global_regs[regno]
10438 /* These registers are special and need to be
10439 restored in any case. */
10440 && !(regno == STACK_POINTER_REGNUM
10441 || regno == RETURN_REGNUM
10442 || regno == BASE_REGNUM
10443 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10446 /* Generate insn to save registers FIRST to LAST into
10447 the register save area located at offset OFFSET
10448 relative to register BASE. */
10451 save_gprs (rtx base, int offset, int first, int last)
10453 rtx addr, insn, note;
10456 addr = plus_constant (Pmode, base, offset);
10457 addr = gen_rtx_MEM (Pmode, addr);
10459 set_mem_alias_set (addr, get_frame_alias_set ());
10461 /* Special-case single register. */
10465 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10467 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10469 if (!global_not_special_regno_p (first))
10470 RTX_FRAME_RELATED_P (insn) = 1;
10475 insn = gen_store_multiple (addr,
10476 gen_rtx_REG (Pmode, first),
10477 GEN_INT (last - first + 1));
10479 if (first <= 6 && cfun->stdarg)
10480 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10482 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10484 if (first + i <= 6)
10485 set_mem_alias_set (mem, get_varargs_alias_set ());
10488 /* We need to set the FRAME_RELATED flag on all SETs
10489 inside the store-multiple pattern.
10491 However, we must not emit DWARF records for registers 2..5
10492 if they are stored for use by variable arguments ...
10494 ??? Unfortunately, it is not enough to simply not set the
10495 FRAME_RELATED flags for those SETs, because the first SET
10496 of the PARALLEL is always treated as if it had the flag
10497 set, even if it does not. Therefore we emit a new pattern
10498 without those registers as REG_FRAME_RELATED_EXPR note. */
10500 if (first >= 6 && !global_not_special_regno_p (first))
10502 rtx pat = PATTERN (insn);
10504 for (i = 0; i < XVECLEN (pat, 0); i++)
10505 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10506 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10508 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10510 RTX_FRAME_RELATED_P (insn) = 1;
10512 else if (last >= 6)
10516 for (start = first >= 6 ? first : 6; start <= last; start++)
10517 if (!global_not_special_regno_p (start))
10523 addr = plus_constant (Pmode, base,
10524 offset + (start - first) * UNITS_PER_LONG);
10529 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10530 gen_rtx_REG (Pmode, start));
10532 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10533 gen_rtx_REG (Pmode, start));
10534 note = PATTERN (note);
10536 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10537 RTX_FRAME_RELATED_P (insn) = 1;
10542 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10543 gen_rtx_REG (Pmode, start),
10544 GEN_INT (last - start + 1));
10545 note = PATTERN (note);
10547 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10549 for (i = 0; i < XVECLEN (note, 0); i++)
10550 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10551 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10553 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10555 RTX_FRAME_RELATED_P (insn) = 1;
10561 /* Generate insn to restore registers FIRST to LAST from
10562 the register save area located at offset OFFSET
10563 relative to register BASE. */
10566 restore_gprs (rtx base, int offset, int first, int last)
10570 addr = plus_constant (Pmode, base, offset);
10571 addr = gen_rtx_MEM (Pmode, addr);
10572 set_mem_alias_set (addr, get_frame_alias_set ());
10574 /* Special-case single register. */
10578 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10580 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10582 RTX_FRAME_RELATED_P (insn) = 1;
10586 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10588 GEN_INT (last - first + 1));
10589 RTX_FRAME_RELATED_P (insn) = 1;
10593 /* Return insn sequence to load the GOT register. */
10595 static GTY(()) rtx got_symbol;
10597 s390_load_got (void)
10601 /* We cannot use pic_offset_table_rtx here since we use this
10602 function also for non-pic if __tls_get_offset is called and in
10603 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10605 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10609 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10610 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10615 if (TARGET_CPU_ZARCH)
10617 emit_move_insn (got_rtx, got_symbol);
10623 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10624 UNSPEC_LTREL_OFFSET);
10625 offset = gen_rtx_CONST (Pmode, offset);
10626 offset = force_const_mem (Pmode, offset);
10628 emit_move_insn (got_rtx, offset);
10630 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10631 UNSPEC_LTREL_BASE);
10632 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10634 emit_move_insn (got_rtx, offset);
10637 insns = get_insns ();
10642 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10643 and the change to the stack pointer. */
10646 s390_emit_stack_tie (void)
10648 rtx mem = gen_frame_mem (BLKmode,
10649 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10651 emit_insn (gen_stack_tie (mem));
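/* Editorial note: the insn emitted above is roughly
   (set (mem:BLK ...) (unspec:BLK [...] UNSPEC_TIE)); it carries no
   data but acts as a scheduling barrier so frame stores cannot be
   reordered across the stack pointer adjustment.  See its use before
   the high-FPR saves in s390_emit_prologue.  */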
10654 /* Copy GPRS into FPR save slots. */
10657 s390_save_gprs_to_fprs (void)
10661 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10664 for (i = 6; i < 16; i++)
10666 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10669 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10670 gen_rtx_REG (DImode, i));
10671 RTX_FRAME_RELATED_P (insn) = 1;
10672 /* This prevents dwarf2cfi from interpreting the set. Doing
10673 so it might emit def_cfa_register infos setting an FPR as
10674 new CFA.  */
10675 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
10680 /* Restore GPRs from FPR save slots. */
10683 s390_restore_gprs_from_fprs (void)
10687 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10690 for (i = 6; i < 16; i++)
10694 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
10697 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
10699 if (i == STACK_POINTER_REGNUM)
10700 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
10702 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
10704 df_set_regs_ever_live (i, true);
10705 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10706 if (i == STACK_POINTER_REGNUM)
10707 add_reg_note (insn, REG_CFA_DEF_CFA,
10708 plus_constant (Pmode, stack_pointer_rtx,
10709 STACK_POINTER_OFFSET));
10710 RTX_FRAME_RELATED_P (insn) = 1;
10715 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10716 generation.  */
10720 const pass_data pass_data_s390_early_mach =
10722 RTL_PASS, /* type */
10723 "early_mach", /* name */
10724 OPTGROUP_NONE, /* optinfo_flags */
10725 TV_MACH_DEP, /* tv_id */
10726 0, /* properties_required */
10727 0, /* properties_provided */
10728 0, /* properties_destroyed */
10729 0, /* todo_flags_start */
10730 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10733 class pass_s390_early_mach : public rtl_opt_pass
10736 pass_s390_early_mach (gcc::context *ctxt)
10737 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10740 /* opt_pass methods: */
10741 virtual unsigned int execute (function *);
10743 }; // class pass_s390_early_mach
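/* Editorial sketch (hypothetical; the real registration lives
   elsewhere in this backend): a pass like the one above is inserted
   into the pipeline along the lines of

       opt_pass *p = new pass_s390_early_mach (g);
       struct register_pass_info info
         = { p, "pro_and_epilogue", 1, PASS_POS_INSERT_BEFORE };
       register_pass (&info);
*/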
10746 pass_s390_early_mach::execute (function *fun)
10750 /* Try to get rid of the FPR clobbers. */
10751 s390_optimize_nonescaping_tx ();
10753 /* Re-compute register info. */
10754 s390_register_info ();
10756 /* If we're using a base register, ensure that it is always valid for
10757 the first non-prologue instruction. */
10758 if (fun->machine->base_reg)
10759 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10761 /* Annotate all constant pool references to let the scheduler know
10762 they implicitly use the base register. */
10763 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10766 annotate_constant_pool_refs (&PATTERN (insn));
10767 df_insn_rescan (insn);
10772 } // anon namespace
10774 /* Expand the prologue into a bunch of separate insns. */
10777 s390_emit_prologue (void)
10785 /* Choose best register to use for temp use within prologue.
10786 TPF with profiling must avoid the register 14 - the tracing function
10787 needs the original contents of r14 to be preserved. */
10789 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10791 && !TARGET_TPF_PROFILING)
10792 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10793 else if (flag_split_stack && cfun->stdarg)
10794 temp_reg = gen_rtx_REG (Pmode, 12);
10796 temp_reg = gen_rtx_REG (Pmode, 1);
10798 s390_save_gprs_to_fprs ();
10800 /* Save call saved gprs. */
10801 if (cfun_frame_layout.first_save_gpr != -1)
10803 insn = save_gprs (stack_pointer_rtx,
10804 cfun_frame_layout.gprs_offset +
10805 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10806 - cfun_frame_layout.first_save_gpr_slot),
10807 cfun_frame_layout.first_save_gpr,
10808 cfun_frame_layout.last_save_gpr);
10812 /* Dummy insn to mark literal pool slot. */
10814 if (cfun->machine->base_reg)
10815 emit_insn (gen_main_pool (cfun->machine->base_reg));
10817 offset = cfun_frame_layout.f0_offset;
10819 /* Save f0 and f2. */
10820 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10822 if (cfun_fpr_save_p (i))
10824 save_fpr (stack_pointer_rtx, offset, i);
10827 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10831 /* Save f4 and f6. */
10832 offset = cfun_frame_layout.f4_offset;
10833 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10835 if (cfun_fpr_save_p (i))
10837 insn = save_fpr (stack_pointer_rtx, offset, i);
10840 /* If f4 and f6 are call clobbered they are saved due to
10841 stdargs and therefore are not frame related. */
10842 if (!call_really_used_regs[i])
10843 RTX_FRAME_RELATED_P (insn) = 1;
10845 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10849 if (TARGET_PACKED_STACK
10850 && cfun_save_high_fprs_p
10851 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10853 offset = (cfun_frame_layout.f8_offset
10854 + (cfun_frame_layout.high_fprs - 1) * 8);
10856 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10857 if (cfun_fpr_save_p (i))
10859 insn = save_fpr (stack_pointer_rtx, offset, i);
10861 RTX_FRAME_RELATED_P (insn) = 1;
10864 if (offset >= cfun_frame_layout.f8_offset)
10868 if (!TARGET_PACKED_STACK)
10869 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10871 if (flag_stack_usage_info)
10872 current_function_static_stack_size = cfun_frame_layout.frame_size;
10874 /* Decrement stack pointer. */
10876 if (cfun_frame_layout.frame_size > 0)
10878 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10879 rtx real_frame_off;
10881 if (s390_stack_size)
10883 HOST_WIDE_INT stack_guard;
10885 if (s390_stack_guard)
10886 stack_guard = s390_stack_guard;
10889 /* If no value for stack guard is provided the smallest power of 2
10890 larger than the current frame size is chosen. */
10891 stack_guard = 1;
10892 while (stack_guard < cfun_frame_layout.frame_size)
10893 stack_guard <<= 1;
10896 if (cfun_frame_layout.frame_size >= s390_stack_size)
10898 warning (0, "frame size of function %qs is %wd"
10899 " bytes exceeding user provided stack limit of "
10901 "An unconditional trap is added.",
10902 current_function_name(), cfun_frame_layout.frame_size,
10904 emit_insn (gen_trap ());
10909 /* stack_guard has to be smaller than s390_stack_size.
10910 Otherwise we would emit an AND with zero which would
10911 not match the test under mask pattern. */
10912 if (stack_guard >= s390_stack_size)
10914 warning (0, "frame size of function %qs is %wd"
10915 " bytes which is more than half the stack size. "
10916 "The dynamic check would not be reliable. "
10917 "No check emitted for this function.",
10918 current_function_name(),
10919 cfun_frame_layout.frame_size);
10923 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10924 & ~(stack_guard - 1));
10926 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10927 GEN_INT (stack_check_mask));
10929 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10931 t, const0_rtx, const0_rtx));
10933 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10935 t, const0_rtx, const0_rtx));
10940 if (s390_warn_framesize > 0
10941 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10942 warning (0, "frame size of %qs is %wd bytes",
10943 current_function_name (), cfun_frame_layout.frame_size);
10945 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10946 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10948 /* Save incoming stack pointer into temp reg. */
10949 if (TARGET_BACKCHAIN || next_fpr)
10950 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10952 /* Subtract frame size from stack pointer. */
10954 if (DISP_IN_RANGE (INTVAL (frame_off)))
10956 insn = gen_rtx_SET (stack_pointer_rtx,
10957 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10959 insn = emit_insn (insn);
10963 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10964 frame_off = force_const_mem (Pmode, frame_off);
10966 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10967 annotate_constant_pool_refs (&PATTERN (insn));
10970 RTX_FRAME_RELATED_P (insn) = 1;
10971 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10972 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10973 gen_rtx_SET (stack_pointer_rtx,
10974 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10977 /* Set backchain. */
10979 if (TARGET_BACKCHAIN)
10981 if (cfun_frame_layout.backchain_offset)
10982 addr = gen_rtx_MEM (Pmode,
10983 plus_constant (Pmode, stack_pointer_rtx,
10984 cfun_frame_layout.backchain_offset));
10986 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10987 set_mem_alias_set (addr, get_frame_alias_set ());
10988 insn = emit_insn (gen_move_insn (addr, temp_reg));
10991 /* If we support non-call exceptions (e.g. for Java),
10992 we need to make sure the backchain pointer is set up
10993 before any possibly trapping memory access. */
10994 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10996 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10997 emit_clobber (addr);
11001 /* Save fprs 8 - 15 (64 bit ABI). */
11003 if (cfun_save_high_fprs_p && next_fpr)
11005 /* If the stack might be accessed through a different register
11006 we have to make sure that the stack pointer decrement is not
11007 moved below the use of the stack slots. */
11008 s390_emit_stack_tie ();
11010 insn = emit_insn (gen_add2_insn (temp_reg,
11011 GEN_INT (cfun_frame_layout.f8_offset)));
11015 for (i = FPR8_REGNUM; i <= next_fpr; i++)
11016 if (cfun_fpr_save_p (i))
11018 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
11019 cfun_frame_layout.frame_size
11020 + cfun_frame_layout.f8_offset
11023 insn = save_fpr (temp_reg, offset, i);
11025 RTX_FRAME_RELATED_P (insn) = 1;
11026 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11027 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
11028 gen_rtx_REG (DFmode, i)));
11032 /* Set frame pointer, if needed. */
11034 if (frame_pointer_needed)
11036 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11037 RTX_FRAME_RELATED_P (insn) = 1;
11040 /* Set up got pointer, if needed. */
11042 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
11044 rtx_insn *insns = s390_load_got ();
11046 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
11047 annotate_constant_pool_refs (&PATTERN (insn));
11052 if (TARGET_TPF_PROFILING)
11054 /* Generate a BAS instruction to serve as a function
11055 entry intercept to facilitate the use of tracing
11056 algorithms located at the branch target. */
11057 emit_insn (gen_prologue_tpf ());
11059 /* Emit a blockage here so that all code
11060 lies between the profiling mechanisms. */
11061 emit_insn (gen_blockage ());
11065 /* Expand the epilogue into a bunch of separate insns. */
11068 s390_emit_epilogue (bool sibcall)
11070 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
11071 int area_bottom, area_top, offset = 0;
11076 if (TARGET_TPF_PROFILING)
11079 /* Generate a BAS instruction to serve as a function
11080 entry intercept to facilitate the use of tracing
11081 algorithms located at the branch target. */
11083 /* Emit a blockage here so that all code
11084 lies between the profiling mechanisms. */
11085 emit_insn (gen_blockage ());
11087 emit_insn (gen_epilogue_tpf ());
11090 /* Check whether to use frame or stack pointer for restore. */
11092 frame_pointer = (frame_pointer_needed
11093 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11095 s390_frame_area (&area_bottom, &area_top);
11097 /* Check whether we can access the register save area.
11098 If not, increment the frame pointer as required. */
11100 if (area_top <= area_bottom)
11102 /* Nothing to restore. */
11104 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11105 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11107 /* Area is in range. */
11108 offset = cfun_frame_layout.frame_size;
11112 rtx insn, frame_off, cfa;
11114 offset = area_bottom < 0 ? -area_bottom : 0;
11115 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11117 cfa = gen_rtx_SET (frame_pointer,
11118 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11119 if (DISP_IN_RANGE (INTVAL (frame_off)))
11121 insn = gen_rtx_SET (frame_pointer,
11122 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11123 insn = emit_insn (insn);
11127 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11128 frame_off = force_const_mem (Pmode, frame_off);
11130 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11131 annotate_constant_pool_refs (&PATTERN (insn));
11133 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11134 RTX_FRAME_RELATED_P (insn) = 1;
11137 /* Restore call saved fprs. */
11141 if (cfun_save_high_fprs_p)
11143 next_offset = cfun_frame_layout.f8_offset;
11144 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11146 if (cfun_fpr_save_p (i))
11148 restore_fpr (frame_pointer,
11149 offset + next_offset, i);
11151 = alloc_reg_note (REG_CFA_RESTORE,
11152 gen_rtx_REG (DFmode, i), cfa_restores);
11161 next_offset = cfun_frame_layout.f4_offset;
11163 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11165 if (cfun_fpr_save_p (i))
11167 restore_fpr (frame_pointer,
11168 offset + next_offset, i);
11170 = alloc_reg_note (REG_CFA_RESTORE,
11171 gen_rtx_REG (DFmode, i), cfa_restores);
11174 else if (!TARGET_PACKED_STACK)
11180 /* Return register. */
11182 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11184 /* Restore call saved gprs. */
11186 if (cfun_frame_layout.first_restore_gpr != -1)
11191 /* Check for global registers and save them
11192 to the stack location from where they get restored.  */
11194 for (i = cfun_frame_layout.first_restore_gpr;
11195 i <= cfun_frame_layout.last_restore_gpr;
11198 if (global_not_special_regno_p (i))
11200 addr = plus_constant (Pmode, frame_pointer,
11201 offset + cfun_frame_layout.gprs_offset
11202 + (i - cfun_frame_layout.first_save_gpr_slot)
11204 addr = gen_rtx_MEM (Pmode, addr);
11205 set_mem_alias_set (addr, get_frame_alias_set ());
11206 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11210 = alloc_reg_note (REG_CFA_RESTORE,
11211 gen_rtx_REG (Pmode, i), cfa_restores);
11216 /* Fetch return address from stack before load multiple,
11217 this helps scheduling.
11219 Only do this if we already decided that r14 needs to be
11220 saved to a stack slot. (And not just because r14 happens to
11221 be in between two GPRs which need saving.) Otherwise it
11222 would be difficult to take that decision back in
11223 s390_optimize_prologue. */
11224 if (cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK)
11226 int return_regnum = find_unused_clobbered_reg();
11227 if (!return_regnum)
11229 return_reg = gen_rtx_REG (Pmode, return_regnum);
11231 addr = plus_constant (Pmode, frame_pointer,
11232 offset + cfun_frame_layout.gprs_offset
11234 - cfun_frame_layout.first_save_gpr_slot)
11236 addr = gen_rtx_MEM (Pmode, addr);
11237 set_mem_alias_set (addr, get_frame_alias_set ());
11238 emit_move_insn (return_reg, addr);
11240 /* Once we did that optimization we have to make sure
11241 s390_optimize_prologue does not try to remove the
11242 store of r14 since we will not be able to find the
11243 load issued here. */
11244 cfun_frame_layout.save_return_addr_p = true;
11248 insn = restore_gprs (frame_pointer,
11249 offset + cfun_frame_layout.gprs_offset
11250 + (cfun_frame_layout.first_restore_gpr
11251 - cfun_frame_layout.first_save_gpr_slot)
11253 cfun_frame_layout.first_restore_gpr,
11254 cfun_frame_layout.last_restore_gpr);
11255 insn = emit_insn (insn);
11256 REG_NOTES (insn) = cfa_restores;
11257 add_reg_note (insn, REG_CFA_DEF_CFA,
11258 plus_constant (Pmode, stack_pointer_rtx,
11259 STACK_POINTER_OFFSET));
11260 RTX_FRAME_RELATED_P (insn) = 1;
11263 s390_restore_gprs_from_fprs ();
11268 /* Return to caller. */
11270 p = rtvec_alloc (2);
11272 RTVEC_ELT (p, 0) = ret_rtx;
11273 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11274 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11278 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11281 s300_set_up_by_prologue (hard_reg_set_container *regs)
11283 if (cfun->machine->base_reg
11284 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11285 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11288 /* -fsplit-stack support. */
11290 /* A SYMBOL_REF for __morestack. */
11291 static GTY(()) rtx morestack_ref;
11293 /* When using -fsplit-stack, the allocation routines set a field in
11294 the TCB to the bottom of the stack plus this much space, measured
11295 in bytes.  */
11297 #define SPLIT_STACK_AVAILABLE 1024
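/* Editorial sketch of the check the prologue below emits, in pseudo-C
   (the field name follows the comments in this function):

       if (sp < tcb->__private_ss + frame_size)
         __morestack (...);   // not enough room, grab a new segment

   Since __private_ss already includes SPLIT_STACK_AVAILABLE bytes of
   slack, frames up to that size can skip the addition and compare
   against the cached value directly.  */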
11299 /* Emit -fsplit-stack prologue, which goes before the regular function
11300 prologue.  */
11303 s390_expand_split_stack_prologue (void)
11305 rtx r1, guard, cc = NULL;
11307 /* Offset from thread pointer to __private_ss. */
11308 int psso = TARGET_64BIT ? 0x38 : 0x20;
11309 /* Pointer size in bytes. */
11310 /* Frame size and argument size - the two parameters to __morestack. */
11311 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11312 /* Align argument size to 8 bytes - simplifies __morestack code. */
11313 HOST_WIDE_INT args_size = crtl->args.size >= 0
11314 ? ((crtl->args.size + 7) & ~7)
11315 : 0;
11316 /* Label to be called by __morestack. */
11317 rtx_code_label *call_done = NULL;
11318 rtx_code_label *parm_base = NULL;
11321 gcc_assert (flag_split_stack && reload_completed);
11322 if (!TARGET_CPU_ZARCH)
11324 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11328 r1 = gen_rtx_REG (Pmode, 1);
11330 /* If no stack frame will be allocated, don't do anything. */
11333 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11335 /* If va_start is used, just use r15. */
11336 emit_move_insn (r1,
11337 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11338 GEN_INT (STACK_POINTER_OFFSET)));
11344 if (morestack_ref == NULL_RTX)
11346 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11347 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11348 | SYMBOL_FLAG_FUNCTION);
11351 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11353 /* If frame_size will fit in an add instruction, do a stack space
11354 check, and only call __morestack if there's not enough space. */
11356 /* Get thread pointer. r1 is the only register we can always destroy - r0
11357 could contain a static chain (and cannot be used to address memory
11358 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11359 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11360 /* Aim at __private_ss. */
11361 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11363 /* If less than 1kiB used, skip addition and compare directly with
11365 if (frame_size > SPLIT_STACK_AVAILABLE)
11367 emit_move_insn (r1, guard);
11369 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11371 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11375 /* Compare the (maybe adjusted) guard with the stack pointer. */
11376 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11379 call_done = gen_label_rtx ();
11380 parm_base = gen_label_rtx ();
11382 /* Emit the parameter block. */
11383 tmp = gen_split_stack_data (parm_base, call_done,
11384 GEN_INT (frame_size),
11385 GEN_INT (args_size));
11386 insn = emit_insn (tmp);
11387 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11388 LABEL_NUSES (call_done)++;
11389 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11390 LABEL_NUSES (parm_base)++;
11392 /* %r1 = litbase. */
11393 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11394 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11395 LABEL_NUSES (parm_base)++;
11397 /* Now, we need to call __morestack. It has very special calling
11398 conventions: it preserves param/return/static chain registers for
11399 calling main function body, and looks for its own parameters at %r1. */
11403 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11405 insn = emit_jump_insn (tmp);
11406 JUMP_LABEL (insn) = call_done;
11407 LABEL_NUSES (call_done)++;
11409 /* Mark the jump as very unlikely to be taken. */
11410 add_int_reg_note (insn, REG_BR_PROB, REG_BR_PROB_BASE / 100);
11412 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11414 /* If va_start is used, and __morestack was not called, just use
11415 r15.  */
11416 emit_move_insn (r1,
11417 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11418 GEN_INT (STACK_POINTER_OFFSET)));
11423 tmp = gen_split_stack_call (morestack_ref, call_done);
11424 insn = emit_jump_insn (tmp);
11425 JUMP_LABEL (insn) = call_done;
11426 LABEL_NUSES (call_done)++;
11430 /* __morestack will call us here. */
11432 emit_label (call_done);
11435 /* We may have to tell the dataflow pass that the split stack prologue
11436 is initializing a register. */
11439 s390_live_on_entry (bitmap regs)
11441 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11443 gcc_assert (flag_split_stack);
11444 bitmap_set_bit (regs, 1);
11448 /* Return true if the function can use simple_return to return outside
11449 of a shrink-wrapped region.  At present shrink-wrapping is supported
11450 in all cases.  */
11453 s390_can_use_simple_return_insn (void)
11458 /* Return true if the epilogue is guaranteed to contain only a return
11459 instruction and if a direct return can therefore be used instead.
11460 One of the main advantages of using direct return instructions
11461 is that we can then use conditional returns. */
11464 s390_can_use_return_insn (void)
11468 if (!reload_completed)
11474 if (TARGET_TPF_PROFILING)
11477 for (i = 0; i < 16; i++)
11478 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11481 /* For 31 bit this is not covered by the frame_size check below
11482 since f4, f6 are saved in the register save area without needing
11483 additional stack space. */
11485 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11488 if (cfun->machine->base_reg
11489 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11492 return cfun_frame_layout.frame_size == 0;
11495 /* The VX ABI differs for vararg functions. Therefore we need the
11496 prototype of the callee to be available when passing vector type
11497 values.  */
11498 static const char *
11499 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11501 return ((TARGET_VX_ABI
11503 && VECTOR_TYPE_P (TREE_TYPE (val))
11504 && (funcdecl == NULL_TREE
11505 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11506 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11507 ? N_("vector argument passed to unprototyped function")
11512 /* Return the size in bytes of a function argument of
11513 type TYPE and/or mode MODE. At least one of TYPE or
11514 MODE must be specified. */
11517 s390_function_arg_size (machine_mode mode, const_tree type)
11520 return int_size_in_bytes (type);
11522 /* No type info available for some library calls ... */
11523 if (mode != BLKmode)
11524 return GET_MODE_SIZE (mode);
/* If we have neither type nor mode, abort.  */
11527 gcc_unreachable ();
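/* For example (illustrative): a DImode libcall operand without type
   information yields GET_MODE_SIZE (DImode) == 8 here, while a
   BLKmode struct argument is sized via int_size_in_bytes of its
   type.  */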
11530 /* Return true if a function argument of type TYPE and mode MODE
11531 is to be passed in a vector register, if available. */
11534 s390_function_arg_vector (machine_mode mode, const_tree type)
11536 if (!TARGET_VX_ABI)
11539 if (s390_function_arg_size (mode, type) > 16)
11542 /* No type info available for some library calls ... */
11544 return VECTOR_MODE_P (mode);
11546 /* The ABI says that record types with a single member are treated
11547 just like that member would be. */
11548 while (TREE_CODE (type) == RECORD_TYPE)
11550 tree field, single = NULL_TREE;
11552 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11554 if (TREE_CODE (field) != FIELD_DECL)
11557 if (single == NULL_TREE)
11558 single = TREE_TYPE (field);
11563 if (single == NULL_TREE)
/* If the field declaration adds extra bytes due to e.g. padding,
   this is not accepted as a vector type.  */
11569 if (int_size_in_bytes (single) <= 0
11570 || int_size_in_bytes (single) != int_size_in_bytes (type))
11576 return VECTOR_TYPE_P (type);
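/* Example (illustrative): with the vector ABI
       typedef int v4si __attribute__ ((vector_size (16)));
       struct wrap { v4si x; };
   is passed exactly like a plain v4si by the single-member-record
   rule above, while a 32 byte vector fails the size check and is not
   passed in a VR.  */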
11579 /* Return true if a function argument of type TYPE and mode MODE
11580 is to be passed in a floating-point register, if available. */
11583 s390_function_arg_float (machine_mode mode, const_tree type)
11585 if (s390_function_arg_size (mode, type) > 8)
11588 /* Soft-float changes the ABI: no floating-point registers are used. */
11589 if (TARGET_SOFT_FLOAT)
11592 /* No type info available for some library calls ... */
11594 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11596 /* The ABI says that record types with a single member are treated
11597 just like that member would be. */
11598 while (TREE_CODE (type) == RECORD_TYPE)
11600 tree field, single = NULL_TREE;
11602 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11604 if (TREE_CODE (field) != FIELD_DECL)
11607 if (single == NULL_TREE)
11608 single = TREE_TYPE (field);
11613 if (single == NULL_TREE)
11619 return TREE_CODE (type) == REAL_TYPE;
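/* Example (illustrative): both a plain double and
       struct s { double d; };
   arrive here as DFmode candidates for an FPR, whereas
       struct t { float f; float g; };
   has two fields, fails the single-member test and falls through to
   the integer/stack conventions instead.  */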
11622 /* Return true if a function argument of type TYPE and mode MODE
11623 is to be passed in an integer register, or a pair of integer
11624 registers, if available. */
11627 s390_function_arg_integer (machine_mode mode, const_tree type)
11629 int size = s390_function_arg_size (mode, type);
11633 /* No type info available for some library calls ... */
11635 return GET_MODE_CLASS (mode) == MODE_INT
11636 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11638 /* We accept small integral (and similar) types. */
11639 if (INTEGRAL_TYPE_P (type)
11640 || POINTER_TYPE_P (type)
11641 || TREE_CODE (type) == NULLPTR_TYPE
11642 || TREE_CODE (type) == OFFSET_TYPE
11643 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11646 /* We also accept structs of size 1, 2, 4, 8 that are not
11647 passed in floating-point registers. */
11648 if (AGGREGATE_TYPE_P (type)
11649 && exact_log2 (size) >= 0
11650 && !s390_function_arg_float (mode, type))
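/* Example (illustrative): struct { char c[4]; } (size 4, a power of
   two) is accepted and passed in a single GPR, while
   struct { char c[3]; } (size 3) fails the exact_log2 test and ends
   up being passed by reference instead.  */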
11656 /* Return 1 if a function argument of type TYPE and mode MODE
11657 is to be passed by reference. The ABI specifies that only
11658 structures of size 1, 2, 4, or 8 bytes are passed by value,
all other structures (and complex numbers) are passed by
reference.  */
11663 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11664 machine_mode mode, const_tree type,
11665 bool named ATTRIBUTE_UNUSED)
11667 int size = s390_function_arg_size (mode, type);
11669 if (s390_function_arg_vector (mode, type))
11677 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11680 if (TREE_CODE (type) == COMPLEX_TYPE
11681 || TREE_CODE (type) == VECTOR_TYPE)
11688 /* Update the data in CUM to advance over an argument of mode MODE and
11689 data type TYPE. (TYPE is null for libcalls where that information
may not be available.)  The boolean NAMED specifies whether the
11691 argument is a named argument (as opposed to an unnamed argument
11692 matching an ellipsis). */
11695 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11696 const_tree type, bool named)
11698 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11700 if (s390_function_arg_vector (mode, type))
11702 /* We are called for unnamed vector stdarg arguments which are
11703 passed on the stack. In this case this hook does not have to
do anything since stack arguments are tracked by common
code.  */
11710 else if (s390_function_arg_float (mode, type))
11714 else if (s390_function_arg_integer (mode, type))
11716 int size = s390_function_arg_size (mode, type);
11717 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11720 gcc_unreachable ();
11723 /* Define where to put the arguments to a function.
11724 Value is zero to push the argument on the stack,
11725 or a hard register in which to store the argument.
11727 MODE is the argument's machine mode.
11728 TYPE is the data type of the argument (as a tree).
This is null for libcalls where that information may
not be available.
11731 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11732 the preceding args and about the function being called.
11733 NAMED is nonzero if this argument is a named parameter
11734 (otherwise it is an extra parameter matching an ellipsis).
11736 On S/390, we use general purpose registers 2 through 6 to
11737 pass integer, pointer, and certain structure arguments, and
11738 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11739 to pass floating point arguments. All remaining arguments
11740 are pushed to the stack. */
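/* For instance (illustrative, 64-bit ABI): for
       void f (int a, double b, long c);
   A is passed in %r2, B in %f0 and C in %r3 - the GPR and FPR
   counters advance independently of each other.  */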
11743 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11744 const_tree type, bool named)
11746 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11749 s390_check_type_for_vector_abi (type, true, false);
11751 if (s390_function_arg_vector (mode, type))
/* Vector arguments being part of the ellipsis are passed on the
   stack.  */
11755 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11758 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11760 else if (s390_function_arg_float (mode, type))
11762 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11765 return gen_rtx_REG (mode, cum->fprs + 16);
11767 else if (s390_function_arg_integer (mode, type))
11769 int size = s390_function_arg_size (mode, type);
11770 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11772 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11774 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11775 return gen_rtx_REG (mode, cum->gprs + 2);
11776 else if (n_gprs == 2)
11778 rtvec p = rtvec_alloc (2);
11781 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11784 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11787 return gen_rtx_PARALLEL (mode, p);
11791 /* After the real arguments, expand_call calls us once again
11792 with a void_type_node type. Whatever we return here is
11793 passed as operand 2 to the call expanders.
11795 We don't need this feature ... */
11796 else if (type == void_type_node)
11799 gcc_unreachable ();
11802 /* Return true if return values of type TYPE should be returned
11803 in a memory buffer whose address is passed by the caller as
11804 hidden first argument. */
11807 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11809 /* We accept small integral (and similar) types. */
11810 if (INTEGRAL_TYPE_P (type)
11811 || POINTER_TYPE_P (type)
11812 || TREE_CODE (type) == OFFSET_TYPE
11813 || TREE_CODE (type) == REAL_TYPE)
11814 return int_size_in_bytes (type) > 8;
11816 /* vector types which fit into a VR. */
if (TARGET_VX_ABI
    && VECTOR_TYPE_P (type)
11819 && int_size_in_bytes (type) <= 16)
/* Aggregates and similar constructs are always returned
   in memory.  */
11824 if (AGGREGATE_TYPE_P (type)
11825 || TREE_CODE (type) == COMPLEX_TYPE
11826 || VECTOR_TYPE_P (type))
11829 /* ??? We get called on all sorts of random stuff from
11830 aggregate_value_p. We can't abort, but it's not clear
11831 what's safe to return. Pretend it's a struct I guess. */
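/* Example (illustrative): a 16 byte struct or a _Complex double is
   returned via a hidden pointer argument, while a plain long or
   double fits into %r2 or %f0 respectively and is returned in a
   register.  */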
11835 /* Function arguments and return values are promoted to word size. */
11837 static machine_mode
11838 s390_promote_function_mode (const_tree type, machine_mode mode,
11840 const_tree fntype ATTRIBUTE_UNUSED,
11841 int for_return ATTRIBUTE_UNUSED)
11843 if (INTEGRAL_MODE_P (mode)
11844 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11846 if (type != NULL_TREE && POINTER_TYPE_P (type))
11847 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11854 /* Define where to return a (scalar) value of type RET_TYPE.
11855 If RET_TYPE is null, define where to return a (scalar)
11856 value of mode MODE from a libcall. */
11859 s390_function_and_libcall_value (machine_mode mode,
11860 const_tree ret_type,
11861 const_tree fntype_or_decl,
11862 bool outgoing ATTRIBUTE_UNUSED)
11864 /* For vector return types it is important to use the RET_TYPE
11865 argument whenever available since the middle-end might have
11866 changed the mode to a scalar mode. */
11867 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11868 || (!ret_type && VECTOR_MODE_P (mode)));
11870 /* For normal functions perform the promotion as
11871 promote_function_mode would do. */
11874 int unsignedp = TYPE_UNSIGNED (ret_type);
11875 mode = promote_function_mode (ret_type, mode, &unsignedp,
11876 fntype_or_decl, 1);
11879 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11880 || SCALAR_FLOAT_MODE_P (mode)
11881 || (TARGET_VX_ABI && vector_ret_type_p));
11882 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11884 if (TARGET_VX_ABI && vector_ret_type_p)
11885 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11886 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11887 return gen_rtx_REG (mode, 16);
11888 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11889 || UNITS_PER_LONG == UNITS_PER_WORD)
11890 return gen_rtx_REG (mode, 2);
11891 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11893 /* This case is triggered when returning a 64 bit value with
11894 -m31 -mzarch. Although the value would fit into a single
11895 register it has to be forced into a 32 bit register pair in
11896 order to match the ABI. */
11897 rtvec p = rtvec_alloc (2);
11900 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11902 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11904 return gen_rtx_PARALLEL (mode, p);
11907 gcc_unreachable ();
11910 /* Define where to return a scalar return value of type RET_TYPE. */
11913 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11916 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11917 fn_decl_or_type, outgoing);
/* Define where to return a scalar libcall return value of mode
   MODE.  */
11924 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11926 return s390_function_and_libcall_value (mode, NULL_TREE,
11931 /* Create and return the va_list datatype.
11933 On S/390, va_list is an array type equivalent to
typedef struct __va_list_tag
  {
    long __gpr;
    long __fpr;
    void *__overflow_arg_area;
    void *__reg_save_area;
  } va_list[1];
11943 where __gpr and __fpr hold the number of general purpose
11944 or floating point arguments used up to now, respectively,
11945 __overflow_arg_area points to the stack location of the
11946 next argument passed on the stack, and __reg_save_area
11947 always points to the start of the register area in the
11948 call frame of the current function. The function prologue
11949 saves all registers used for argument passing into this
11950 area if the function uses variable arguments. */
11953 s390_build_builtin_va_list (void)
11955 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11957 record = lang_hooks.types.make_type (RECORD_TYPE);
11960 build_decl (BUILTINS_LOCATION,
11961 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11963 f_gpr = build_decl (BUILTINS_LOCATION,
11964 FIELD_DECL, get_identifier ("__gpr"),
11965 long_integer_type_node);
11966 f_fpr = build_decl (BUILTINS_LOCATION,
11967 FIELD_DECL, get_identifier ("__fpr"),
11968 long_integer_type_node);
11969 f_ovf = build_decl (BUILTINS_LOCATION,
11970 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11972 f_sav = build_decl (BUILTINS_LOCATION,
11973 FIELD_DECL, get_identifier ("__reg_save_area"),
11976 va_list_gpr_counter_field = f_gpr;
11977 va_list_fpr_counter_field = f_fpr;
11979 DECL_FIELD_CONTEXT (f_gpr) = record;
11980 DECL_FIELD_CONTEXT (f_fpr) = record;
11981 DECL_FIELD_CONTEXT (f_ovf) = record;
11982 DECL_FIELD_CONTEXT (f_sav) = record;
11984 TYPE_STUB_DECL (record) = type_decl;
11985 TYPE_NAME (record) = type_decl;
11986 TYPE_FIELDS (record) = f_gpr;
11987 DECL_CHAIN (f_gpr) = f_fpr;
11988 DECL_CHAIN (f_fpr) = f_ovf;
11989 DECL_CHAIN (f_ovf) = f_sav;
11991 layout_type (record);
11993 /* The correct type is an array type of one element. */
11994 return build_array_type (record, build_index_type (size_zero_node));
11997 /* Implement va_start by filling the va_list structure VALIST.
11998 STDARG_P is always true, and ignored.
11999 NEXTARG points to the first anonymous stack argument.
12001 The following global variables are used to initialize
12002 the va_list structure:
crtl->args.info:
  holds number of gprs and fprs used for named arguments.
12006 crtl->args.arg_offset_rtx:
12007 holds the offset of the first anonymous stack argument
12008 (relative to the virtual arg pointer). */
12011 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
12013 HOST_WIDE_INT n_gpr, n_fpr;
12015 tree f_gpr, f_fpr, f_ovf, f_sav;
12016 tree gpr, fpr, ovf, sav, t;
12018 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12019 f_fpr = DECL_CHAIN (f_gpr);
12020 f_ovf = DECL_CHAIN (f_fpr);
12021 f_sav = DECL_CHAIN (f_ovf);
12023 valist = build_simple_mem_ref (valist);
12024 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12025 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12026 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12027 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12029 /* Count number of gp and fp argument registers used. */
12031 n_gpr = crtl->args.info.gprs;
12032 n_fpr = crtl->args.info.fprs;
12034 if (cfun->va_list_gpr_size)
12036 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12037 build_int_cst (NULL_TREE, n_gpr));
12038 TREE_SIDE_EFFECTS (t) = 1;
12039 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12042 if (cfun->va_list_fpr_size)
12044 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12045 build_int_cst (NULL_TREE, n_fpr));
12046 TREE_SIDE_EFFECTS (t) = 1;
12047 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12050 if (flag_split_stack
12051 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12053 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12058 reg = gen_reg_rtx (Pmode);
12059 cfun->machine->split_stack_varargs_pointer = reg;
12062 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12063 seq = get_insns ();
12066 push_topmost_sequence ();
12067 emit_insn_after (seq, entry_of_function ());
12068 pop_topmost_sequence ();
12071 /* Find the overflow area.
12072 FIXME: This currently is too pessimistic when the vector ABI is
enabled.  In that case we *always* set up the overflow area
pointer.  */
12075 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
12076 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12079 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12080 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12082 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12084 off = INTVAL (crtl->args.arg_offset_rtx);
12085 off = off < 0 ? 0 : off;
12086 if (TARGET_DEBUG_ARG)
12087 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12088 (int)n_gpr, (int)n_fpr, off);
12090 t = fold_build_pointer_plus_hwi (t, off);
12092 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12093 TREE_SIDE_EFFECTS (t) = 1;
12094 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12097 /* Find the register save area. */
12098 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12099 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12101 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12102 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12104 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12105 TREE_SIDE_EFFECTS (t) = 1;
12106 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
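/* Illustrative effect (sketch): for

       void f (int n, ...) { va_list ap; va_start (ap, n); ... }

   the code above initializes roughly

       ap[0].__gpr = 1;                  # one named GPR argument
       ap[0].__fpr = 0;
       ap[0].__overflow_arg_area = <incoming args> + <arg offset>;
       ap[0].__reg_save_area = <start of register save area>;  */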
12110 /* Implement va_arg by updating the va_list structure
12111 VALIST as required to retrieve an argument of type
12112 TYPE, and returning that argument.
Generates code equivalent to:

if (integral value) {
  if (size <= 4 && args.gpr < 5 ||
      size > 4 && args.gpr < 4)
    ret = args.reg_save_area[args.gpr+8]
  else
    ret = *args.overflow_arg_area++;
} else if (vector value) {
  ret = *args.overflow_arg_area;
  args.overflow_arg_area += size / 8;
} else if (float value) {
  if (args.fpr < 2)
    ret = args.reg_save_area[args.fpr+64]
  else
    ret = *args.overflow_arg_area++;
} else if (aggregate value) {
  if (args.gpr < 5)
    ret = *args.reg_save_area[args.gpr]
  else
    ret = **args.overflow_arg_area++;
} */
12138 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12139 gimple_seq *post_p ATTRIBUTE_UNUSED)
12141 tree f_gpr, f_fpr, f_ovf, f_sav;
12142 tree gpr, fpr, ovf, sav, reg, t, u;
12143 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12144 tree lab_false, lab_over = NULL_TREE;
12145 tree addr = create_tmp_var (ptr_type_node, "addr");
bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
		      a register.  */
12149 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12150 f_fpr = DECL_CHAIN (f_gpr);
12151 f_ovf = DECL_CHAIN (f_fpr);
12152 f_sav = DECL_CHAIN (f_ovf);
12154 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12155 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12156 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12158 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12159 both appear on a lhs. */
12160 valist = unshare_expr (valist);
12161 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12163 size = int_size_in_bytes (type);
12165 s390_check_type_for_vector_abi (type, true, false);
12167 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12169 if (TARGET_DEBUG_ARG)
12171 fprintf (stderr, "va_arg: aggregate type");
12175 /* Aggregates are passed by reference. */
/* Kernel stack layout on 31 bit: it is assumed here that no padding
   will be added by s390_frame_info because for va_args an even number
   of GPRs always has to be saved (r15-r2 = 14 regs).  */
12183 sav_ofs = 2 * UNITS_PER_LONG;
12184 sav_scale = UNITS_PER_LONG;
12185 size = UNITS_PER_LONG;
12186 max_reg = GP_ARG_NUM_REG - n_reg;
12187 left_align_p = false;
12189 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12191 if (TARGET_DEBUG_ARG)
12193 fprintf (stderr, "va_arg: vector type");
12203 left_align_p = true;
12205 else if (s390_function_arg_float (TYPE_MODE (type), type))
12207 if (TARGET_DEBUG_ARG)
12209 fprintf (stderr, "va_arg: float type");
12213 /* FP args go in FP registers, if present. */
12217 sav_ofs = 16 * UNITS_PER_LONG;
12219 max_reg = FP_ARG_NUM_REG - n_reg;
12220 left_align_p = false;
12224 if (TARGET_DEBUG_ARG)
12226 fprintf (stderr, "va_arg: other type");
12230 /* Otherwise into GP registers. */
12233 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
/* Kernel stack layout on 31 bit: it is assumed here that no padding
   will be added by s390_frame_info because for va_args an even number
   of GPRs always has to be saved (r15-r2 = 14 regs).  */
12238 sav_ofs = 2 * UNITS_PER_LONG;
12240 if (size < UNITS_PER_LONG)
12241 sav_ofs += UNITS_PER_LONG - size;
12243 sav_scale = UNITS_PER_LONG;
12244 max_reg = GP_ARG_NUM_REG - n_reg;
12245 left_align_p = false;
12248 /* Pull the value out of the saved registers ... */
12250 if (reg != NULL_TREE)
/* In pseudocode (what the gimple built below implements):

     if (reg > ((typeof (reg)) max_reg))
       goto lab_false;

     addr = sav + sav_ofs + reg * sav_scale;

     goto lab_over;

     lab_false:  */
12263 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12264 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12266 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12267 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12268 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12269 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12270 gimplify_and_add (t, pre_p);
12272 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12273 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12274 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12275 t = fold_build_pointer_plus (t, u);
12277 gimplify_assign (addr, t, pre_p);
12279 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12281 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12284 /* ... Otherwise out of the overflow area. */
12287 if (size < UNITS_PER_LONG && !left_align_p)
12288 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12290 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12292 gimplify_assign (addr, t, pre_p);
12294 if (size < UNITS_PER_LONG && left_align_p)
12295 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12297 t = fold_build_pointer_plus_hwi (t, size);
12299 gimplify_assign (ovf, t, pre_p);
12301 if (reg != NULL_TREE)
12302 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12305 /* Increment register save count. */
12309 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12310 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12311 gimplify_and_add (u, pre_p);
12316 t = build_pointer_type_for_mode (build_pointer_type (type),
12318 addr = fold_convert (t, addr);
12319 addr = build_va_arg_indirect_ref (addr);
12323 t = build_pointer_type_for_mode (type, ptr_mode, true);
12324 addr = fold_convert (t, addr);
12327 return build_va_arg_indirect_ref (addr);
/* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
   expanders.
   DEST  - Register location where CC will be stored.
   TDB   - Pointer to a 256 byte area where to store the transaction
	   diagnostic block.  NULL if TDB is not needed.
   RETRY - Retry count value.  If non-NULL a retry loop for CC2
	   is generated.
   CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
	   of the tbegin instruction pattern.  */
12341 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12343 rtx retry_plus_two = gen_reg_rtx (SImode);
12344 rtx retry_reg = gen_reg_rtx (SImode);
12345 rtx_code_label *retry_label = NULL;
12347 if (retry != NULL_RTX)
12349 emit_move_insn (retry_reg, retry);
12350 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12351 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12352 retry_label = gen_label_rtx ();
12353 emit_label (retry_label);
12356 if (clobber_fprs_p)
12359 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12362 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12366 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12369 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12370 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12372 UNSPEC_CC_TO_INT));
12373 if (retry != NULL_RTX)
12375 const int CC0 = 1 << 3;
12376 const int CC1 = 1 << 2;
12377 const int CC3 = 1 << 0;
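/* TBEGIN condition codes (architectural): CC0 - transaction started,
   CC1 - indeterminate condition, CC2 - transient failure worth
   retrying, CC3 - persistent failure.  CC2 (1 << 1) is deliberately
   absent from the mask below, so the jump leaves the retry loop for
   everything but CC2.  */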
12379 rtx count = gen_reg_rtx (SImode);
12380 rtx_code_label *leave_label = gen_label_rtx ();
12382 /* Exit for success and permanent failures. */
12383 jump = s390_emit_jump (leave_label,
12384 gen_rtx_EQ (VOIDmode,
12385 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12386 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12387 LABEL_NUSES (leave_label) = 1;
12389 /* CC2 - transient failure. Perform retry with ppa. */
12390 emit_move_insn (count, retry_plus_two);
12391 emit_insn (gen_subsi3 (count, count, retry_reg));
12392 emit_insn (gen_tx_assist (count));
12393 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12396 JUMP_LABEL (jump) = retry_label;
12397 LABEL_NUSES (retry_label) = 1;
12398 emit_label (leave_label);
/* Return the decl for the target specific builtin with the function
   code FCODE.  */
12407 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12409 if (fcode >= S390_BUILTIN_MAX)
12410 return error_mark_node;
12412 return s390_builtin_decls[fcode];
12415 /* We call mcount before the function prologue. So a profiled leaf
12416 function should stay a leaf function. */
12419 s390_keep_leaf_when_profiled ()
/* Output assembly code for the trampoline template to FILE.
12427 On S/390, we use gpr 1 internally in the trampoline code;
12428 gpr 0 is used to hold the static chain. */
12431 s390_asm_trampoline_template (FILE *file)
12434 op[0] = gen_rtx_REG (Pmode, 0);
12435 op[1] = gen_rtx_REG (Pmode, 1);
12439 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12440 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12441 output_asm_insn ("br\t%1", op); /* 2 byte */
12442 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12446 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12447 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12448 output_asm_insn ("br\t%1", op); /* 2 byte */
12449 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
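/* Resulting trampoline layout (illustrative, 64-bit case):

     0:  basr  %r1,0           # %r1 = address of next insn
     2:  lmg   %r0,%r1,14(%r1) # load chain/target from offsets 16/24
     8:  br    %r1             # branch to target, chain in %r0
    16:  <static chain value>  # stored by s390_trampoline_init
    24:  <function address>    # stored by s390_trampoline_init  */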
12453 /* Emit RTL insns to initialize the variable parts of a trampoline.
12454 FNADDR is an RTX for the address of the function's pure code.
12455 CXT is an RTX for the static chain value for the function. */
12458 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12460 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12463 emit_block_move (m_tramp, assemble_trampoline_template (),
12464 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12466 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12467 emit_move_insn (mem, cxt);
12468 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12469 emit_move_insn (mem, fnaddr);
12472 /* Output assembler code to FILE to increment profiler label # LABELNO
12473 for profiling a function entry. */
12476 s390_function_profiler (FILE *file, int labelno)
12481 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12483 fprintf (file, "# function profiler \n");
12485 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12486 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12487 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12489 op[2] = gen_rtx_REG (Pmode, 1);
12490 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12491 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12493 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12496 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12497 op[4] = gen_rtx_CONST (Pmode, op[4]);
12502 output_asm_insn ("stg\t%0,%1", op);
12503 output_asm_insn ("larl\t%2,%3", op);
12504 output_asm_insn ("brasl\t%0,%4", op);
12505 output_asm_insn ("lg\t%0,%1", op);
12507 else if (TARGET_CPU_ZARCH)
12509 output_asm_insn ("st\t%0,%1", op);
12510 output_asm_insn ("larl\t%2,%3", op);
12511 output_asm_insn ("brasl\t%0,%4", op);
12512 output_asm_insn ("l\t%0,%1", op);
12514 else if (!flag_pic)
12516 op[6] = gen_label_rtx ();
12518 output_asm_insn ("st\t%0,%1", op);
12519 output_asm_insn ("bras\t%2,%l6", op);
12520 output_asm_insn (".long\t%4", op);
12521 output_asm_insn (".long\t%3", op);
12522 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12523 output_asm_insn ("l\t%0,0(%2)", op);
12524 output_asm_insn ("l\t%2,4(%2)", op);
12525 output_asm_insn ("basr\t%0,%0", op);
12526 output_asm_insn ("l\t%0,%1", op);
12530 op[5] = gen_label_rtx ();
12531 op[6] = gen_label_rtx ();
12533 output_asm_insn ("st\t%0,%1", op);
12534 output_asm_insn ("bras\t%2,%l6", op);
12535 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12536 output_asm_insn (".long\t%4-%l5", op);
12537 output_asm_insn (".long\t%3-%l5", op);
12538 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12539 output_asm_insn ("lr\t%0,%2", op);
12540 output_asm_insn ("a\t%0,0(%2)", op);
12541 output_asm_insn ("a\t%2,4(%2)", op);
12542 output_asm_insn ("basr\t%0,%0", op);
12543 output_asm_insn ("l\t%0,%1", op);
12547 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12548 into its SYMBOL_REF_FLAGS. */
12551 s390_encode_section_info (tree decl, rtx rtl, int first)
12553 default_encode_section_info (decl, rtl, first);
12555 if (TREE_CODE (decl) == VAR_DECL)
12557 /* Store the alignment to be able to check if we can use
12558 a larl/load-relative instruction. We only handle the cases
12559 that can go wrong (i.e. no FUNC_DECLs). */
12560 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12561 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12562 else if (DECL_ALIGN (decl) % 32)
12563 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12564 else if (DECL_ALIGN (decl) % 64)
12565 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12568 /* Literal pool references don't have a decl so they are handled
12569 differently here. We rely on the information in the MEM_ALIGN
12570 entry to decide upon the alignment. */
12572 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12573 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
12575 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12576 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12577 else if (MEM_ALIGN (rtl) % 32)
12578 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12579 else if (MEM_ALIGN (rtl) % 64)
12580 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12584 /* Output thunk to FILE that implements a C++ virtual function call (with
12585 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12586 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12587 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12588 relative to the resulting this pointer. */
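/* In C-like pseudocode the emitted thunk implements (sketch):

       this += DELTA;
       if (VCALL_OFFSET != 0)
	 this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
       goto *FUNCTION;  */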
12591 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12592 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12598 /* Make sure unwind info is emitted for the thunk if needed. */
12599 final_start_function (emit_barrier (), file, 1);
12601 /* Operand 0 is the target function. */
12602 op[0] = XEXP (DECL_RTL (function), 0);
12603 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12606 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12607 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12608 op[0] = gen_rtx_CONST (Pmode, op[0]);
12611 /* Operand 1 is the 'this' pointer. */
12612 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12613 op[1] = gen_rtx_REG (Pmode, 3);
12615 op[1] = gen_rtx_REG (Pmode, 2);
12617 /* Operand 2 is the delta. */
12618 op[2] = GEN_INT (delta);
12620 /* Operand 3 is the vcall_offset. */
12621 op[3] = GEN_INT (vcall_offset);
12623 /* Operand 4 is the temporary register. */
12624 op[4] = gen_rtx_REG (Pmode, 1);
12626 /* Operands 5 to 8 can be used as labels. */
/* Operand 9 can be used as a temporary register.  */
12635 /* Generate code. */
12638 /* Setup literal pool pointer if required. */
12639 if ((!DISP_IN_RANGE (delta)
12640 && !CONST_OK_FOR_K (delta)
12641 && !CONST_OK_FOR_Os (delta))
12642 || (!DISP_IN_RANGE (vcall_offset)
12643 && !CONST_OK_FOR_K (vcall_offset)
12644 && !CONST_OK_FOR_Os (vcall_offset)))
12646 op[5] = gen_label_rtx ();
12647 output_asm_insn ("larl\t%4,%5", op);
12650 /* Add DELTA to this pointer. */
12653 if (CONST_OK_FOR_J (delta))
12654 output_asm_insn ("la\t%1,%2(%1)", op);
12655 else if (DISP_IN_RANGE (delta))
12656 output_asm_insn ("lay\t%1,%2(%1)", op);
12657 else if (CONST_OK_FOR_K (delta))
12658 output_asm_insn ("aghi\t%1,%2", op);
12659 else if (CONST_OK_FOR_Os (delta))
12660 output_asm_insn ("agfi\t%1,%2", op);
12663 op[6] = gen_label_rtx ();
12664 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12668 /* Perform vcall adjustment. */
12671 if (DISP_IN_RANGE (vcall_offset))
12673 output_asm_insn ("lg\t%4,0(%1)", op);
12674 output_asm_insn ("ag\t%1,%3(%4)", op);
12676 else if (CONST_OK_FOR_K (vcall_offset))
12678 output_asm_insn ("lghi\t%4,%3", op);
12679 output_asm_insn ("ag\t%4,0(%1)", op);
12680 output_asm_insn ("ag\t%1,0(%4)", op);
12682 else if (CONST_OK_FOR_Os (vcall_offset))
12684 output_asm_insn ("lgfi\t%4,%3", op);
12685 output_asm_insn ("ag\t%4,0(%1)", op);
12686 output_asm_insn ("ag\t%1,0(%4)", op);
12690 op[7] = gen_label_rtx ();
12691 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12692 output_asm_insn ("ag\t%4,0(%1)", op);
12693 output_asm_insn ("ag\t%1,0(%4)", op);
12697 /* Jump to target. */
12698 output_asm_insn ("jg\t%0", op);
12700 /* Output literal pool if required. */
12703 output_asm_insn (".align\t4", op);
12704 targetm.asm_out.internal_label (file, "L",
12705 CODE_LABEL_NUMBER (op[5]));
12709 targetm.asm_out.internal_label (file, "L",
12710 CODE_LABEL_NUMBER (op[6]));
12711 output_asm_insn (".long\t%2", op);
12715 targetm.asm_out.internal_label (file, "L",
12716 CODE_LABEL_NUMBER (op[7]));
12717 output_asm_insn (".long\t%3", op);
12722 /* Setup base pointer if required. */
12724 || (!DISP_IN_RANGE (delta)
12725 && !CONST_OK_FOR_K (delta)
12726 && !CONST_OK_FOR_Os (delta))
|| (!DISP_IN_RANGE (vcall_offset)
12728 && !CONST_OK_FOR_K (vcall_offset)
12729 && !CONST_OK_FOR_Os (vcall_offset)))
12731 op[5] = gen_label_rtx ();
12732 output_asm_insn ("basr\t%4,0", op);
12733 targetm.asm_out.internal_label (file, "L",
12734 CODE_LABEL_NUMBER (op[5]));
12737 /* Add DELTA to this pointer. */
12740 if (CONST_OK_FOR_J (delta))
12741 output_asm_insn ("la\t%1,%2(%1)", op);
12742 else if (DISP_IN_RANGE (delta))
12743 output_asm_insn ("lay\t%1,%2(%1)", op);
12744 else if (CONST_OK_FOR_K (delta))
12745 output_asm_insn ("ahi\t%1,%2", op);
12746 else if (CONST_OK_FOR_Os (delta))
12747 output_asm_insn ("afi\t%1,%2", op);
12750 op[6] = gen_label_rtx ();
12751 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12755 /* Perform vcall adjustment. */
12758 if (CONST_OK_FOR_J (vcall_offset))
12760 output_asm_insn ("l\t%4,0(%1)", op);
12761 output_asm_insn ("a\t%1,%3(%4)", op);
12763 else if (DISP_IN_RANGE (vcall_offset))
12765 output_asm_insn ("l\t%4,0(%1)", op);
12766 output_asm_insn ("ay\t%1,%3(%4)", op);
12768 else if (CONST_OK_FOR_K (vcall_offset))
12770 output_asm_insn ("lhi\t%4,%3", op);
12771 output_asm_insn ("a\t%4,0(%1)", op);
12772 output_asm_insn ("a\t%1,0(%4)", op);
12774 else if (CONST_OK_FOR_Os (vcall_offset))
12776 output_asm_insn ("iilf\t%4,%3", op);
12777 output_asm_insn ("a\t%4,0(%1)", op);
12778 output_asm_insn ("a\t%1,0(%4)", op);
12782 op[7] = gen_label_rtx ();
12783 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12784 output_asm_insn ("a\t%4,0(%1)", op);
12785 output_asm_insn ("a\t%1,0(%4)", op);
12788 /* We had to clobber the base pointer register.
12789 Re-setup the base pointer (with a different base). */
12790 op[5] = gen_label_rtx ();
12791 output_asm_insn ("basr\t%4,0", op);
12792 targetm.asm_out.internal_label (file, "L",
12793 CODE_LABEL_NUMBER (op[5]));
12796 /* Jump to target. */
12797 op[8] = gen_label_rtx ();
12800 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12801 else if (!nonlocal)
12802 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12803 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12804 else if (flag_pic == 1)
12806 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12807 output_asm_insn ("l\t%4,%0(%4)", op);
12809 else if (flag_pic == 2)
12811 op[9] = gen_rtx_REG (Pmode, 0);
12812 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12813 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12814 output_asm_insn ("ar\t%4,%9", op);
12815 output_asm_insn ("l\t%4,0(%4)", op);
12818 output_asm_insn ("br\t%4", op);
12820 /* Output literal pool. */
12821 output_asm_insn (".align\t4", op);
12823 if (nonlocal && flag_pic == 2)
12824 output_asm_insn (".long\t%0", op);
12827 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12828 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12831 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12833 output_asm_insn (".long\t%0", op);
12835 output_asm_insn (".long\t%0-%5", op);
12839 targetm.asm_out.internal_label (file, "L",
12840 CODE_LABEL_NUMBER (op[6]));
12841 output_asm_insn (".long\t%2", op);
12845 targetm.asm_out.internal_label (file, "L",
12846 CODE_LABEL_NUMBER (op[7]));
12847 output_asm_insn (".long\t%3", op);
12850 final_end_function ();
12854 s390_valid_pointer_mode (machine_mode mode)
12856 return (mode == SImode || (TARGET_64BIT && mode == DImode));
/* Checks whether the given CALL_EXPR would use a call-saved
   register.  This is used to decide whether sibling call
   optimization could be performed on the respective function
   call.  */
12865 s390_call_saved_register_used (tree call_expr)
12867 CUMULATIVE_ARGS cum_v;
12868 cumulative_args_t cum;
12875 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12876 cum = pack_cumulative_args (&cum_v);
12878 for (i = 0; i < call_expr_nargs (call_expr); i++)
12880 parameter = CALL_EXPR_ARG (call_expr, i);
12881 gcc_assert (parameter);
12883 /* For an undeclared variable passed as parameter we will get
12884 an ERROR_MARK node here. */
12885 if (TREE_CODE (parameter) == ERROR_MARK)
12888 type = TREE_TYPE (parameter);
12891 mode = TYPE_MODE (type);
12894 /* We assume that in the target function all parameters are
12895 named. This only has an impact on vector argument register
usage, none of which are call-saved.  */
12897 if (pass_by_reference (&cum_v, mode, type, true))
12900 type = build_pointer_type (type);
12903 parm_rtx = s390_function_arg (cum, mode, type, true);
12905 s390_function_arg_advance (cum, mode, type, true);
12910 if (REG_P (parm_rtx))
12913 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12915 if (!call_used_regs[reg + REGNO (parm_rtx)])
12919 if (GET_CODE (parm_rtx) == PARALLEL)
12923 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12925 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12927 gcc_assert (REG_P (r));
12930 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12932 if (!call_used_regs[reg + REGNO (r)])
12941 /* Return true if the given call expression can be
12942 turned into a sibling call.
12943 DECL holds the declaration of the function to be called whereas
12944 EXP is the call expression itself. */
12947 s390_function_ok_for_sibcall (tree decl, tree exp)
12949 /* The TPF epilogue uses register 1. */
12950 if (TARGET_TPF_PROFILING)
12953 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12954 which would have to be restored before the sibcall. */
12955 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
/* Register 6 on s390 is available as an argument register but is
   call-saved, i.e. the callee must preserve its value.  This makes
   functions needing this register for arguments not suitable for
   sibcalls.  */
12961 return !s390_call_saved_register_used (exp);
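/* Example (illustrative): a callee such as
       void callee (long, long, long, long, long);
   needs %r6 for its fifth argument; since %r6 is call-saved,
   s390_call_saved_register_used detects this and the call is not
   turned into a sibcall.  */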
12964 /* Return the fixed registers used for condition codes. */
12967 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
*p1 = CC_REGNUM;
*p2 = INVALID_REGNUM;
12975 /* This function is used by the call expanders of the machine description.
12976 It emits the call insn itself together with the necessary operations
12977 to adjust the target address and returns the emitted insn.
12978 ADDR_LOCATION is the target address rtx
12979 TLS_CALL the location of the thread-local symbol
12980 RESULT_REG the register where the result of the call should be stored
12981 RETADDR_REG the register where the return address should be stored
12982 If this parameter is NULL_RTX the call is considered
12983 to be a sibling call. */
12986 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12989 bool plt_call = false;
12995 /* Direct function calls need special treatment. */
12996 if (GET_CODE (addr_location) == SYMBOL_REF)
12998 /* When calling a global routine in PIC mode, we must
12999 replace the symbol itself with the PLT stub. */
13000 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13002 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
13004 addr_location = gen_rtx_UNSPEC (Pmode,
13005 gen_rtvec (1, addr_location),
13007 addr_location = gen_rtx_CONST (Pmode, addr_location);
13011 /* For -fpic code the PLT entries might use r12 which is
13012 call-saved. Therefore we cannot do a sibcall when
13013 calling directly using a symbol ref. When reaching
13014 this point we decided (in s390_function_ok_for_sibcall)
13015 to do a sibcall for a function pointer but one of the
13016 optimizers was able to get rid of the function pointer
13017 by propagating the symbol ref into the call. This
13018 optimization is illegal for S/390 so we turn the direct
call into an indirect call again.  */
13020 addr_location = force_reg (Pmode, addr_location);
13023 /* Unless we can use the bras(l) insn, force the
13024 routine address into a register. */
13025 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13028 addr_location = legitimize_pic_address (addr_location, 0);
13030 addr_location = force_reg (Pmode, addr_location);
13034 /* If it is already an indirect call or the code above moved the
SYMBOL_REF to somewhere else, make sure the address can be found in
register 1.  */
13037 if (retaddr_reg == NULL_RTX
13038 && GET_CODE (addr_location) != SYMBOL_REF
13041 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13042 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13045 addr_location = gen_rtx_MEM (QImode, addr_location);
13046 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13048 if (result_reg != NULL_RTX)
13049 call = gen_rtx_SET (result_reg, call);
13051 if (retaddr_reg != NULL_RTX)
13053 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13055 if (tls_call != NULL_RTX)
13056 vec = gen_rtvec (3, call, clobber,
13057 gen_rtx_USE (VOIDmode, tls_call));
13059 vec = gen_rtvec (2, call, clobber);
13061 call = gen_rtx_PARALLEL (VOIDmode, vec);
13064 insn = emit_call_insn (call);
13066 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13067 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13069 /* s390_function_ok_for_sibcall should
13070 have denied sibcalls in this case. */
13071 gcc_assert (retaddr_reg != NULL_RTX);
13072 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
13077 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
13080 s390_conditional_register_usage (void)
13086 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13087 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13089 if (TARGET_CPU_ZARCH)
13091 fixed_regs[BASE_REGNUM] = 0;
13092 call_used_regs[BASE_REGNUM] = 0;
13093 fixed_regs[RETURN_REGNUM] = 0;
13094 call_used_regs[RETURN_REGNUM] = 0;
13098 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13099 call_used_regs[i] = call_really_used_regs[i] = 0;
13103 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13104 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13107 if (TARGET_SOFT_FLOAT)
13109 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13110 call_used_regs[i] = fixed_regs[i] = 1;
13113 /* Disable v16 - v31 for non-vector target. */
13116 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13117 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
/* The function corresponding to the eh_return expander.  */
13123 static GTY(()) rtx s390_tpf_eh_return_symbol;
13125 s390_emit_tpf_eh_return (rtx target)
13130 if (!s390_tpf_eh_return_symbol)
13131 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13133 reg = gen_rtx_REG (Pmode, 2);
13134 orig_ra = gen_rtx_REG (Pmode, 3);
13136 emit_move_insn (reg, target);
13137 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13138 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13139 gen_rtx_REG (Pmode, RETURN_REGNUM));
13140 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13141 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13143 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13146 /* Rework the prologue/epilogue to avoid saving/restoring
13147 registers unnecessarily. */
13150 s390_optimize_prologue (void)
13152 rtx_insn *insn, *new_insn, *next_insn;
13154 /* Do a final recompute of the frame-related data. */
13155 s390_optimize_register_info ();
13157 /* If all special registers are in fact used, there's nothing we
13158 can do, so no point in walking the insn list. */
13160 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13161 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13162 && (TARGET_CPU_ZARCH
13163 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13164 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13167 /* Search for prologue/epilogue insns and replace them. */
13169 for (insn = get_insns (); insn; insn = next_insn)
13171 int first, last, off;
13172 rtx set, base, offset;
13175 next_insn = NEXT_INSN (insn);
13177 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13180 pat = PATTERN (insn);
/* Remove ldgr/lgdr instructions used for saving and restoring
   GPRs if possible.  */
13188 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13189 tmp_pat = XVECEXP (pat, 0, 0);
13191 if (GET_CODE (tmp_pat) == SET
13192 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13193 && REG_P (SET_SRC (tmp_pat))
13194 && REG_P (SET_DEST (tmp_pat)))
13196 int src_regno = REGNO (SET_SRC (tmp_pat));
13197 int dest_regno = REGNO (SET_DEST (tmp_pat));
13201 if (!((GENERAL_REGNO_P (src_regno)
13202 && FP_REGNO_P (dest_regno))
13203 || (FP_REGNO_P (src_regno)
13204 && GENERAL_REGNO_P (dest_regno))))
13207 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13208 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13210 /* GPR must be call-saved, FPR must be call-clobbered. */
13211 if (!call_really_used_regs[fpr_regno]
13212 || call_really_used_regs[gpr_regno])
13215 /* It must not happen that what we once saved in an FPR now
13216 needs a stack slot. */
13217 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13219 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13221 remove_insn (insn);
13227 if (GET_CODE (pat) == PARALLEL
13228 && store_multiple_operation (pat, VOIDmode))
13230 set = XVECEXP (pat, 0, 0);
13231 first = REGNO (SET_SRC (set));
13232 last = first + XVECLEN (pat, 0) - 1;
13233 offset = const0_rtx;
13234 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13235 off = INTVAL (offset);
13237 if (GET_CODE (base) != REG || off < 0)
13239 if (cfun_frame_layout.first_save_gpr != -1
13240 && (cfun_frame_layout.first_save_gpr < first
13241 || cfun_frame_layout.last_save_gpr > last))
13243 if (REGNO (base) != STACK_POINTER_REGNUM
13244 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13246 if (first > BASE_REGNUM || last < BASE_REGNUM)
13249 if (cfun_frame_layout.first_save_gpr != -1)
13251 rtx s_pat = save_gprs (base,
13252 off + (cfun_frame_layout.first_save_gpr
13253 - first) * UNITS_PER_LONG,
13254 cfun_frame_layout.first_save_gpr,
13255 cfun_frame_layout.last_save_gpr);
13256 new_insn = emit_insn_before (s_pat, insn);
13257 INSN_ADDRESSES_NEW (new_insn, -1);
13260 remove_insn (insn);
13264 if (cfun_frame_layout.first_save_gpr == -1
13265 && GET_CODE (pat) == SET
13266 && GENERAL_REG_P (SET_SRC (pat))
13267 && GET_CODE (SET_DEST (pat)) == MEM)
13270 first = REGNO (SET_SRC (set));
13271 offset = const0_rtx;
13272 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13273 off = INTVAL (offset);
13275 if (GET_CODE (base) != REG || off < 0)
13277 if (REGNO (base) != STACK_POINTER_REGNUM
13278 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13281 remove_insn (insn);
13285 if (GET_CODE (pat) == PARALLEL
13286 && load_multiple_operation (pat, VOIDmode))
13288 set = XVECEXP (pat, 0, 0);
13289 first = REGNO (SET_DEST (set));
13290 last = first + XVECLEN (pat, 0) - 1;
13291 offset = const0_rtx;
13292 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13293 off = INTVAL (offset);
13295 if (GET_CODE (base) != REG || off < 0)
13298 if (cfun_frame_layout.first_restore_gpr != -1
13299 && (cfun_frame_layout.first_restore_gpr < first
13300 || cfun_frame_layout.last_restore_gpr > last))
13302 if (REGNO (base) != STACK_POINTER_REGNUM
13303 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13305 if (first > BASE_REGNUM || last < BASE_REGNUM)
13308 if (cfun_frame_layout.first_restore_gpr != -1)
13310 rtx rpat = restore_gprs (base,
13311 off + (cfun_frame_layout.first_restore_gpr
13312 - first) * UNITS_PER_LONG,
13313 cfun_frame_layout.first_restore_gpr,
13314 cfun_frame_layout.last_restore_gpr);
13316 /* Remove REG_CFA_RESTOREs for registers that we no
13317 longer need to save. */
13318 REG_NOTES (rpat) = REG_NOTES (insn);
for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13320 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13321 && ((int) REGNO (XEXP (*ptr, 0))
13322 < cfun_frame_layout.first_restore_gpr))
13323 *ptr = XEXP (*ptr, 1);
13325 ptr = &XEXP (*ptr, 1);
13326 new_insn = emit_insn_before (rpat, insn);
13327 RTX_FRAME_RELATED_P (new_insn) = 1;
13328 INSN_ADDRESSES_NEW (new_insn, -1);
13331 remove_insn (insn);
13335 if (cfun_frame_layout.first_restore_gpr == -1
13336 && GET_CODE (pat) == SET
13337 && GENERAL_REG_P (SET_DEST (pat))
13338 && GET_CODE (SET_SRC (pat)) == MEM)
13341 first = REGNO (SET_DEST (set));
13342 offset = const0_rtx;
13343 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13344 off = INTVAL (offset);
13346 if (GET_CODE (base) != REG || off < 0)
13349 if (REGNO (base) != STACK_POINTER_REGNUM
13350 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13353 remove_insn (insn);
13359 /* On z10 and later the dynamic branch prediction must see the
backward jump within a certain window.  If not, it falls back to
13361 the static prediction. This function rearranges the loop backward
13362 branch in a way which makes the static prediction always correct.
13363 The function returns true if it added an instruction. */
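/* Sketch of the rewrite performed below (illustrative):

       cond-jump .Ltop        # backward, > PREDICT_DISTANCE away

   becomes

       inv-cond-jump .Lnew    # inverted condition, short forward jump
       jump .Ltop             # unconditional backward jump - always
   .Lnew:                     # statically predicted correctly  */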
13365 s390_fix_long_loop_prediction (rtx_insn *insn)
13367 rtx set = single_set (insn);
13368 rtx code_label, label_ref;
13369 rtx_insn *uncond_jump;
13370 rtx_insn *cur_insn;
13374 /* This will exclude branch on count and branch on index patterns
13375 since these are correctly statically predicted. */
13377 || SET_DEST (set) != pc_rtx
13378 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
13381 /* Skip conditional returns. */
13382 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13383 && XEXP (SET_SRC (set), 2) == pc_rtx)
13386 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13387 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13389 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13391 code_label = XEXP (label_ref, 0);
13393 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13394 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13395 || (INSN_ADDRESSES (INSN_UID (insn))
13396 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13399 for (distance = 0, cur_insn = PREV_INSN (insn);
13400 distance < PREDICT_DISTANCE - 6;
13401 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13402 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13405 rtx_code_label *new_label = gen_label_rtx ();
13406 uncond_jump = emit_jump_insn_after (
13407 gen_rtx_SET (pc_rtx,
13408 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13410 emit_label_after (new_label, uncond_jump);
13412 tmp = XEXP (SET_SRC (set), 1);
13413 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13414 XEXP (SET_SRC (set), 2) = tmp;
13415 INSN_CODE (insn) = -1;
13417 XEXP (label_ref, 0) = new_label;
13418 JUMP_LABEL (insn) = new_label;
13419 JUMP_LABEL (uncond_jump) = code_label;
13424 /* Returns 1 if INSN reads the value of REG for purposes not related
13425 to addressing of memory, and 0 otherwise. */
13427 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13429 return reg_referenced_p (reg, PATTERN (insn))
13430 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13433 /* Starting from INSN find_cond_jump looks downwards in the insn
13434 stream for a single jump insn which is the last user of the
13435 condition code set in INSN. */
13437 find_cond_jump (rtx_insn *insn)
13439 for (; insn; insn = NEXT_INSN (insn))
13443 if (LABEL_P (insn))
13446 if (!JUMP_P (insn))
13448 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13453 /* This will be triggered by a return. */
13454 if (GET_CODE (PATTERN (insn)) != SET)
13457 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13458 ite = SET_SRC (PATTERN (insn));
13460 if (GET_CODE (ite) != IF_THEN_ELSE)
13463 cc = XEXP (XEXP (ite, 0), 0);
13464 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13467 if (find_reg_note (insn, REG_DEAD, cc))
13475 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13476 the semantics does not change. If NULL_RTX is passed as COND the
13477 function tries to find the conditional jump starting with INSN. */
13479 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13483 if (cond == NULL_RTX)
13485 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13486 rtx set = jump ? single_set (jump) : NULL_RTX;
13488 if (set == NULL_RTX)
13491 cond = XEXP (SET_SRC (set), 0);
std::swap (*op0, *op1);
PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13499 /* On z10, instructions of the compare-and-branch family have the
13500 property to access the register occurring as second operand with
13501 its bits complemented. If such a compare is grouped with a second
13502 instruction that accesses the same register non-complemented, and
13503 if that register's value is delivered via a bypass, then the
13504 pipeline recycles, thereby causing significant performance decline.
13505 This function locates such situations and exchanges the two
operands of the compare.  The function returns true whenever it
added an insn.  */
13509 s390_z10_optimize_cmp (rtx_insn *insn)
13511 rtx_insn *prev_insn, *next_insn;
13512 bool insn_added_p = false;
13513 rtx cond, *op0, *op1;
13515 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13517 /* Handle compare and branch and branch on count
13519 rtx pattern = single_set (insn);
13522 || SET_DEST (pattern) != pc_rtx
13523 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13526 cond = XEXP (SET_SRC (pattern), 0);
13527 op0 = &XEXP (cond, 0);
13528 op1 = &XEXP (cond, 1);
13530 else if (GET_CODE (PATTERN (insn)) == SET)
13534 /* Handle normal compare instructions. */
13535 src = SET_SRC (PATTERN (insn));
13536 dest = SET_DEST (PATTERN (insn));
13539 || !CC_REGNO_P (REGNO (dest))
13540 || GET_CODE (src) != COMPARE)
13543 /* s390_swap_cmp will try to find the conditional
13544 jump when passing NULL_RTX as condition. */
13546 op0 = &XEXP (src, 0);
13547 op1 = &XEXP (src, 1);
13552 if (!REG_P (*op0) || !REG_P (*op1))
13555 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13558 /* Swap the COMPARE arguments and its mask if there is a
13559 conflicting access in the previous insn. */
13560 prev_insn = prev_active_insn (insn);
13561 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13562 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13563 s390_swap_cmp (cond, op0, op1, insn);
13565 /* Check if there is a conflict with the next insn. If there
13566 was no conflict with the previous insn, then swap the
13567 COMPARE arguments and its mask. If we already swapped
13568 the operands, or if swapping them would cause a conflict
13569 with the previous insn, issue a NOP after the COMPARE in
order to separate the two instructions.  */
13571 next_insn = next_active_insn (insn);
13572 if (next_insn != NULL_RTX && INSN_P (next_insn)
13573 && s390_non_addr_reg_read_p (*op1, next_insn))
13575 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13576 && s390_non_addr_reg_read_p (*op0, prev_insn))
13578 if (REGNO (*op1) == 0)
13579 emit_insn_after (gen_nop1 (), insn);
13581 emit_insn_after (gen_nop (), insn);
13582 insn_added_p = true;
13585 s390_swap_cmp (cond, op0, op1, insn);
13587 return insn_added_p;
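/* Example (illustrative): in a sequence like

       lr   %r3,%r1      # %r1 value delivered via bypass
       cr   %r2,%r1      # compare reads %r1 as second operand

   the compare above is rewritten as "cr %r1,%r2" with the condition
   mask swapped accordingly, avoiding the complemented-access
   hazard.  */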
13590 /* Number of INSNs to be scanned backward in the last BB of the loop
13591 and forward in the first BB of the loop. This usually should be a
bit more than the number of INSNs which could go into one
group.  */
13594 #define S390_OSC_SCAN_INSN_NUM 5
/* Scan LOOP for static OSC (operand-store-compare) collisions and
   return true if an osc_break should be issued for this loop.  */
13599 s390_adjust_loop_scan_osc (struct loop* loop)
13602 HARD_REG_SET modregs, newregs;
13603 rtx_insn *insn, *store_insn = NULL;
13605 struct s390_address addr_store, addr_load;
13606 subrtx_iterator::array_type array;
13609 CLEAR_HARD_REG_SET (modregs);
13612 FOR_BB_INSNS_REVERSE (loop->latch, insn)
13614 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13618 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13621 find_all_hard_reg_sets (insn, &newregs, true);
13622 IOR_HARD_REG_SET (modregs, newregs);
13624 set = single_set (insn);
13628 if (MEM_P (SET_DEST (set))
13629 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
13636 if (store_insn == NULL_RTX)
13640 FOR_BB_INSNS (loop->header, insn)
13642 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13645 if (insn == store_insn)
13649 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13652 find_all_hard_reg_sets (insn, &newregs, true);
13653 IOR_HARD_REG_SET (modregs, newregs);
13655 set = single_set (insn);
13659 /* An intermediate store disrupts static OSC checking
13661 if (MEM_P (SET_DEST (set))
13662 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
13665 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
13667 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
13668 && rtx_equal_p (addr_load.base, addr_store.base)
13669 && rtx_equal_p (addr_load.indx, addr_store.indx)
13670 && rtx_equal_p (addr_load.disp, addr_store.disp))
13672 if ((addr_load.base != NULL_RTX
13673 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
13674 || (addr_load.indx != NULL_RTX
13675 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
13682 /* Look for adjustments which can be done on simple innermost
13685 s390_adjust_loops ()
13687 struct loop *loop = NULL;
13690 compute_bb_for_insn ();
13692 /* Find the loops. */
13693 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
13695 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
13699 flow_loop_dump (loop, dump_file, NULL, 0);
13700 fprintf (dump_file, ";; OSC loop scan Loop: ");
13702 if (loop->latch == NULL
13703 || pc_set (BB_END (loop->latch)) == NULL_RTX
13704 || !s390_adjust_loop_scan_osc (loop))
13708 if (loop->latch == NULL)
13709 fprintf (dump_file, " multiple backward jumps\n");
13712 fprintf (dump_file, " header insn: %d latch insn: %d ",
13713 INSN_UID (BB_HEAD (loop->header)),
13714 INSN_UID (BB_END (loop->latch)));
13715 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
13716 fprintf (dump_file, " loop does not end with jump\n");
13718 fprintf (dump_file, " not instrumented\n");
13724 rtx_insn *new_insn;
13727 fprintf (dump_file, " adding OSC break insn: ");
13728 new_insn = emit_insn_before (gen_osc_break (),
13729 BB_END (loop->latch));
13730 INSN_ADDRESSES_NEW (new_insn, -1);
13734 loop_optimizer_finalize ();
13736 df_finish_pass (false);
13739 /* Perform machine-dependent processing. */
13744 bool pool_overflow = false;
13745 int hw_before, hw_after;
13747 if (s390_tune == PROCESSOR_2964_Z13)
13748 s390_adjust_loops ();
13750 /* Make sure all splits have been performed; splits after
13751 machine_dependent_reorg might confuse insn length counts. */
13752 split_all_insns_noflow ();
13754 /* Install the main literal pool and the associated base
13755 register load insns.
13757 In addition, there are two problematic situations we need
13760 - the literal pool might be > 4096 bytes in size, so that
13761 some of its elements cannot be directly accessed
13763 - a branch target might be > 64K away from the branch, so that
13764 it is not possible to use a PC-relative instruction.
13766 To fix those, we split the single literal pool into multiple
13767 pool chunks, reloading the pool base register at various
13768 points throughout the function to ensure it always points to
13769 the pool chunk the following code expects, and/or replace
13770 PC-relative branches by absolute branches.
13772 However, the two problems are interdependent: splitting the
13773 literal pool can move a branch further away from its target,
13774 causing the 64K limit to overflow, and on the other hand,
13775 replacing a PC-relative branch by an absolute branch means
13776 we need to put the branch target address into the literal
13777 pool, possibly causing it to overflow.
13779 So, we loop trying to fix up both problems until we manage
13780 to satisfy both conditions at the same time. Note that the
13781 loop is guaranteed to terminate as every pass of the loop
13782 strictly decreases the total number of PC-relative branches
13783 in the function. (This is not completely true as there
13784 might be branch-over-pool insns introduced by chunkify_start.
13785 Those never need to be split however.) */
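/* A condensed sketch (illustrative, using the helper names from this
   file) of the fixup loop that follows:

       for (;;)
         {
           pool = pool_overflow ? s390_chunkify_start ()
                                : s390_mainpool_start ();
           if (no out-of-range branch had to be split)
             break;            <- both conditions satisfied
           cancel POOL and retry with the new literal pool entries;
         }  */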
13789 struct constant_pool *pool = NULL;
13791 /* Collect the literal pool. */
13792 if (!pool_overflow)
13794 pool = s390_mainpool_start ();
13796 pool_overflow = true;
13799 /* If literal pool overflowed, start to chunkify it. */
13801 pool = s390_chunkify_start ();
13803 /* Split out-of-range branches. If this has created new
13804 literal pool entries, cancel current chunk list and
13805 recompute it. zSeries machines have large branch
13806 instructions, so we never need to split a branch. */
13807 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13810 s390_chunkify_cancel (pool);
13812 s390_mainpool_cancel (pool);
13817 /* If we made it up to here, both conditions are satisfied.
13818 Finish up literal pool related changes. */
13820 s390_chunkify_finish (pool);
13822 s390_mainpool_finish (pool);
13824 /* We're done splitting branches. */
13825 cfun->machine->split_branches_pending_p = false;
13829 /* Generate out-of-pool execute target insns. */
13830 if (TARGET_CPU_ZARCH)
13832 rtx_insn *insn, *target;
13835 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13837 label = s390_execute_label (insn);
13841 gcc_assert (label != const0_rtx);
13843 target = emit_label (XEXP (label, 0));
13844 INSN_ADDRESSES_NEW (target, -1);
13846 target = emit_insn (s390_execute_target (insn));
13847 INSN_ADDRESSES_NEW (target, -1);
13851 /* Try to optimize prologue and epilogue further. */
13852 s390_optimize_prologue ();
13854 /* Walk over the insns and do some >=z10 specific changes. */
13855 if (s390_tune >= PROCESSOR_2097_Z10)
13858 bool insn_added_p = false;
13860 /* The insn lengths and addresses have to be up to date for the
13861 following manipulations. */
13862 shorten_branches (get_insns ());
13864 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13866 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13870 insn_added_p |= s390_fix_long_loop_prediction (insn);
13872 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13873 || GET_CODE (PATTERN (insn)) == SET)
13874 && s390_tune == PROCESSOR_2097_Z10)
13875 insn_added_p |= s390_z10_optimize_cmp (insn);
13878 /* Adjust branches if we added new instructions. */
13880 shorten_branches (get_insns ());
13883 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13888 /* Insert NOPs for hotpatching. */
13889 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13891 1. inside the area covered by debug information to allow setting
13892 breakpoints at the NOPs,
13893 2. before any insn which results in an asm instruction,
13894 3. before in-function labels to avoid jumping to the NOPs, for
13895 example as part of a loop,
13896 4. before any barrier in case the function is completely empty
13897 (__builtin_unreachable ()) and has neither internal labels nor
13900 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13902 /* Output a series of NOPs before the first active insn. */
13903 while (insn && hw_after > 0)
13905 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13907 emit_insn_before (gen_nop_6_byte (), insn);
13910 else if (hw_after >= 2)
13912 emit_insn_before (gen_nop_4_byte (), insn);
13917 emit_insn_before (gen_nop_2_byte (), insn);
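/* Worked example (illustrative): hw_after == 5 halfwords are covered by
   one 6-byte NOP (3 halfwords) followed by one 4-byte NOP (2 halfwords);
   a single remaining halfword is filled with a 2-byte NOP.  */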
13924 /* Return true if INSN is a fp load insn writing register REGNO. */
13926 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13929 enum attr_type flag = s390_safe_attr_type (insn);
13931 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13934 set = single_set (insn);
13936 if (set == NULL_RTX)
13939 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13942 if (REGNO (SET_DEST (set)) != regno)
13948 /* This value describes the distance to be avoided between an
13949 arithmetic fp instruction and an fp load writing the same register.
13950 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
13951 fine but the exact value has to be avoided. Otherwise the FP
13952 pipeline will throw an exception causing a major penalty. */
13953 #define Z10_EARLYLOAD_DISTANCE 7
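/* Example (illustrative): distances of 6 or 8 active insns between the
   arithmetic FP instruction and the dependent FP load are harmless;
   only the exact distance of 7 triggers the FP pipeline exception.  */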
13955 /* Rearrange the ready list in order to avoid the situation described
13956 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13957 moved to the very end of the ready list. */
13959 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13961 unsigned int regno;
13962 int nready = *nready_p;
13967 enum attr_type flag;
13970 /* Skip DISTANCE - 1 active insns. */
13971 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13972 distance > 0 && insn != NULL_RTX;
13973 distance--, insn = prev_active_insn (insn))
13974 if (CALL_P (insn) || JUMP_P (insn))
13977 if (insn == NULL_RTX)
13980 set = single_set (insn);
13982 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13983 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13986 flag = s390_safe_attr_type (insn);
13988 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13991 regno = REGNO (SET_DEST (set));
13994 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14001 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
14006 /* The s390_sched_state variable tracks the state of the current or
14007 the last instruction group.
14009 0,1,2 number of instructions scheduled in the current group
14010 3 the last group is complete - normal insns
14011 4 the last group was a cracked/expanded insn */
14013 static int s390_sched_state;
14015 #define S390_SCHED_STATE_NORMAL 3
14016 #define S390_SCHED_STATE_CRACKED 4
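/* Example (illustrative) of the transitions performed by
   s390_sched_variable_issue below: three normal insns advance the state
   0 -> 1 -> 2 -> 3 (NORMAL, group complete); a further normal insn
   starts a new group at state 1; a cracked or expanded insn enters
   state 4 (CRACKED); and a normal insn issued right after a cracked one
   goes straight back to NORMAL.  */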
14018 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14019 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14020 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14021 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
14023 static unsigned int
14024 s390_get_sched_attrmask (rtx_insn *insn)
14026 unsigned int mask = 0;
14030 case PROCESSOR_2827_ZEC12:
14031 if (get_attr_zEC12_cracked (insn))
14032 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14033 if (get_attr_zEC12_expanded (insn))
14034 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14035 if (get_attr_zEC12_endgroup (insn))
14036 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14037 if (get_attr_zEC12_groupalone (insn))
14038 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14040 case PROCESSOR_2964_Z13:
14041 case PROCESSOR_ARCH12:
14042 if (get_attr_z13_cracked (insn))
14043 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14044 if (get_attr_z13_expanded (insn))
14045 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14046 if (get_attr_z13_endgroup (insn))
14047 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14048 if (get_attr_z13_groupalone (insn))
14049 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14052 gcc_unreachable ();
14057 static unsigned int
14058 s390_get_unit_mask (rtx_insn *insn, int *units)
14060 unsigned int mask = 0;
14064 case PROCESSOR_2964_Z13:
14065 case PROCESSOR_ARCH12:
14067 if (get_attr_z13_unit_lsu (insn))
14069 if (get_attr_z13_unit_fxu (insn))
14071 if (get_attr_z13_unit_vfu (insn))
14075 gcc_unreachable ();
14080 /* Return the scheduling score for INSN. The higher the score the
14081 better. The score is calculated from the OOO scheduling attributes
14082 of INSN and the scheduling state s390_sched_state. */
14084 s390_sched_score (rtx_insn *insn)
14086 unsigned int mask = s390_get_sched_attrmask (insn);
14089 switch (s390_sched_state)
14092 /* Try to put insns into the first slot which would otherwise
14094 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14095 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14097 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14101 /* Prefer non-cracked insns while trying to put together a
14103 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14104 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14105 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14107 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14111 /* Prefer non-cracked insns while trying to put together a
14113 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14114 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14115 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14117 /* Prefer endgroup insns in the last slot. */
14118 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14121 case S390_SCHED_STATE_NORMAL:
14122 /* Prefer non-cracked insns if the last insn was not cracked. */
14123 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14124 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14126 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14129 case S390_SCHED_STATE_CRACKED:
14130 /* Try to keep cracked insns together to prevent them from
14131 interrupting groups. */
14132 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14133 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14138 if (s390_tune >= PROCESSOR_2964_Z13)
14141 unsigned unit_mask, m = 1;
14143 unit_mask = s390_get_unit_mask (insn, &units);
14144 gcc_assert (units <= MAX_SCHED_UNITS);
14146 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14147 ago the last insn of this unit type got scheduled. This is
14148 supposed to help provide a proper instruction mix to the
14150 for (i = 0; i < units; i++, m <<= 1)
14152 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14153 MAX_SCHED_MIX_DISTANCE);
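/* Worked example (illustrative; assuming, say, MAX_SCHED_MIX_SCORE == 8
   and MAX_SCHED_MIX_DISTANCE == 100): a unit needed by INSN that was
   last busy 100 or more insns ago contributes 8 to the score, while a
   unit used by the directly preceding insn contributes 0, steering the
   scheduler towards an even unit mix.  */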
14158 /* This function is called via hook TARGET_SCHED_REORDER before
14159 issuing one insn from list READY which contains *NREADYP entries.
14160 For target z10 it reorders load instructions to avoid early load
14161 conflicts in the floating point pipeline */
14163 s390_sched_reorder (FILE *file, int verbose,
14164 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
14166 if (s390_tune == PROCESSOR_2097_Z10
14167 && reload_completed
14169 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
14171 if (s390_tune >= PROCESSOR_2827_ZEC12
14172 && reload_completed
14176 int last_index = *nreadyp - 1;
14177 int max_index = -1;
14178 int max_score = -1;
14181 /* Just move the insn with the highest score to the top (the
14182 end) of the list. A full sort is not needed since a conflict
14183 in the hazard recognition cannot happen. So the top insn in
14184 the ready list will always be taken. */
14185 for (i = last_index; i >= 0; i--)
14189 if (recog_memoized (ready[i]) < 0)
14192 score = s390_sched_score (ready[i]);
14193 if (score > max_score)
14200 if (max_index != -1)
14202 if (max_index != last_index)
14204 tmp = ready[max_index];
14205 ready[max_index] = ready[last_index];
14206 ready[last_index] = tmp;
14210 ";;\t\tBACKEND: move insn %d to the top of list\n",
14211 INSN_UID (ready[last_index]));
14213 else if (verbose > 5)
14215 ";;\t\tBACKEND: best insn %d already on top\n",
14216 INSN_UID (ready[last_index]));
14221 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14224 for (i = last_index; i >= 0; i--)
14226 unsigned int sched_mask;
14227 rtx_insn *insn = ready[i];
14229 if (recog_memoized (insn) < 0)
14232 sched_mask = s390_get_sched_attrmask (insn);
14233 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14235 s390_sched_score (insn));
14236 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14237 ((M) & sched_mask) ? #ATTR : "");
14238 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14239 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14240 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14241 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14242 #undef PRINT_SCHED_ATTR
14243 if (s390_tune >= PROCESSOR_2964_Z13)
14245 unsigned int unit_mask, m = 1;
14248 unit_mask = s390_get_unit_mask (insn, &units);
14249 fprintf (file, "(units:");
14250 for (j = 0; j < units; j++, m <<= 1)
14252 fprintf (file, " u%d", j);
14253 fprintf (file, ")");
14255 fprintf (file, "\n");
14260 return s390_issue_rate ();
14264 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14265 the scheduler has issued INSN. It stores the last issued insn into
14266 last_scheduled_insn in order to make it available for
14267 s390_sched_reorder. */
14269 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14271 last_scheduled_insn = insn;
14273 if (s390_tune >= PROCESSOR_2827_ZEC12
14274 && reload_completed
14275 && recog_memoized (insn) >= 0)
14277 unsigned int mask = s390_get_sched_attrmask (insn);
14279 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14280 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14281 s390_sched_state = S390_SCHED_STATE_CRACKED;
14282 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14283 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14284 s390_sched_state = S390_SCHED_STATE_NORMAL;
14287 /* Only normal insns are left (mask == 0). */
14288 switch (s390_sched_state)
14293 case S390_SCHED_STATE_NORMAL:
14294 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
14295 s390_sched_state = 1;
14297 s390_sched_state++;
14300 case S390_SCHED_STATE_CRACKED:
14301 s390_sched_state = S390_SCHED_STATE_NORMAL;
14306 if (s390_tune >= PROCESSOR_2964_Z13)
14309 unsigned unit_mask, m = 1;
14311 unit_mask = s390_get_unit_mask (insn, &units);
14312 gcc_assert (units <= MAX_SCHED_UNITS);
14314 for (i = 0; i < units; i++, m <<= 1)
14316 last_scheduled_unit_distance[i] = 0;
14317 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14318 last_scheduled_unit_distance[i]++;
14323 unsigned int sched_mask;
14325 sched_mask = s390_get_sched_attrmask (insn);
14327 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14328 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14329 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14330 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14331 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14332 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14333 #undef PRINT_SCHED_ATTR
14335 if (s390_tune >= PROCESSOR_2964_Z13)
14337 unsigned int unit_mask, m = 1;
14340 unit_mask = s390_get_unit_mask (insn, &units);
14341 fprintf (file, "(units:");
14342 for (j = 0; j < units; j++, m <<= 1)
14344 fprintf (file, " %d", j);
14345 fprintf (file, ")");
14347 fprintf (file, " sched state: %d\n", s390_sched_state);
14349 if (s390_tune >= PROCESSOR_2964_Z13)
14353 s390_get_unit_mask (insn, &units);
14355 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14356 for (j = 0; j < units; j++)
14357 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14358 fprintf (file, "\n");
14363 if (GET_CODE (PATTERN (insn)) != USE
14364 && GET_CODE (PATTERN (insn)) != CLOBBER)
14371 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14372 int verbose ATTRIBUTE_UNUSED,
14373 int max_ready ATTRIBUTE_UNUSED)
14375 last_scheduled_insn = NULL;
14376 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14377 s390_sched_state = 0;
14380 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14381 how many times the loop LOOP should be unrolled when tuning for CPUs
14382 with a built-in stride prefetcher.
14383 The loop body is analyzed for memory accesses by iterating over each
14384 RTX of the loop. Depending on the loop depth and the number of memory
14385 accesses, a new unroll factor <= nunroll is returned to improve the
14386 behavior of the hardware prefetch unit.
14388 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14393 unsigned mem_count = 0;
14395 if (s390_tune < PROCESSOR_2097_Z10)
14398 /* Count the number of memory references within the loop body. */
14399 bbs = get_loop_body (loop);
14400 subrtx_iterator::array_type array;
14401 for (i = 0; i < loop->num_nodes; i++)
14402 FOR_BB_INSNS (bbs[i], insn)
14403 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14404 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14409 /* Prevent division by zero; nunroll needs no adjustment in that case anyway. */
14410 if (mem_count == 0)
14413 switch (loop_depth (loop))
14416 return MIN (nunroll, 28 / mem_count);
14418 return MIN (nunroll, 22 / mem_count);
14420 return MIN (nunroll, 16 / mem_count);
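/* Worked example (illustrative): for a depth-1 innermost loop with
   mem_count == 4 the cap is 28 / 4 == 7, so a requested unroll factor
   of 8 is reduced to 7 while a requested factor of 4 stays untouched.  */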
14424 /* Restore the current options. This is a hook function and also called
14428 s390_function_specific_restore (struct gcc_options *opts,
14429 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14431 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14435 s390_option_override_internal (bool main_args_p,
14436 struct gcc_options *opts,
14437 const struct gcc_options *opts_set)
14439 const char *prefix;
14440 const char *suffix;
14442 /* Set up prefix/suffix so the error messages refer to either the command
14443 line argument or the attribute(target). */
14451 prefix = "option(\"";
14456 /* Architecture mode defaults according to ABI. */
14457 if (!(opts_set->x_target_flags & MASK_ZARCH))
14460 opts->x_target_flags |= MASK_ZARCH;
14462 opts->x_target_flags &= ~MASK_ZARCH;
14465 /* Set the march default in case it hasn't been specified on cmdline. */
14466 if (!opts_set->x_s390_arch)
14467 opts->x_s390_arch = PROCESSOR_2064_Z900;
14468 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14469 || opts->x_s390_arch == PROCESSOR_9672_G6)
14470 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14471 "in future releases; use at least %sarch=z900%s",
14472 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14473 suffix, prefix, suffix);
14475 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14477 /* Determine processor to tune for. */
14478 if (!opts_set->x_s390_tune)
14479 opts->x_s390_tune = opts->x_s390_arch;
14480 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14481 || opts->x_s390_tune == PROCESSOR_9672_G6)
14482 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14483 "in future releases; use at least %stune=z900%s",
14484 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14485 suffix, prefix, suffix);
14487 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14489 /* Sanity checks. */
14490 if (opts->x_s390_arch == PROCESSOR_NATIVE
14491 || opts->x_s390_tune == PROCESSOR_NATIVE)
14492 gcc_unreachable ();
14493 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14494 error ("z/Architecture mode not supported on %s",
14495 processor_table[(int)opts->x_s390_arch].name);
14496 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14497 error ("64-bit ABI not supported in ESA/390 mode");
14499 /* Enable hardware transactions if available and not explicitly
14500 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14501 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14503 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14504 opts->x_target_flags |= MASK_OPT_HTM;
14506 opts->x_target_flags &= ~MASK_OPT_HTM;
14509 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14511 if (TARGET_OPT_VX_P (opts->x_target_flags))
14513 if (!TARGET_CPU_VX_P (opts))
14514 error ("hardware vector support not available on %s",
14515 processor_table[(int)opts->x_s390_arch].name);
14516 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14517 error ("hardware vector support not available with -msoft-float");
14522 if (TARGET_CPU_VX_P (opts))
14523 /* Enable vector support if available and not explicitly disabled
14524 by user. E.g. with -m31 -march=z13 -mzarch */
14525 opts->x_target_flags |= MASK_OPT_VX;
14527 opts->x_target_flags &= ~MASK_OPT_VX;
14530 /* Use hardware DFP if available and not explicitly disabled by
14531 user. E.g. with -m31 -march=z10 -mzarch */
14532 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14534 if (TARGET_DFP_P (opts))
14535 opts->x_target_flags |= MASK_HARD_DFP;
14537 opts->x_target_flags &= ~MASK_HARD_DFP;
14540 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14542 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14544 if (!TARGET_CPU_DFP_P (opts))
14545 error ("hardware decimal floating point instructions"
14546 " not available on %s",
14547 processor_table[(int)opts->x_s390_arch].name);
14548 if (!TARGET_ZARCH_P (opts->x_target_flags))
14549 error ("hardware decimal floating point instructions"
14550 " not available in ESA/390 mode");
14553 opts->x_target_flags &= ~MASK_HARD_DFP;
14556 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14557 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14559 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14560 && TARGET_HARD_DFP_P (opts->x_target_flags))
14561 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14563 opts->x_target_flags &= ~MASK_HARD_DFP;
14566 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14567 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14568 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14569 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14572 if (opts->x_s390_stack_size)
14574 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14575 error ("stack size must be greater than the stack guard value");
14576 else if (opts->x_s390_stack_size > 1 << 16)
14577 error ("stack size must not be greater than 64k");
14579 else if (opts->x_s390_stack_guard)
14580 error ("-mstack-guard implies use of -mstack-size");
14582 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14583 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14584 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14587 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14589 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14590 opts->x_param_values,
14591 opts_set->x_param_values);
14592 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14593 opts->x_param_values,
14594 opts_set->x_param_values);
14595 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14596 opts->x_param_values,
14597 opts_set->x_param_values);
14598 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14599 opts->x_param_values,
14600 opts_set->x_param_values);
14603 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14604 opts->x_param_values,
14605 opts_set->x_param_values);
14606 /* Values for loop prefetching. */
14607 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14608 opts->x_param_values,
14609 opts_set->x_param_values);
14610 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14611 opts->x_param_values,
14612 opts_set->x_param_values);
14613 /* s390 has more than 2 cache levels and their sizes are much larger.
14614 Since we are always running virtualized, assume that we only get a
14615 small part of the caches above L1. */
14616 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14617 opts->x_param_values,
14618 opts_set->x_param_values);
14619 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14620 opts->x_param_values,
14621 opts_set->x_param_values);
14622 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14623 opts->x_param_values,
14624 opts_set->x_param_values);
14626 /* Use the alternative scheduling-pressure algorithm by default. */
14627 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14628 opts->x_param_values,
14629 opts_set->x_param_values);
14631 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
14632 opts->x_param_values,
14633 opts_set->x_param_values);
14635 /* Call target specific restore function to do post-init work. At the moment,
14636 this just sets opts->x_s390_cost_pointer. */
14637 s390_function_specific_restore (opts, NULL);
14641 s390_option_override (void)
14644 cl_deferred_option *opt;
14645 vec<cl_deferred_option> *v =
14646 (vec<cl_deferred_option> *) s390_deferred_options;
14649 FOR_EACH_VEC_ELT (*v, i, opt)
14651 switch (opt->opt_index)
14653 case OPT_mhotpatch_:
14660 strncpy (s, opt->arg, 256);
14662 t = strchr (s, ',');
14667 val1 = integral_argument (s);
14668 val2 = integral_argument (t);
14675 if (val1 == -1 || val2 == -1)
14677 /* The argument is not a plain number. */
14678 error ("arguments to %qs should be non-negative integers",
14682 else if (val1 > s390_hotpatch_hw_max
14683 || val2 > s390_hotpatch_hw_max)
14685 error ("argument to %qs is too large (max. %d)",
14686 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14689 s390_hotpatch_hw_before_label = val1;
14690 s390_hotpatch_hw_after_label = val2;
14694 gcc_unreachable ();
14698 /* Set up function hooks. */
14699 init_machine_status = s390_init_machine_status;
14701 s390_option_override_internal (true, &global_options, &global_options_set);
14703 /* Save the initial options in case the user does function specific
14705 target_option_default_node = build_target_option_node (&global_options);
14706 target_option_current_node = target_option_default_node;
14708 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14709 requires the arch flags to be evaluated already. Since prefetching
14710 is beneficial on s390, we enable it if available. */
14711 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14712 flag_prefetch_loop_arrays = 1;
14716 /* Don't emit DWARF3/4 unless specifically selected. The TPF
14717 debuggers do not yet support DWARF 3/4. */
14718 if (!global_options_set.x_dwarf_strict)
14720 if (!global_options_set.x_dwarf_version)
14724 /* Register a target-specific optimization-and-lowering pass
14725 to run immediately before prologue and epilogue generation.
14727 Registering the pass must be done at start up. It's
14728 convenient to do it here. */
14729 opt_pass *new_pass = new pass_s390_early_mach (g);
14730 struct register_pass_info insert_pass_s390_early_mach =
14732 new_pass, /* pass */
14733 "pro_and_epilogue", /* reference_pass_name */
14734 1, /* ref_pass_instance_number */
14735 PASS_POS_INSERT_BEFORE /* po_op */
14737 register_pass (&insert_pass_s390_early_mach);
14740 #if S390_USE_TARGET_ATTRIBUTE
14741 /* Inner function to process the attribute((target(...))): take an argument
14742 and set the current options from it. If we have a list, recursively go
14746 s390_valid_target_attribute_inner_p (tree args,
14747 struct gcc_options *opts,
14748 struct gcc_options *new_opts_set,
14754 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
14755 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
14756 static const struct
14758 const char *string;
14762 int only_as_pragma;
14765 S390_ATTRIB ("arch=", OPT_march_, 1),
14766 S390_ATTRIB ("tune=", OPT_mtune_, 1),
14767 /* uinteger options */
14768 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
14769 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
14770 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
14771 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
14773 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
14774 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
14775 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
14776 S390_ATTRIB ("htm", OPT_mhtm, 0),
14777 S390_ATTRIB ("vx", OPT_mvx, 0),
14778 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
14779 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
14780 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
14781 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
14782 S390_PRAGMA ("zvector", OPT_mzvector, 0),
14783 /* boolean options */
14784 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
14789 /* If this is a list, recurse to get the options. */
14790 if (TREE_CODE (args) == TREE_LIST)
14793 int num_pragma_values;
14796 /* Note: attribs.c:decl_attributes prepends the values from
14797 current_target_pragma to the list of target attributes. To determine
14798 whether we're looking at a value of the attribute or the pragma we
14799 assume that the first [list_length (current_target_pragma)] values in
14800 the list are the values from the pragma. */
14801 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
14802 ? list_length (current_target_pragma) : 0;
14803 for (i = 0; args; args = TREE_CHAIN (args), i++)
14807 is_pragma = (force_pragma || i < num_pragma_values);
14808 if (TREE_VALUE (args)
14809 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
14810 opts, new_opts_set,
14819 else if (TREE_CODE (args) != STRING_CST)
14821 error ("attribute %<target%> argument not a string");
14825 /* Handle multiple arguments separated by commas. */
14826 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14828 while (next_optstr && *next_optstr != '\0')
14830 char *p = next_optstr;
14832 char *comma = strchr (next_optstr, ',');
14833 size_t len, opt_len;
14839 enum cl_var_type var_type;
14845 len = comma - next_optstr;
14846 next_optstr = comma + 1;
14851 next_optstr = NULL;
14854 /* Recognize no-xxx. */
14855 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14864 /* Find the option. */
14867 for (i = 0; i < ARRAY_SIZE (attrs); i++)
14869 opt_len = attrs[i].len;
14870 if (ch == attrs[i].string[0]
14871 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14872 && memcmp (p, attrs[i].string, opt_len) == 0)
14874 opt = attrs[i].opt;
14875 if (!opt_set_p && cl_options[opt].cl_reject_negative)
14877 mask = cl_options[opt].var_value;
14878 var_type = cl_options[opt].var_type;
14884 /* Process the option. */
14887 error ("attribute(target(\"%s\")) is unknown", orig_p);
14890 else if (attrs[i].only_as_pragma && !force_pragma)
14892 /* Value is not allowed for the target attribute. */
14893 error ("value %qs is not supported by attribute %<target%>",
14898 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14900 if (var_type == CLVC_BIT_CLEAR)
14901 opt_set_p = !opt_set_p;
14904 opts->x_target_flags |= mask;
14906 opts->x_target_flags &= ~mask;
14907 new_opts_set->x_target_flags |= mask;
14910 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14914 if (cl_options[opt].cl_uinteger)
14916 /* Unsigned integer argument. Code based on the function
14917 decode_cmdline_option () in opts-common.c. */
14918 value = integral_argument (p + opt_len);
14921 value = (opt_set_p) ? 1 : 0;
14925 struct cl_decoded_option decoded;
14927 /* Value range check; only implemented for numeric and boolean
14928 options at the moment. */
14929 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14930 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14931 set_option (opts, new_opts_set, opt, value,
14932 p + opt_len, DK_UNSPECIFIED, input_location,
14937 error ("attribute(target(\"%s\")) is unknown", orig_p);
14942 else if (cl_options[opt].var_type == CLVC_ENUM)
14947 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14949 set_option (opts, new_opts_set, opt, value,
14950 p + opt_len, DK_UNSPECIFIED, input_location,
14954 error ("attribute(target(\"%s\")) is unknown", orig_p);
14960 gcc_unreachable ();
14965 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
14968 s390_valid_target_attribute_tree (tree args,
14969 struct gcc_options *opts,
14970 const struct gcc_options *opts_set,
14973 tree t = NULL_TREE;
14974 struct gcc_options new_opts_set;
14976 memset (&new_opts_set, 0, sizeof (new_opts_set));
14978 /* Process each of the options on the chain. */
14979 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
14981 return error_mark_node;
14983 /* If some option was set (even if it has not changed), rerun
14984 s390_option_override_internal, and then save the options away. */
14985 if (new_opts_set.x_target_flags
14986 || new_opts_set.x_s390_arch
14987 || new_opts_set.x_s390_tune
14988 || new_opts_set.x_s390_stack_guard
14989 || new_opts_set.x_s390_stack_size
14990 || new_opts_set.x_s390_branch_cost
14991 || new_opts_set.x_s390_warn_framesize
14992 || new_opts_set.x_s390_warn_dynamicstack_p)
14994 const unsigned char *src = (const unsigned char *)opts_set;
14995 unsigned char *dest = (unsigned char *)&new_opts_set;
14998 /* Merge the original option flags into the new ones. */
14999 for (i = 0; i < sizeof(*opts_set); i++)
15002 /* Do any overrides, such as arch=xxx or tune=xxx support. */
15003 s390_option_override_internal (false, opts, &new_opts_set);
15004 /* Save the current options unless we are validating options for
15006 t = build_target_option_node (opts);
15011 /* Hook to validate attribute((target("string"))). */
15014 s390_valid_target_attribute_p (tree fndecl,
15015 tree ARG_UNUSED (name),
15017 int ARG_UNUSED (flags))
15019 struct gcc_options func_options;
15020 tree new_target, new_optimize;
15023 /* attribute((target("default"))) does nothing, beyond
15024 affecting multi-versioning. */
15025 if (TREE_VALUE (args)
15026 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
15027 && TREE_CHAIN (args) == NULL_TREE
15028 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
15031 tree old_optimize = build_optimization_node (&global_options);
15033 /* Get the optimization options of the current function. */
15034 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
15036 if (!func_optimize)
15037 func_optimize = old_optimize;
15039 /* Init func_options. */
15040 memset (&func_options, 0, sizeof (func_options));
15041 init_options_struct (&func_options, NULL);
15042 lang_hooks.init_options_struct (&func_options);
15044 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
15046 /* Initialize func_options to the default before its target options can
15048 cl_target_option_restore (&func_options,
15049 TREE_TARGET_OPTION (target_option_default_node));
15051 new_target = s390_valid_target_attribute_tree (args, &func_options,
15052 &global_options_set,
15054 current_target_pragma));
15055 new_optimize = build_optimization_node (&func_options);
15056 if (new_target == error_mark_node)
15058 else if (fndecl && new_target)
15060 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
15061 if (old_optimize != new_optimize)
15062 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
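/* Usage example (illustrative):

       __attribute__ ((target ("arch=z13,no-vx")))
       void f (void);

   "arch=z13" is routed through OPT_march_, while the "no-" prefix makes
   the "vx" entry clear MASK_OPT_VX for this function only.  */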
15067 /* Restore target globals from NEW_TREE and invalidate s390_previous_fndecl
15071 s390_activate_target_options (tree new_tree)
15073 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
15074 if (TREE_TARGET_GLOBALS (new_tree))
15075 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
15076 else if (new_tree == target_option_default_node)
15077 restore_target_globals (&default_target_globals);
15079 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
15080 s390_previous_fndecl = NULL_TREE;
15083 /* Establish appropriate back-end context for processing the function
15084 FNDECL. The argument might be NULL to indicate processing at top
15085 level, outside of any function scope. */
15087 s390_set_current_function (tree fndecl)
15089 /* Only change the context if the function changes. This hook is called
15090 several times in the course of compiling a function, and we don't want to
15091 slow things down too much or call target_reinit when it isn't safe. */
15092 if (fndecl == s390_previous_fndecl)
15096 if (s390_previous_fndecl == NULL_TREE)
15097 old_tree = target_option_current_node;
15098 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
15099 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
15101 old_tree = target_option_default_node;
15103 if (fndecl == NULL_TREE)
15105 if (old_tree != target_option_current_node)
15106 s390_activate_target_options (target_option_current_node);
15110 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
15111 if (new_tree == NULL_TREE)
15112 new_tree = target_option_default_node;
15114 if (old_tree != new_tree)
15115 s390_activate_target_options (new_tree);
15116 s390_previous_fndecl = fndecl;
15120 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
15123 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
15124 unsigned int align ATTRIBUTE_UNUSED,
15125 enum by_pieces_operation op ATTRIBUTE_UNUSED,
15126 bool speed_p ATTRIBUTE_UNUSED)
15128 return (size == 1 || size == 2
15129 || size == 4 || (TARGET_ZARCH && size == 8));
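/* Example (illustrative): a constant-size 4-byte move is expanded inline
   through the by-pieces infrastructure as a single load/store pair,
   whereas a 3-byte move is rejected here and left to the target's
   block-move expansion instead.  */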
15132 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
15135 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
15137 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
15138 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
15139 tree call_efpc = build_call_expr (efpc, 0);
15140 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
15142 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
15143 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
15144 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
15145 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
15146 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
15147 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
15149 /* Generates the equivalent of feholdexcept (&fenv_var)
15151 fenv_var = __builtin_s390_efpc ();
15152 __builtin_s390_sfpc (fenv_var & mask) */
15153 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
15155 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
15156 build_int_cst (unsigned_type_node,
15157 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
15158 FPC_EXCEPTION_MASK)));
15159 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
15160 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
15162 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
15164 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
15165 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
15166 build_int_cst (unsigned_type_node,
15167 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
15168 *clear = build_call_expr (sfpc, 1, new_fpc);
15170 /* Generates the equivalent of feupdateenv (fenv_var)
15172 old_fpc = __builtin_s390_efpc ();
15173 __builtin_s390_sfpc (fenv_var);
15174 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
15176 old_fpc = create_tmp_var_raw (unsigned_type_node);
15177 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
15178 old_fpc, call_efpc);
15180 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
15182 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
15183 build_int_cst (unsigned_type_node,
15185 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
15186 build_int_cst (unsigned_type_node,
15188 tree atomic_feraiseexcept
15189 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
15190 raise_old_except = build_call_expr (atomic_feraiseexcept,
15191 1, raise_old_except);
15193 *update = build2 (COMPOUND_EXPR, void_type_node,
15194 build2 (COMPOUND_EXPR, void_type_node,
15195 store_old_fpc, set_new_fpc),
15198 #undef FPC_EXCEPTION_MASK
15199 #undef FPC_FLAGS_MASK
15200 #undef FPC_DXC_MASK
15201 #undef FPC_EXCEPTION_MASK_SHIFT
15202 #undef FPC_FLAGS_SHIFT
15203 #undef FPC_DXC_SHIFT
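/* Usage sketch (illustrative): for a C11 atomic compound assignment like

       _Atomic double d;
       d *= 2.0;

   the middle end emits the *HOLD sequence before the compare-and-swap
   loop, *CLEAR whenever an iteration fails and is retried, and *UPDATE
   once the store has succeeded, so only the final iteration's FP
   exceptions become visible.  */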
15206 /* Return the vector mode to be used for inner mode MODE when doing
15208 static machine_mode
15209 s390_preferred_simd_mode (machine_mode mode)
15229 /* Our hardware does not require vectors to be strictly aligned. */
15231 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
15232 const_tree type ATTRIBUTE_UNUSED,
15233 int misalignment ATTRIBUTE_UNUSED,
15234 bool is_packed ATTRIBUTE_UNUSED)
15239 return default_builtin_support_vector_misalignment (mode, type, misalignment,
15243 /* The vector ABI requires vector types to be aligned on an 8 byte
15244 boundary (our stack alignment). However, we allow this to be
15245 overridden by the user, although this definitely breaks the ABI. */
15246 static HOST_WIDE_INT
15247 s390_vector_alignment (const_tree type)
15249 if (!TARGET_VX_ABI)
15250 return default_vector_alignment (type);
15252 if (TYPE_USER_ALIGN (type))
15253 return TYPE_ALIGN (type);
15255 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
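/* Example (illustrative): a 16-byte vector of two doubles is given
   64-bit alignment under the vector ABI, whereas declaring it with
   __attribute__ ((aligned (16))) keeps the full 128-bit alignment at
   the cost of breaking the ABI as noted above.  */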
15258 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15259 /* Implement TARGET_ASM_FILE_START. */
15261 s390_asm_file_start (void)
15263 default_file_start ();
15264 s390_asm_output_machine_for_arch (asm_out_file);
15268 /* Implement TARGET_ASM_FILE_END. */
15270 s390_asm_file_end (void)
15272 #ifdef HAVE_AS_GNU_ATTRIBUTE
15273 varpool_node *vnode;
15274 cgraph_node *cnode;
15276 FOR_EACH_VARIABLE (vnode)
15277 if (TREE_PUBLIC (vnode->decl))
15278 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
15280 FOR_EACH_FUNCTION (cnode)
15281 if (TREE_PUBLIC (cnode->decl))
15282 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
15285 if (s390_vector_abi != 0)
15286 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
15289 file_end_indicate_exec_stack ();
15291 if (flag_split_stack)
15292 file_end_indicate_split_stack ();
15295 /* Return true if TYPE is a vector bool type. */
15297 s390_vector_bool_type_p (const_tree type)
15299 return TYPE_VECTOR_OPAQUE (type);
15302 /* Return the diagnostic message string if the binary operation OP is
15303 not permitted on TYPE1 and TYPE2, NULL otherwise. */
15305 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15307 bool bool1_p, bool2_p;
15311 machine_mode mode1, mode2;
15313 if (!TARGET_ZVECTOR)
15316 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15319 bool1_p = s390_vector_bool_type_p (type1);
15320 bool2_p = s390_vector_bool_type_p (type2);
15322 /* Mixing signed and unsigned types is forbidden for all
15324 if (!bool1_p && !bool2_p
15325 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15326 return N_("types differ in signedness");
15328 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15329 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15330 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15331 || op == ROUND_DIV_EXPR);
15332 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15333 || op == EQ_EXPR || op == NE_EXPR);
15335 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15336 return N_("binary operator does not support two vector bool operands");
15338 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15339 return N_("binary operator does not support vector bool operand");
15341 mode1 = TYPE_MODE (type1);
15342 mode2 = TYPE_MODE (type2);
15344 if (bool1_p != bool2_p && plusminus_p
15345 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15346 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15347 return N_("binary operator does not support mixing vector "
15348 "bool with floating point vector operands");
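/* Example (illustrative): comparing a vector bool operand against a
   plain signed vector is rejected with "binary operator does not
   support vector bool operand", while adding the same pair is accepted,
   since PLUS_EXPR tolerates a single bool operand for integral
   vectors.  */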
15353 /* Implement TARGET_C_EXCESS_PRECISION.
15355 FIXME: For historical reasons, float_t and double_t are typedef'ed to
15356 double on s390, causing operations on float_t to operate in a higher
15357 precision than is necessary. However, it is not the case that SFmode
15358 operations have implicit excess precision, and we generate better
15359 code if we let the compiler know that no implicit extra precision is added.
15361 That means when we are compiling with -fexcess-precision=fast, the value
15362 we set for FLT_EVAL_METHOD will be out of line with the actual precision of
15363 float_t (though they would be correct for -fexcess-precision=standard).
15365 A complete fix would modify glibc to remove the unnecessary typedef
15366 of float_t to double. */
15368 static enum flt_eval_method
15369 s390_excess_precision (enum excess_precision_type type)
15373 case EXCESS_PRECISION_TYPE_IMPLICIT:
15374 case EXCESS_PRECISION_TYPE_FAST:
15375 /* The fastest type to promote to will always be the native type,
15376 whether that occurs with implicit excess precision or
15378 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
15379 case EXCESS_PRECISION_TYPE_STANDARD:
15380 /* Otherwise, when we are in a standards compliant mode, to
15381 ensure consistency with the implementation in glibc, report that
15382 float is evaluated to the range and precision of double. */
15383 return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
15385 gcc_unreachable ();
15387 return FLT_EVAL_METHOD_UNPREDICTABLE;
15390 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
15392 static unsigned HOST_WIDE_INT
15393 s390_asan_shadow_offset (void)
15395 return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
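/* Example (illustrative): with the 64-bit offset above, AddressSanitizer
   maps an application address ADDR to the shadow byte at
   (ADDR >> 3) + (1UL << 52).  */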
15398 /* Initialize GCC target structure. */
15400 #undef TARGET_ASM_ALIGNED_HI_OP
15401 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15402 #undef TARGET_ASM_ALIGNED_DI_OP
15403 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15404 #undef TARGET_ASM_INTEGER
15405 #define TARGET_ASM_INTEGER s390_assemble_integer
15407 #undef TARGET_ASM_OPEN_PAREN
15408 #define TARGET_ASM_OPEN_PAREN ""
15410 #undef TARGET_ASM_CLOSE_PAREN
15411 #define TARGET_ASM_CLOSE_PAREN ""
15413 #undef TARGET_OPTION_OVERRIDE
15414 #define TARGET_OPTION_OVERRIDE s390_option_override
15416 #ifdef TARGET_THREAD_SSP_OFFSET
15417 #undef TARGET_STACK_PROTECT_GUARD
15418 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
15421 #undef TARGET_ENCODE_SECTION_INFO
15422 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15424 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15425 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15428 #undef TARGET_HAVE_TLS
15429 #define TARGET_HAVE_TLS true
15431 #undef TARGET_CANNOT_FORCE_CONST_MEM
15432 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
15434 #undef TARGET_DELEGITIMIZE_ADDRESS
15435 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
15437 #undef TARGET_LEGITIMIZE_ADDRESS
15438 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
15440 #undef TARGET_RETURN_IN_MEMORY
15441 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
15443 #undef TARGET_INIT_BUILTINS
15444 #define TARGET_INIT_BUILTINS s390_init_builtins
15445 #undef TARGET_EXPAND_BUILTIN
15446 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
15447 #undef TARGET_BUILTIN_DECL
15448 #define TARGET_BUILTIN_DECL s390_builtin_decl
15450 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
15451 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
15453 #undef TARGET_ASM_OUTPUT_MI_THUNK
15454 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
15455 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
15456 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
15458 #undef TARGET_C_EXCESS_PRECISION
15459 #define TARGET_C_EXCESS_PRECISION s390_excess_precision
15461 #undef TARGET_SCHED_ADJUST_PRIORITY
15462 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
15463 #undef TARGET_SCHED_ISSUE_RATE
15464 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
15465 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
15466 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
15468 #undef TARGET_SCHED_VARIABLE_ISSUE
15469 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
15470 #undef TARGET_SCHED_REORDER
15471 #define TARGET_SCHED_REORDER s390_sched_reorder
15472 #undef TARGET_SCHED_INIT
15473 #define TARGET_SCHED_INIT s390_sched_init
15475 #undef TARGET_CANNOT_COPY_INSN_P
15476 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
15477 #undef TARGET_RTX_COSTS
15478 #define TARGET_RTX_COSTS s390_rtx_costs
15479 #undef TARGET_ADDRESS_COST
15480 #define TARGET_ADDRESS_COST s390_address_cost
15481 #undef TARGET_REGISTER_MOVE_COST
15482 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
15483 #undef TARGET_MEMORY_MOVE_COST
15484 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
15485 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
15486 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
15487 s390_builtin_vectorization_cost
15489 #undef TARGET_MACHINE_DEPENDENT_REORG
15490 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
15492 #undef TARGET_VALID_POINTER_MODE
15493 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
15495 #undef TARGET_BUILD_BUILTIN_VA_LIST
15496 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
15497 #undef TARGET_EXPAND_BUILTIN_VA_START
15498 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
15499 #undef TARGET_ASAN_SHADOW_OFFSET
15500 #define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
15501 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
15502 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
15504 #undef TARGET_PROMOTE_FUNCTION_MODE
15505 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
15506 #undef TARGET_PASS_BY_REFERENCE
15507 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
15509 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
15510 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
15511 #undef TARGET_FUNCTION_ARG
15512 #define TARGET_FUNCTION_ARG s390_function_arg
15513 #undef TARGET_FUNCTION_ARG_ADVANCE
15514 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
15515 #undef TARGET_FUNCTION_VALUE
15516 #define TARGET_FUNCTION_VALUE s390_function_value
15517 #undef TARGET_LIBCALL_VALUE
15518 #define TARGET_LIBCALL_VALUE s390_libcall_value
15519 #undef TARGET_STRICT_ARGUMENT_NAMING
15520 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
15522 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
15523 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
15525 #undef TARGET_FIXED_CONDITION_CODE_REGS
15526 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
15528 #undef TARGET_CC_MODES_COMPATIBLE
15529 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
15531 #undef TARGET_INVALID_WITHIN_DOLOOP
15532 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
15535 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
15536 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
15539 #undef TARGET_DWARF_FRAME_REG_MODE
15540 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
15542 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
15543 #undef TARGET_MANGLE_TYPE
15544 #define TARGET_MANGLE_TYPE s390_mangle_type
15547 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15548 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15550 #undef TARGET_VECTOR_MODE_SUPPORTED_P
15551 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
15553 #undef TARGET_PREFERRED_RELOAD_CLASS
15554 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
15556 #undef TARGET_SECONDARY_RELOAD
15557 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
15559 #undef TARGET_LIBGCC_CMP_RETURN_MODE
15560 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
15562 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
15563 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
15565 #undef TARGET_LEGITIMATE_ADDRESS_P
15566 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
15568 #undef TARGET_LEGITIMATE_CONSTANT_P
15569 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
15571 #undef TARGET_LRA_P
15572 #define TARGET_LRA_P s390_lra_p
15574 #undef TARGET_CAN_ELIMINATE
15575 #define TARGET_CAN_ELIMINATE s390_can_eliminate
15577 #undef TARGET_CONDITIONAL_REGISTER_USAGE
15578 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
15580 #undef TARGET_LOOP_UNROLL_ADJUST
15581 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
15583 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
15584 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
15585 #undef TARGET_TRAMPOLINE_INIT
15586 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
15589 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
15590 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
15592 #undef TARGET_UNWIND_WORD_MODE
15593 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
15595 #undef TARGET_CANONICALIZE_COMPARISON
15596 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
15598 #undef TARGET_HARD_REGNO_SCRATCH_OK
15599 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
15601 #undef TARGET_ATTRIBUTE_TABLE
15602 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
15604 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
15605 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
15607 #undef TARGET_SET_UP_BY_PROLOGUE
15608 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
15610 #undef TARGET_EXTRA_LIVE_ON_ENTRY
15611 #define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry
15613 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
15614 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
15615 s390_use_by_pieces_infrastructure_p
15617 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
15618 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
15620 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
15621 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
15623 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
15624 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
15626 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
15627 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
15629 #undef TARGET_VECTOR_ALIGNMENT
15630 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
15632 #undef TARGET_INVALID_BINARY_OP
15633 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
15635 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15636 #undef TARGET_ASM_FILE_START
15637 #define TARGET_ASM_FILE_START s390_asm_file_start
15640 #undef TARGET_ASM_FILE_END
15641 #define TARGET_ASM_FILE_END s390_asm_file_end
15643 #if S390_USE_TARGET_ATTRIBUTE
15644 #undef TARGET_SET_CURRENT_FUNCTION
15645 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
15647 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
15648 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
15651 #undef TARGET_OPTION_RESTORE
15652 #define TARGET_OPTION_RESTORE s390_function_specific_restore
15654 struct gcc_target targetm = TARGET_INITIALIZER;
15656 #include "gt-s390.h"