/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2015 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "cfgcleanup.h"
#include "langhooks.h"
#include "insn-codes.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-pass.h"

/* This file should be included last.  */
#include "target-def.h"
/* Define the specific costs for a given cpu.  */

struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int mxbr;     /* cost of an MXBR instruction.  */
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
  /* division */
  const int dxbr;
  const int ddbr;
  const int debr;
  const int dlgr;
  const int dlr;
  const int dr;
  const int dsgfr;
  const int dsgr;
};

const struct processor_costs *s390_cost;
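
/* Note (added comment, not in the original source): COSTS_N_INSNS (N)
   from rtl.h scales N by the cost of a single fast instruction
   (it is defined as (N) * 4), so e.g. COSTS_N_INSNS (5) below stands
   for the cost value 20.  The tables that follow are consulted
   through s390_cost by the rtx_costs target hook; the
   option-processing code (not part of this excerpt) decides which
   table s390_cost points at.  */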
static const
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),     /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (4),     /* MHI   */
  COSTS_N_INSNS (5),     /* ML    */
  COSTS_N_INSNS (5),     /* MR    */
  COSTS_N_INSNS (4),     /* MS    */
  COSTS_N_INSNS (15),    /* MSG   */
  COSTS_N_INSNS (7),     /* MSGF  */
  COSTS_N_INSNS (7),     /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (7),     /* multiplication in DFmode */
  COSTS_N_INSNS (13),    /* MXBR */
  COSTS_N_INSNS (136),   /* SQXBR */
  COSTS_N_INSNS (44),    /* SQDBR */
  COSTS_N_INSNS (35),    /* SQEBR */
  COSTS_N_INSNS (18),    /* MADBR */
  COSTS_N_INSNS (13),    /* MAEBR */
  COSTS_N_INSNS (134),   /* DXBR */
  COSTS_N_INSNS (30),    /* DDBR */
  COSTS_N_INSNS (27),    /* DEBR */
  COSTS_N_INSNS (220),   /* DLGR */
  COSTS_N_INSNS (34),    /* DLR */
  COSTS_N_INSNS (34),    /* DR */
  COSTS_N_INSNS (32),    /* DSGFR */
  COSTS_N_INSNS (32),    /* DSGR */
};
static const
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (176),   /* DLGR */
  COSTS_N_INSNS (31),    /* DLR */
  COSTS_N_INSNS (31),    /* DR */
  COSTS_N_INSNS (31),    /* DSGFR */
  COSTS_N_INSNS (31),    /* DSGR */
};
static const
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (30),    /* DLGR */
  COSTS_N_INSNS (23),    /* DLR */
  COSTS_N_INSNS (23),    /* DR */
  COSTS_N_INSNS (24),    /* DSGFR */
  COSTS_N_INSNS (24),    /* DSGR */
};
static const
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),    /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (10),    /* MH    */
  COSTS_N_INSNS (10),    /* MHI   */
  COSTS_N_INSNS (10),    /* ML    */
  COSTS_N_INSNS (10),    /* MR    */
  COSTS_N_INSNS (10),    /* MS    */
  COSTS_N_INSNS (10),    /* MSG   */
  COSTS_N_INSNS (10),    /* MSGF  */
  COSTS_N_INSNS (10),    /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (10),    /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (50),    /* MXBR */
  COSTS_N_INSNS (120),   /* SQXBR */
  COSTS_N_INSNS (52),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (111),   /* DXBR */
  COSTS_N_INSNS (39),    /* DDBR */
  COSTS_N_INSNS (32),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR */
  COSTS_N_INSNS (71),    /* DLR */
  COSTS_N_INSNS (71),    /* DR */
  COSTS_N_INSNS (71),    /* DSGFR */
  COSTS_N_INSNS (71),    /* DSGR */
};
static const
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (101),   /* DXBR B+101 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
static const
struct processor_costs zEC12_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (131),   /* DXBR B+131 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx_insn *last_scheduled_insn;

/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
        base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Numbers of the first and last GPRs for which slots in the
     register save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
      0 - does not need to be saved at all
     -1 - stack slot  */
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored.  */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;

  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     the FPRs.  */
  bool tbegin_p;
};

/* A few accessor macros for struct cfun->machine->s390_frame_layout.  */
#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_save_arg_fprs_p (!!(TARGET_64BIT                           \
                                 ? cfun_frame_layout.fpr_bitmap & 0x0f  \
                                 : cfun_frame_layout.fpr_bitmap & 0x03))
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
#define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (REGNO - FPR0_REGNUM)))
#define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (REGNO - FPR0_REGNUM))))
#define cfun_gpr_save_slot(REGNO) \
  cfun->machine->frame_layout.gpr_save_slots[REGNO]
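
/* Worked example (added comment, not in the original source): with
   the fpr_bitmap layout documented above, cfun_set_fpr_save
   (FPR0_REGNUM + 1) sets bit 1 of fpr_bitmap, which stands for hard
   reg 17, i.e. f2.  Bit 1 lies within both the 64-bit (0x0f) and the
   31-bit (0x03) argument-register masks, so cfun_save_arg_fprs_p
   then evaluates to true.  */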
/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
#define VEC_ARG_NUM_REG 8

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE)                            \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
/* That's the read ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)

/* Indicate which ABI has been used for passing vector args.
   0 - no vector type arguments have been passed where the ABI is relevant
   1 - the old ABI has been used
   2 - a vector type argument has been passed either in a vector register
       or on the stack by value  */
static int s390_vector_abi = 0;
/* Set the vector ABI marker if TYPE is subject to the vector ABI
   switch.  The vector ABI affects only vector data types.  There are
   two aspects of the vector ABI relevant here:

   1. vectors >= 16 bytes have an alignment of 8 bytes with the new
      ABI and natural alignment with the old.

   2. vectors <= 16 bytes are passed in VRs or by value on the stack
      with the new ABI but by reference on the stack with the old.

   If ARG_P is true TYPE is used for a function argument or return
   value.  The ABI marker then is set for all vector data types.  If
   ARG_P is false only type 1 vectors are being checked.  */
static void
s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
{
  static hash_set<const_tree> visited_types_hash;

  if (s390_vector_abi)
    return;

  if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
    return;

  if (visited_types_hash.contains (type))
    return;

  visited_types_hash.add (type);

  if (VECTOR_TYPE_P (type))
    {
      int type_size = int_size_in_bytes (type);

      /* Outside of arguments only the alignment changes, and that
         happens only for vector types >= 16 bytes.  */
      if (!arg_p && type_size < 16)
        return;

      /* In arguments vector types > 16 bytes are passed as before
         (GCC never enforced the bigger alignment for arguments which
         was required by the old vector ABI).  However, it might
         still be ABI relevant due to the changed alignment if it is
         a struct member.  */
      if (arg_p && type_size > 16 && !in_struct_p)
        return;

      s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
    }
  else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
    {
      /* ARRAY_TYPE: Since with neither of the ABIs we have more than
         natural alignment there will never be ABI dependent padding
         in an array type.  That's why we do not set in_struct_p to
         true here.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
    {
      tree arg_chain;

      /* Check the return type.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);

      for (arg_chain = TYPE_ARG_TYPES (type);
           arg_chain;
           arg_chain = TREE_CHAIN (arg_chain))
        s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
    }
  else if (RECORD_OR_UNION_TYPE_P (type))
    {
      tree field;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
        {
          if (TREE_CODE (field) != FIELD_DECL)
            continue;

          s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
        }
    }
}
/* System z builtins.  */

#include "s390-builtins.h"

const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int
opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(...)
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
#include "s390-builtins.def"
    0
  };
tree s390_builtin_types[BT_MAX];
tree s390_builtin_fn_types[BT_FN_MAX];
tree s390_builtin_decls[S390_BUILTIN_MAX +
                        S390_OVERLOADED_BUILTIN_MAX +
                        S390_OVERLOADED_BUILTIN_VAR_MAX];

static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
  CODE_FOR_nothing
};
static void
s390_init_builtins (void)
{
  /* These definitions are being used in s390-builtins.def.  */
  tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
                                       NULL, NULL);
  tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
  tree c_uint64_type_node;
  unsigned int bflags_mask = (BFLAGS_MASK_INIT);

  bflags_mask |= (TARGET_VX)  ? B_VX  : 0;
  bflags_mask |= (TARGET_HTM) ? B_HTM : 0;

  /* The uint64_type_node from tree.c is not compatible with the C99
     uint64_t data type.  What we want is c_uint64_type_node from
     c-common.c.  But since backend code is not supposed to interface
     with the frontend we recreate it here.  */
  if (TARGET_64BIT)
    c_uint64_type_node = long_unsigned_type_node;
  else
    c_uint64_type_node = long_long_unsigned_type_node;

#undef DEF_TYPE
#define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P)          \
  if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask))        \
    s390_builtin_types[INDEX] = (!CONST_P) ?            \
      (NODE) : build_type_variant ((NODE), 1, 0);

#undef DEF_POINTER_TYPE
#define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE)     \
  if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask))        \
    s390_builtin_types[INDEX] =                         \
      build_pointer_type (s390_builtin_types[INDEX_BASE]);

#undef DEF_DISTINCT_TYPE
#define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE)    \
  if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask))        \
    s390_builtin_types[INDEX] =                         \
      build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);

#undef DEF_VECTOR_TYPE
#define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS)    \
  if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask))                \
    s390_builtin_types[INDEX] =                                 \
      build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_OPAQUE_VECTOR_TYPE
#define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS)     \
  if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask))                        \
    s390_builtin_types[INDEX] =                                         \
      build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_FN_TYPE
#define DEF_FN_TYPE(INDEX, BFLAGS, args...)             \
  if ((BFLAGS) == 0 || ((BFLAGS) & bflags_mask))        \
    s390_builtin_fn_types[INDEX] =                      \
      build_function_type_list (args, NULL_TREE);
#undef DEF_OV_TYPE
#define DEF_OV_TYPE(...)
#include "s390-builtin-types.def"

#undef B_DEF
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE)    \
  if (((BFLAGS) & ~bflags_mask) == 0)                           \
    s390_builtin_decls[S390_BUILTIN_##NAME] =                   \
      add_builtin_function ("__builtin_" #NAME,                 \
                            s390_builtin_fn_types[FNTYPE],      \
                            S390_BUILTIN_##NAME,                \
                            BUILT_IN_MD,                        \
                            NULL,                               \
                            ATTRS);
#undef OB_DEF
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE)     \
  if (((BFLAGS) & ~bflags_mask) == 0)                                   \
    s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
      add_builtin_function ("__builtin_" #NAME,                         \
                            s390_builtin_fn_types[FNTYPE],              \
                            S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
                            BUILT_IN_MD,                                \
                            NULL,                                       \
                            0);
#undef OB_DEF_VAR
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
}
/* Return true if ARG is appropriate as argument number ARGNUM of
   builtin DECL.  The operand flags from s390-builtins.def have to be
   passed as OP_FLAGS.  */
bool
s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
{
  if (O_UIMM_P (op_flags))
    {
      int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_U1];

      if (!tree_fits_uhwi_p (arg)
          || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
        {
          error ("constant argument %d for builtin %qF is out of range (0.."
                 HOST_WIDE_INT_PRINT_UNSIGNED ")",
                 argnum, decl,
                 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
          return false;
        }
    }

  if (O_SIMM_P (op_flags))
    {
      int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_S2];

      if (!tree_fits_shwi_p (arg)
          || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
          || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
        {
          error ("constant argument %d for builtin %qF is out of range ("
                 HOST_WIDE_INT_PRINT_DEC ".."
                 HOST_WIDE_INT_PRINT_DEC ")",
                 argnum, decl,
                 -(HOST_WIDE_INT)1 << (bitwidth - 1),
                 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
          return false;
        }
    }
  return true;
}
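
/* Worked example (added comment, not in the original source): an
   O_U4 operand selects bitwidth 4 above, so the accepted range is
   0..15; an O_S8 operand selects bitwidth 8, giving -128..127.
   Values outside those ranges trigger the diagnostics above at
   expansion time.  */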
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 6

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;
  tree arg;
  call_expr_arg_iterator iter;
  unsigned int all_op_flags = opflags_for_builtin (fcode);
  machine_mode last_vec_mode = VOIDmode;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr,
               "s390_expand_builtin, code = %4d, %s\n",
               (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
    }

  if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
      && fcode < S390_ALL_BUILTIN_MAX)
    {
      gcc_unreachable ();
    }
  else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
    {
      icode = code_for_builtin[fcode];
      /* Set a flag in the machine specific cfun part in order to support
         saving/restoring of FPRs.  */
      if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
        cfun->machine->tbegin_p = true;
    }
  else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
    {
      error ("Unresolved overloaded builtin");
      return const0_rtx;
    }
  else
    internal_error ("bad builtin fcode");

  if (icode == 0)
    internal_error ("bad builtin icode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      /* There are builtins (e.g. vec_promote) with no vector
         arguments but an element selector.  So we have to also look
         at the vector return type when emitting the modulo
         operation.  */
      if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
        last_vec_mode = insn_data[icode].operand[0].mode;
    }

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;
      unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);

      all_op_flags = all_op_flags >> O_SHIFT;

      if (arg == error_mark_node)
        return NULL_RTX;
      if (arity >= MAX_ARGS)
        return NULL_RTX;

      if (O_IMM_P (op_flags)
          && TREE_CODE (arg) != INTEGER_CST)
        {
          error ("constant value required for builtin %qF argument %d",
                 fndecl, arity + 1);
          return const0_rtx;
        }

      if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
        return const0_rtx;

      insn_op = &insn_data[icode].operand[arity + nonvoid];
      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      /* Wrap the expanded RTX for pointer types into a MEM expr with
         the proper mode.  This allows us to use e.g. (match_operand
         "memory_operand"..) in the insn patterns instead of (mem
         (match_operand "address_operand)).  This is helpful for
         patterns not just accepting MEMs.  */
      if (POINTER_TYPE_P (TREE_TYPE (arg))
          && insn_op->predicate != address_operand)
        op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);

      /* Expand the modulo operation required on element selectors.  */
      if (op_flags == O_ELEM)
        {
          gcc_assert (last_vec_mode != VOIDmode);
          op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
                                             op[arity],
                                             GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
                                             NULL_RTX, 1, OPTAB_DIRECT);
        }

      /* Record the vector mode used for an element selector.  This assumes:
         1. There is no builtin with two different vector modes and an element selector
         2. The element selector comes after the vector type it is referring to.
         This currently holds for all the builtins, but FIXME: we
         should better check for that.  */
      if (VECTOR_MODE_P (insn_op->mode))
        last_vec_mode = insn_op->mode;

      if (insn_op->predicate (op[arity], insn_op->mode))
        {
          arity++;
          continue;
        }

      if (MEM_P (op[arity])
          && insn_op->predicate == memory_operand
          && (GET_MODE (XEXP (op[arity], 0)) == Pmode
              || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
        {
          op[arity] = replace_equiv_address (op[arity],
                                             copy_to_mode_reg (Pmode,
                                               XEXP (op[arity], 0)));
        }
      else if (GET_MODE (op[arity]) == insn_op->mode
               || GET_MODE (op[arity]) == VOIDmode
               || (insn_op->predicate == address_operand
                   && GET_MODE (op[arity]) == Pmode))
        {
          /* An address_operand usually has VOIDmode in the expander
             so we cannot use this.  */
          machine_mode target_mode =
            (insn_op->predicate == address_operand
             ? Pmode : insn_op->mode);
          op[arity] = copy_to_mode_reg (target_mode, op[arity]);
        }

      if (!insn_op->predicate (op[arity], insn_op->mode))
        {
          error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
          return const0_rtx;
        }
      arity++;
    }

  if (last_vec_mode != VOIDmode && !TARGET_VX)
    {
      error ("Vector type builtin %qF is not supported without -mvx "
             "(default with -march=z13).",
             fndecl);
      return const0_rtx;
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0]);
      else
        pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1]);
      else
        pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 3:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 4:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    case 5:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      break;
    case 6:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
static const int s390_hotpatch_hw_max = 1000000;
static int s390_hotpatch_hw_before_label = 0;
static int s390_hotpatch_hw_after_label = 0;

/* Check whether the hotpatch attribute is applied to a function and,
   if it has an argument, whether the argument is valid.  */

static tree
s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree expr;
  tree expr2;
  int err;

  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }
  if (args != NULL && TREE_CHAIN (args) != NULL)
    {
      expr = TREE_VALUE (args);
      expr2 = TREE_VALUE (TREE_CHAIN (args));
    }
  if (args == NULL || TREE_CHAIN (args) == NULL)
    err = 1;
  else if (TREE_CODE (expr) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
           || wi::gtu_p (expr, s390_hotpatch_hw_max))
    err = 1;
  else if (TREE_CODE (expr2) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
           || wi::gtu_p (expr2, s390_hotpatch_hw_max))
    err = 1;
  else
    err = 0;
  if (err)
    {
      error ("requested %qE attribute is not a comma separated pair of"
             " non-negative integer constants or too large (max. %d)", name,
             s390_hotpatch_hw_max);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Expand the s390_vector_bool type attribute.  */

static tree
s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  machine_mode mode;

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);
  switch (mode)
    {
    case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
    case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
    case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
    case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
      break;
    default: break;
    }

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
static const struct attribute_spec s390_attribute_table[] = {
  { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
  { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
  /* End element.  */
  { NULL,        0, 0, false, false, false, NULL, false }
};
/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label.  */
int
s390_label_align (rtx label)
{
  rtx_insn *prev_insn = prev_active_insn (label);
  rtx set, src;

  if (prev_insn == NULL_RTX)
    goto old;

  set = single_set (prev_insn);

  if (set == NULL_RTX)
    goto old;

  src = SET_SRC (set);

  /* Don't align literal pool base labels.  */
  if (GET_CODE (src) == UNSPEC
      && XINT (src, 1) == UNSPEC_MAIN_BASE)
    return 0;

 old:
  return align_labels_log;
}
static machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE.  */
static bool
s390_scalar_mode_supported_p (machine_mode mode)
{
  /* In contrast to the default implementation reject TImode constants on 31bit
     TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
}
/* Return true if the back end supports vector mode MODE.  */
static bool
s390_vector_mode_supported_p (machine_mode mode)
{
  machine_mode inner;

  if (!VECTOR_MODE_P (mode)
      || !TARGET_VX
      || GET_MODE_SIZE (mode) > 16)
    return false;

  inner = GET_MODE_INNER (mode);

  switch (inner)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode:
    case SFmode:
    case DFmode:
    case TFmode:
      return true;
    default:
      return false;
    }
}
/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

static void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static machine_mode
s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;

  switch (m1)
    {
    case CCZmode:
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
          || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
        return m2;

      return VOIDmode;

    case CCSmode:
    case CCSRmode:
    case CCUmode:
    case CCURmode:
      if (m2 == CCZmode)
        return m1;

      return VOIDmode;

    default:
      return VOIDmode;
    }
}
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

static bool
s390_match_ccmode_set (rtx set, machine_mode req_mode)
{
  machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return 1;

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCSmode:
    case CCSRmode:
    case CCUmode:
    case CCURmode:
    case CCLmode:
    case CCL1mode:
    case CCL2mode:
    case CCL3mode:
    case CCT1mode:
    case CCT2mode:
    case CCT3mode:
    case CCVEQmode:
    case CCVHmode:
    case CCVHUmode:
    case CCVFHmode:
    case CCVFHEmode:
      if (req_mode != set_mode)
        return 0;
      break;

    case CCZmode:
      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
          && req_mode != CCSRmode && req_mode != CCURmode)
        return 0;
      break;

    case CCAPmode:
    case CCANmode:
      if (req_mode != CCAmode)
        return 0;
      break;

    default:
      gcc_unreachable ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

static bool
s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
        rtx set = XVECEXP (PATTERN (insn), 0, i);
        if (GET_CODE (set) == SET)
          if (!s390_match_ccmode_set (set, req_mode))
            return false;
      }

  return true;
}
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM).  */

static machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_DOUBLE as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     int a;
     if ((a & (16 + 128)) == 16) -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2  */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
        return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
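
/* Worked example (added comment, not in the original source): for
   if ((a & (16 + 128)) == 16) the caller passes op1 = 144 and
   op2 = 16, so bit1 = exact_log2 (16) = 4 and
   bit0 = exact_log2 (144 ^ 16) = exact_log2 (128) = 7; since
   bit0 > bit1, the result is CCT1mode, matching the comment
   above.  */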
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

static machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
  if (TARGET_VX
      && register_operand (op0, DFmode)
      && register_operand (op1, DFmode))
    {
      /* LT, LE, UNGT, UNGE require swapping OP0 and OP1.  Either
         s390_emit_compare or s390_canonicalize_comparison will take
         care of it.  */
      switch (code)
        {
        case EQ:
        case NE:
          return CCVEQmode;
        case GT:
        case UNLE:
          return CCVFHmode;
        case GE:
        case UNLT:
          return CCVFHEmode;
        default:
          break;
        }
    }

  switch (code)
    {
    case EQ:
    case NE:
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCAPmode;
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
        return CCAPmode;
      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
           || GET_CODE (op1) == NEG)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCLmode;

      if (GET_CODE (op0) == AND)
        {
          /* Check whether we can potentially do it via TM.  */
          machine_mode ccmode;
          ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
          if (ccmode != VOIDmode)
            {
              /* Relax CCTmode to CCZmode to allow fall-back to AND
                 if that turns out to be beneficial.  */
              return ccmode == CCTmode ? CCZmode : ccmode;
            }
        }

      if (register_operand (op0, HImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
        return CCT3mode;
      if (register_operand (op0, QImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
        return CCT3mode;

      return CCZmode;

    case LE:
    case LT:
    case GE:
    case GT:
      /* The only overflow condition of NEG and ABS happens when
         -INT_MAX is used as parameter, which stays negative.  So
         we have an overflow from a positive value to a negative.
         Using CCAP mode the resulting cc can be used for comparisons.  */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCAPmode;

      /* If constants are involved in an add instruction it is possible to use
         the resulting cc for comparisons with zero.  Knowing the sign of the
         constant the overflow behavior gets predictable.  e.g.:
           int a, b; if ((b = a + c) > 0)
         with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
              || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
                  /* Avoid INT32_MIN on 32 bit.  */
                  && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
        {
          if (INTVAL (XEXP ((op0), 1)) < 0)
            return CCANmode;
          else
            return CCAPmode;
        }
      /* Fall through.  */
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
    case LTGT:
      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCSRmode;
      return CCSmode;

    case LTU:
    case GEU:
      if (GET_CODE (op0) == PLUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCL1mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCURmode;
      return CCUmode;

    case LEU:
    case GTU:
      if (GET_CODE (op0) == MINUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCL2mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCURmode;
      return CCUmode;

    default:
      gcc_unreachable ();
    }
}
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

static void
s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
                              bool op0_preserve_value)
{
  if (op0_preserve_value)
    return;

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
          && pos >= 0 && pos + len <= modesize
          && modesize <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT block;
          block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
          block <<= modesize - pos - len;

          *op0 = gen_rtx_AND (GET_MODE (inner), inner,
                              gen_int_mode (block, GET_MODE (inner)));
        }
    }
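
  /* Worked example (added comment, not in the original source): for
     (zero_extract:SI x 1 7), i.e. len = 1 and pos = 7 in a 32-bit
     mode, block becomes ((1 << 1) - 1) << (32 - 7 - 1) = 1 << 24,
     so the test turns into (x & 0x1000000) == 0, a form the TM
     patterns can match.  */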
  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
          && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
          && (GET_MODE_SIZE (GET_MODE (inner))
              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
          && ((INTVAL (mask)
               & GET_MODE_MASK (GET_MODE (inner))
               & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
              == 0))
        inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
        {
          int part = s390_single_part (XEXP (*op0, 1),
                                       GET_MODE (inner), QImode, 0);
          if (part >= 0)
            {
              mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
              inner = adjust_address_nv (inner, QImode, part);
              *op0 = gen_rtx_AND (QImode, inner, mask);
            }
        }
    }

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
          & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }

  /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
        {
        case EQ: new_code = EQ;  break;
        case NE: new_code = NE;  break;
        case LT: new_code = GTU; break;
        case GT: new_code = LTU; break;
        case LE: new_code = GEU; break;
        case GE: new_code = LEU; break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          *op0 = XVECEXP (*op0, 0, 0);
          *code = (int) new_code;
        }
    }

  /* Remove redundant UNSPEC_CC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && CONST_INT_P (*op1))
    {
      enum rtx_code new_code = UNKNOWN;
      switch (GET_MODE (XVECEXP (*op0, 0, 0)))
        {
        case CCZmode:
        case CCRAWmode:
          switch (*code)
            {
            case EQ: new_code = EQ;  break;
            case NE: new_code = NE;  break;
            default: break;
            }
          break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          /* For CCRAWmode put the required cc mask into the second
             operand.  */
          if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
              && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
            *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
          *op0 = XVECEXP (*op0, 0, 0);
          *code = (int) new_code;
        }
    }

  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
          || (*code == NE && GET_CODE (*op0) == EQ))
        *code = EQ;
      else
        *code = NE;
      *op0 = XEXP (*op0, 0);
    }

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int) swap_condition ((enum rtx_code) *code);
    }

  /* Using the scalar variants of vector instructions for 64 bit FP
     comparisons might require swapping the operands.  */
  if (TARGET_VX
      && register_operand (*op0, DFmode)
      && register_operand (*op1, DFmode)
      && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
    {
      rtx tmp;

      switch (*code)
        {
        case LT:   *code = GT;   break;
        case LE:   *code = GE;   break;
        case UNGT: *code = UNLE; break;
        case UNGE: *code = UNLT; break;
        default: gcc_unreachable ();
        }
      tmp = *op0; *op0 = *op1; *op1 = tmp;
    }
}
/* Helper function for s390_emit_compare.  If possible emit a 64 bit
   FP compare using the single element variant of vector instructions.
   Replace CODE with the comparison code to be used in the CC reg
   compare and return the condition code register RTX in CC.  */

static bool
s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
                                rtx *cc)
{
  machine_mode cmp_mode;
  bool swap_p = false;

  switch (*code)
    {
    case EQ:   cmp_mode = CCVEQmode;  break;
    case NE:   cmp_mode = CCVEQmode;  break;
    case GT:   cmp_mode = CCVFHmode;  break;
    case GE:   cmp_mode = CCVFHEmode; break;
    case UNLE: cmp_mode = CCVFHmode;  break;
    case UNLT: cmp_mode = CCVFHEmode; break;
    case LT:   cmp_mode = CCVFHmode;  *code = GT;   swap_p = true; break;
    case LE:   cmp_mode = CCVFHEmode; *code = GE;   swap_p = true; break;
    case UNGE: cmp_mode = CCVFHmode;  *code = UNLE; swap_p = true; break;
    case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
    default: return false;
    }

  if (swap_p)
    {
      rtx tmp = cmp2;
      cmp2 = cmp1;
      cmp1 = tmp;
    }

  *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
               gen_rtvec (2,
                          gen_rtx_SET (*cc,
                                       gen_rtx_COMPARE (cmp_mode, cmp1,
                                                        cmp2)),
                          gen_rtx_CLOBBER (VOIDmode,
                                           gen_rtx_SCRATCH (V2DImode)))));
  return true;
}
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  if (TARGET_VX
      && register_operand (op0, DFmode)
      && register_operand (op1, DFmode)
      && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
    {
      /* Work has been done by s390_expand_vec_compare_scalar already.  */
    }
  else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      /* Do not output a redundant compare instruction if a
         compare_and_swap pattern already computed the result and the
         machine modes are compatible.  */
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
                  == GET_MODE (op0));
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
                            rtx cmp, rtx new_rtx)
{
  emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
  return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
                            const0_rtx);
}
/* Emit a jump instruction to TARGET and return it.  If COND is
   NULL_RTX, emit an unconditional jump, else a conditional jump under
   condition COND.  */

rtx_insn *
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (pc_rtx, target);
  return emit_jump_insn (insn);
}
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx
              || (GET_MODE (XEXP (code, 0)) == CCRAWmode
                  && CONST_INT_P (XEXP (code, 1))));

  switch (GET_MODE (XEXP (code, 0)))
    {
    case CCZmode:
    case CCZ1mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCT1mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC1;
        case NE: return CC0 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCT2mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC2;
        case NE: return CC0 | CC1 | CC3;
        default: return -1;
        }
      break;

    case CCT3mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC3;
        case NE: return CC0 | CC1 | CC2;
        default: return -1;
        }
      break;

    case CCLmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0 | CC2;
        case NE: return CC1 | CC3;
        default: return -1;
        }
      break;

    case CCL1mode:
      switch (GET_CODE (code))
        {
        case LTU: return CC2 | CC3;  /* carry */
        case GEU: return CC0 | CC1;  /* no carry */
        default: return -1;
        }
      break;

    case CCL2mode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0 | CC1;  /* borrow */
        case LEU: return CC2 | CC3;  /* no borrow */
        default: return -1;
        }
      break;

    case CCL3mode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0 | CC2;
        case NE:  return CC1 | CC3;
        case LTU: return CC1;
        case GTU: return CC3;
        case LEU: return CC1 | CC2;
        case GEU: return CC2 | CC3;
        default: return -1;
        }
      break;

    case CCUmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0;
        case NE:  return CC1 | CC2 | CC3;
        case LTU: return CC1;
        case GTU: return CC2;
        case LEU: return CC0 | CC1;
        case GEU: return CC0 | CC2;
        default: return -1;
        }
      break;

    case CCURmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0;
        case NE:  return CC2 | CC1 | CC3;
        case LTU: return CC2;
        case GTU: return CC1;
        case LEU: return CC0 | CC2;
        case GEU: return CC0 | CC1;
        default: return -1;
        }
      break;

    case CCAPmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1 | CC3;
        case GT: return CC2;
        case LE: return CC0 | CC1 | CC3;
        case GE: return CC0 | CC2;
        default: return -1;
        }
      break;

    case CCANmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1;
        case GT: return CC2 | CC3;
        case LE: return CC0 | CC1;
        case GE: return CC0 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCSmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1;
        case GT:        return CC2;
        case LE:        return CC0 | CC1;
        case GE:        return CC0 | CC2;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC1 | CC2;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC1 | CC3;
        case UNGT:      return CC2 | CC3;
        case UNLE:      return CC0 | CC1 | CC3;
        case UNGE:      return CC0 | CC2 | CC3;
        case LTGT:      return CC1 | CC2;
        default: return -1;
        }
      break;

    case CCSRmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC2 | CC1 | CC3;
        case LT:        return CC2;
        case GT:        return CC1;
        case LE:        return CC0 | CC2;
        case GE:        return CC0 | CC1;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC2 | CC1;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC2 | CC3;
        case UNGT:      return CC1 | CC3;
        case UNLE:      return CC0 | CC2 | CC3;
        case UNGE:      return CC0 | CC1 | CC3;
        case LTGT:      return CC2 | CC1;
        default: return -1;
        }
      break;

    /* Vector comparison modes.  */
    case CCVEQmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC3;
        default: return -1;
        }

    case CCVEQANYmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0 | CC1;
        case NE: return CC3 | CC1;
        default: return -1;
        }

    /* Integer vector compare modes.  */
    case CCVHmode:
      switch (GET_CODE (code))
        {
        case GT: return CC0;
        case LE: return CC3;
        default: return -1;
        }

    case CCVHANYmode:
      switch (GET_CODE (code))
        {
        case GT: return CC0 | CC1;
        case LE: return CC3 | CC1;
        default: return -1;
        }

    case CCVHUmode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0;
        case LEU: return CC3;
        default: return -1;
        }

    case CCVHUANYmode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0 | CC1;
        case LEU: return CC3 | CC1;
        default: return -1;
        }

    /* FP vector compare modes.  */
    case CCVFHmode:
      switch (GET_CODE (code))
        {
        case GT:   return CC0;
        case UNLE: return CC3;
        default: return -1;
        }

    case CCVFHANYmode:
      switch (GET_CODE (code))
        {
        case GT:   return CC0 | CC1;
        case UNLE: return CC3 | CC1;
        default: return -1;
        }

    case CCVFHEmode:
      switch (GET_CODE (code))
        {
        case GE:   return CC0;
        case UNLT: return CC3;
        default: return -1;
        }

    case CCVFHEANYmode:
      switch (GET_CODE (code))
        {
        case GE:   return CC0 | CC1;
        case UNLT: return CC3 | CC1;
        default: return -1;
        }

    case CCRAWmode:
      switch (GET_CODE (code))
        {
        case EQ:
          return INTVAL (XEXP (code, 1));
        case NE:
          return (INTVAL (XEXP (code, 1))) ^ 0xf;
        default:
          return -1;
        }

    default:
      return -1;
    }
}
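
/* Worked example (added comment, not in the original source): in
   CCSmode, GE yields CC0 | CC2 = 8 | 2 = 0xa, i.e. "branch if
   condition code 0 or 2" (equal or greater); index 10 of the
   mnemonic table in s390_branch_condition_mnemonic below maps that
   mask to "he".  */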
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
    {
    case EQ:
      return CC0;
    case NE:
      return CC1 | CC2;
    case LT:
    case LTU:
      return CC1;
    case GT:
    case GTU:
      return CC2;
    case LE:
    case LEU:
      return CC0 | CC1;
    case GE:
    case GEU:
      return CC0 | CC2;
    default:
      gcc_unreachable ();
    }
  return -1;
}
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && (XEXP (code, 1) == const0_rtx
          || (GET_MODE (XEXP (code, 0)) == CCRAWmode
              && CONST_INT_P (XEXP (code, 1)))))
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
        return value & part_mask;
    }

  gcc_unreachable ();
}
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

int
s390_single_part (rtx op,
                  machine_mode mode,
                  machine_mode part_mode,
                  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
        {
          if (part != -1)
            return -1;
          else
            part = i;
        }
    }

  return part == -1 ? -1 : n_parts - 1 - part;
}
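
/* Worked example (added comment, not in the original source): for
   op = 0x00ff0000 in SImode with part_mode == HImode and def == 0,
   the halfword parts are 0x00ff and 0x0000; exactly one part differs
   from DEF, and the returned number counts from the most significant
   part: n_parts - 1 - part = 2 - 1 - 1 = 0.  */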
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in IN.  POS and LENGTH can be used
   to obtain the start position and the length of the bitfield.

   POS gives the position of the first bit of the bitfield counting
   from the lowest order bit starting with zero.  In order to use this
   value for S/390 instructions this has to be converted to "bits big
   endian" style.  */

bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
                           int *pos, int *length)
{
  int tmp_pos = 0;
  int tmp_length = 0;
  int i;
  unsigned HOST_WIDE_INT mask = 1ULL;
  bool contiguous = false;

  for (i = 0; i < size; mask <<= 1, i++)
    {
      if (contiguous)
        {
          if (mask & in)
            tmp_length++;
          else
            break;
        }
      else
        {
          if (mask & in)
            {
              contiguous = true;
              tmp_length++;
            }
          else
            tmp_pos++;
        }
    }

  if (!tmp_length)
    return false;

  /* Calculate a mask for all bits beyond the contiguous bits.  */
  mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));

  if ((unsigned)size < sizeof (HOST_WIDE_INT) * BITS_PER_UNIT)
    mask &= (HOST_WIDE_INT_1U << size) - 1;

  if (mask & in)
    return false;

  if (tmp_length + tmp_pos - 1 > size)
    return false;

  if (length)
    *length = tmp_length;

  if (pos)
    *pos = tmp_pos;

  return true;
}
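
/* Worked example (added comment, not in the original source):
   in = 0x0ff0 with size = 16 is accepted with *pos = 4 and
   *length = 8 (bits 4..11 set, all others clear), while
   in = 0x0f0f is rejected because the set bits are not
   contiguous.  */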
/* Return true if OP contains the same contiguous bitfield in *all*
   its elements.  START and END can be used to obtain the start and
   end position of the bitfield.

   START/END give the position of the first/last bit of the bitfield
   counting from the lowest order bit starting with zero.  In order to
   use these values for S/390 instructions this has to be converted to
   "bits big endian" style.  */

bool
s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
{
  unsigned HOST_WIDE_INT mask;
  int length, size;
  rtx elt;

  if (!const_vec_duplicate_p (op, &elt)
      || !CONST_INT_P (elt))
    return false;

  size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
  mask = UINTVAL (elt);
  if (s390_contiguous_bitmask_p (mask, size, start,
                                 end != NULL ? &length : NULL))
    {
      if (end != NULL)
        *end = *start + length - 1;
      return true;
    }
  /* 0xff00000f style immediates can be covered by swapping start and
     end indices in vgm.  */
  if (s390_contiguous_bitmask_p (~mask, size, start,
                                 end != NULL ? &length : NULL))
    {
      if (end != NULL)
        *end = *start - 1;
      if (start != NULL)
        *start = *start + length;
      return true;
    }
  return false;
}
/* Return true if OP consists only of byte chunks being either 0 or
   0xff.  If MASK is !=NULL a byte mask is generated which is
   appropriate for the vector generate byte mask instruction.  */

bool
s390_bytemask_vector_p (rtx op, unsigned *mask)
{
  int i;
  unsigned tmp_mask = 0;
  int nunit, unit_size;

  if (!VECTOR_MODE_P (GET_MODE (op))
      || GET_CODE (op) != CONST_VECTOR
      || !CONST_INT_P (XVECEXP (op, 0, 0)))
    return false;

  nunit = GET_MODE_NUNITS (GET_MODE (op));
  unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));

  for (i = 0; i < nunit; i++)
    {
      unsigned HOST_WIDE_INT c;
      int j;

      if (!CONST_INT_P (XVECEXP (op, 0, i)))
        return false;

      c = UINTVAL (XVECEXP (op, 0, i));
      for (j = 0; j < unit_size; j++)
        {
          if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
            return false;
          tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
          c = c >> BITS_PER_UNIT;
        }
    }

  if (mask != NULL)
    *mask = tmp_mask;

  return true;
}
/* Check whether a rotate of ROTL followed by an AND of CONTIG is
   equivalent to a shift followed by the AND.  In particular, CONTIG
   should not overlap the (rotated) bit 0/bit 63 gap.  Negative values
   for ROTL indicate a rotate to the right.  */

bool
s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
{
  int pos, len;
  bool ok;

  ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
  gcc_assert (ok);

  return ((rotl >= 0 && rotl <= pos)
          || (rotl < 0 && -rotl <= bitsize - len - pos));
}
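
/* Worked example (added comment, not in the original source): with
   bitsize = 64 and contig = 0xff0 (pos = 4, len = 8), a left rotate
   keeps the field clear of the bit 0/bit 63 wrap-around gap only for
   rotl in 0..4, so rotl = 4 is accepted while rotl = 5 is not; for
   right rotates, -rotl may be at most 64 - 8 - 4 = 52.  */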
/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
{
  /* Floating point and vector registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
    return false;

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
        return false;
    }

  return true;
}
/* Return true if it can be proven that [MEM1, MEM1 + SIZE]
   and [MEM2, MEM2 + SIZE] do overlap and false
   otherwise.  */

bool
s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
{
  rtx addr1, addr2, addr_delta;
  HOST_WIDE_INT delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return true;

  if (size == 0)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);

  /* This overlapping check is used by peepholes merging memory block operations.
     Overlapping operations would otherwise be recognized by the S/390 hardware
     and would fall back to a slower implementation.  Allowing overlapping
     operations would lead to slow code but not to wrong code.  Therefore we are
     somewhat optimistic if we cannot prove that the memory blocks are
     overlapping.
     That's why we return false here although this may accept operations on
     overlapping memory areas.  */
  if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
    return false;

  delta = INTVAL (addr_delta);

  if (delta == 0
      || (delta > 0 && delta < size)
      || (delta < 0 && -delta < size))
    return true;

  return false;
}
/* Check whether the address of memory reference MEM2 equals exactly
   the address of memory reference MEM1 plus DELTA.  Return true if
   we can prove this to be the case, false otherwise.  */

bool
s390_offset_p (rtx mem1, rtx mem2, rtx delta)
{
  rtx addr1, addr2, addr_delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
  if (!addr_delta || !rtx_equal_p (addr_delta, delta))
    return false;

  return true;
}
2447 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2450 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2453 machine_mode wmode = mode;
2454 rtx dst = operands[0];
2455 rtx src1 = operands[1];
2456 rtx src2 = operands[2];
2459 /* If we cannot handle the operation directly, use a temp register. */
2460 if (!s390_logical_operator_ok_p (operands))
2461 dst = gen_reg_rtx (mode);
2463 /* QImode and HImode patterns make sense only if we have a destination
2464 in memory. Otherwise perform the operation in SImode. */
2465 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2468 /* Widen operands if required. */
2471 if (GET_CODE (dst) == SUBREG
2472 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2474 else if (REG_P (dst))
2475 dst = gen_rtx_SUBREG (wmode, dst, 0);
2477 dst = gen_reg_rtx (wmode);
2479 if (GET_CODE (src1) == SUBREG
2480 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2482 else if (GET_MODE (src1) != VOIDmode)
2483 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2485 if (GET_CODE (src2) == SUBREG
2486 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2488 else if (GET_MODE (src2) != VOIDmode)
2489 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2492 /* Emit the instruction. */
2493 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2494 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2495 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2497 /* Fix up the destination if needed. */
2498 if (dst != operands[0])
2499 emit_move_insn (operands[0], gen_lowpart (mode, dst));
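/* A minimal illustrative sketch (hypothetical helper, not part of the
   build): expanding a QImode AND between registers.  Since QImode
   patterns exist only for memory destinations, the expander widens
   everything to SImode, emits the wide AND with its CC clobber, and
   copies the low part back into operands[0].  */
static void ATTRIBUTE_UNUSED
s390_expand_logical_example (void)
{
  rtx ops[3];

  ops[0] = gen_reg_rtx (QImode);
  ops[1] = gen_reg_rtx (QImode);
  ops[2] = gen_reg_rtx (QImode);
  s390_expand_logical_operator (AND, QImode, ops);
}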
2502 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2505 s390_logical_operator_ok_p (rtx *operands)
2507 /* If the destination operand is in memory, it needs to coincide
2508 with one of the source operands. After reload, it has to be
2509 the first source operand. */
2510 if (GET_CODE (operands[0]) == MEM)
2511 return rtx_equal_p (operands[0], operands[1])
2512 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2517 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2518 operand IMMOP to switch from SS to SI type instructions. */
2521 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2523 int def = code == AND ? -1 : 0;
2527 gcc_assert (GET_CODE (*memop) == MEM);
2528 gcc_assert (!MEM_VOLATILE_P (*memop));
2530 mask = s390_extract_part (*immop, QImode, def);
2531 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2532 gcc_assert (part >= 0);
2534 *memop = adjust_address (*memop, QImode, part);
2535 *immop = gen_int_mode (mask, QImode);
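/* A minimal illustrative sketch (hypothetical helper, not part of the
   build): an SImode AND of a non-volatile memory word with 0xffffff00
   differs from the AND default 0xff in its lowest byte only, so it
   narrows to a single-byte operation (an NI) on byte 3 with mask 0.  */
static void ATTRIBUTE_UNUSED
s390_narrow_logical_example (rtx mem_si)
{
  rtx memop = mem_si;  /* an SImode MEM  */
  rtx immop = gen_int_mode (0xffffff00, SImode);

  s390_narrow_logical_operator (AND, &memop, &immop);
  /* memop is now the QImode byte at offset 3, immop (const_int 0).  */
}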
2539 /* How to allocate a 'struct machine_function'. */
2541 static struct machine_function *
2542 s390_init_machine_status (void)
2544 return ggc_cleared_alloc<machine_function> ();
2547 /* Map for smallest class containing reg regno. */
2549 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2550 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2551 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2552 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2553 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2554 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2555 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2556 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2557 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2558 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2559 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2560 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2561 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2562 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2563 VEC_REGS, VEC_REGS /* 52 */
2566 /* Return attribute type of insn. */
2568 static enum attr_type
2569 s390_safe_attr_type (rtx_insn *insn)
2571 if (recog_memoized (insn) >= 0)
2572 return get_attr_type (insn);
2577 /* Return true if DISP is a valid short displacement. */
2580 s390_short_displacement (rtx disp)
2582 /* No displacement is OK. */
2586 /* Without the long displacement facility we don't need to
2587 distinguish between long and short displacements. */
2588 if (!TARGET_LONG_DISPLACEMENT)
2591 /* Integer displacement in range. */
2592 if (GET_CODE (disp) == CONST_INT)
2593 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2595 /* GOT offset is not OK, the GOT can be large. */
2596 if (GET_CODE (disp) == CONST
2597 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2598 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2599 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2602 /* All other symbolic constants are literal pool references,
2603 which are OK as the literal pool must be small. */
2604 if (GET_CODE (disp) == CONST)
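/* Minimal illustrative checks (hypothetical helper, not part of the
   build), meaningful only when TARGET_LONG_DISPLACEMENT is in effect:
   short displacements are exactly the unsigned 12-bit integers.  */
static void ATTRIBUTE_UNUSED
s390_short_displacement_example (void)
{
  gcc_assert (s390_short_displacement (GEN_INT (4095)));  /* fits D field  */
  gcc_assert (!s390_short_displacement (GEN_INT (4096))); /* needs 20-bit DL  */
  gcc_assert (!s390_short_displacement (GEN_INT (-1)));   /* negative: long  */
}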
2610 /* Decompose an RTL expression ADDR for a memory address into
2611 its components, returned in OUT.
2613 Returns false if ADDR is not a valid memory address, true
2614 otherwise. If OUT is NULL, don't return the components,
2615 but check for validity only.
2617 Note: Only addresses in canonical form are recognized.
2618 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2619 canonical form so that they will be recognized. */
2622 s390_decompose_address (rtx addr, struct s390_address *out)
2624 HOST_WIDE_INT offset = 0;
2625 rtx base = NULL_RTX;
2626 rtx indx = NULL_RTX;
2627 rtx disp = NULL_RTX;
2629 bool pointer = false;
2630 bool base_ptr = false;
2631 bool indx_ptr = false;
2632 bool literal_pool = false;
2634 /* We may need to substitute the literal pool base register into the address
2635 below. However, at this point we do not know which register is going to
2636 be used as base, so we substitute the arg pointer register. This is going
2637 to be treated as holding a pointer below -- it shouldn't be used for any other purpose. */
2639 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2641 /* Decompose address into base + index + displacement. */
2643 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2646 else if (GET_CODE (addr) == PLUS)
2648 rtx op0 = XEXP (addr, 0);
2649 rtx op1 = XEXP (addr, 1);
2650 enum rtx_code code0 = GET_CODE (op0);
2651 enum rtx_code code1 = GET_CODE (op1);
2653 if (code0 == REG || code0 == UNSPEC)
2655 if (code1 == REG || code1 == UNSPEC)
2657 indx = op0; /* index + base */
2663 base = op0; /* base + displacement */
2668 else if (code0 == PLUS)
2670 indx = XEXP (op0, 0); /* index + base + disp */
2671 base = XEXP (op0, 1);
2682 disp = addr; /* displacement */
2684 /* Extract integer part of displacement. */
2688 if (GET_CODE (disp) == CONST_INT)
2690 offset = INTVAL (disp);
2693 else if (GET_CODE (disp) == CONST
2694 && GET_CODE (XEXP (disp, 0)) == PLUS
2695 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2697 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2698 disp = XEXP (XEXP (disp, 0), 0);
2702 /* Strip off CONST here to avoid special case tests later. */
2703 if (disp && GET_CODE (disp) == CONST)
2704 disp = XEXP (disp, 0);
2706 /* We can convert literal pool addresses to
2707 displacements by basing them off the base register. */
2708 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2710 /* Either base or index must be free to hold the base register. */
2712 base = fake_pool_base, literal_pool = true;
2714 indx = fake_pool_base, literal_pool = true;
2718 /* Mark up the displacement. */
2719 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2720 UNSPEC_LTREL_OFFSET);
2723 /* Validate base register. */
2726 if (GET_CODE (base) == UNSPEC)
2727 switch (XINT (base, 1))
2731 disp = gen_rtx_UNSPEC (Pmode,
2732 gen_rtvec (1, XVECEXP (base, 0, 0)),
2733 UNSPEC_LTREL_OFFSET);
2737 base = XVECEXP (base, 0, 1);
2740 case UNSPEC_LTREL_BASE:
2741 if (XVECLEN (base, 0) == 1)
2742 base = fake_pool_base, literal_pool = true;
2744 base = XVECEXP (base, 0, 1);
2752 || (GET_MODE (base) != SImode
2753 && GET_MODE (base) != Pmode))
2756 if (REGNO (base) == STACK_POINTER_REGNUM
2757 || REGNO (base) == FRAME_POINTER_REGNUM
2758 || ((reload_completed || reload_in_progress)
2759 && frame_pointer_needed
2760 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2761 || REGNO (base) == ARG_POINTER_REGNUM
2763 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2764 pointer = base_ptr = true;
2766 if ((reload_completed || reload_in_progress)
2767 && base == cfun->machine->base_reg)
2768 pointer = base_ptr = literal_pool = true;
2771 /* Validate index register. */
2774 if (GET_CODE (indx) == UNSPEC)
2775 switch (XINT (indx, 1))
2779 disp = gen_rtx_UNSPEC (Pmode,
2780 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2781 UNSPEC_LTREL_OFFSET);
2785 indx = XVECEXP (indx, 0, 1);
2788 case UNSPEC_LTREL_BASE:
2789 if (XVECLEN (indx, 0) == 1)
2790 indx = fake_pool_base, literal_pool = true;
2792 indx = XVECEXP (indx, 0, 1);
2800 || (GET_MODE (indx) != SImode
2801 && GET_MODE (indx) != Pmode))
2804 if (REGNO (indx) == STACK_POINTER_REGNUM
2805 || REGNO (indx) == FRAME_POINTER_REGNUM
2806 || ((reload_completed || reload_in_progress)
2807 && frame_pointer_needed
2808 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2809 || REGNO (indx) == ARG_POINTER_REGNUM
2811 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2812 pointer = indx_ptr = true;
2814 if ((reload_completed || reload_in_progress)
2815 && indx == cfun->machine->base_reg)
2816 pointer = indx_ptr = literal_pool = true;
2819 /* Prefer to use pointer as base, not index. */
2820 if (base && indx && !base_ptr
2821 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2828 /* Validate displacement. */
2831 /* If virtual registers are involved, the displacement will change later
2832 anyway as the virtual registers get eliminated. This could make a
2833 valid displacement invalid, but it is more likely to make an invalid
2834 displacement valid, because we sometimes access the register save area
2835 via negative offsets to one of those registers.
2836 Thus we don't check the displacement for validity here. If after
2837 elimination the displacement turns out to be invalid after all,
2838 this is fixed up by reload in any case. */
2839 /* LRA always keeps displacements up to date, and we need to know
2840 that the displacement is right throughout LRA, not only at the
2841 final elimination. */
2843 || (base != arg_pointer_rtx
2844 && indx != arg_pointer_rtx
2845 && base != return_address_pointer_rtx
2846 && indx != return_address_pointer_rtx
2847 && base != frame_pointer_rtx
2848 && indx != frame_pointer_rtx
2849 && base != virtual_stack_vars_rtx
2850 && indx != virtual_stack_vars_rtx))
2851 if (!DISP_IN_RANGE (offset))
2856 /* All the special cases are pointers. */
2859 /* In the small-PIC case, the linker converts @GOT
2860 and @GOTNTPOFF offsets to possible displacements. */
2861 if (GET_CODE (disp) == UNSPEC
2862 && (XINT (disp, 1) == UNSPEC_GOT
2863 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2869 /* Accept pool label offsets. */
2870 else if (GET_CODE (disp) == UNSPEC
2871 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2874 /* Accept literal pool references. */
2875 else if (GET_CODE (disp) == UNSPEC
2876 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2878 /* In case CSE pulled a non-literal-pool reference out of
2879 the pool we have to reject the address. This is
2880 especially important when loading the GOT pointer on
2881 non-zarch CPUs. In this case the literal pool contains an
2882 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2883 will most likely exceed the displacement. */
2884 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2885 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2888 orig_disp = gen_rtx_CONST (Pmode, disp);
2891 /* If we have an offset, make sure it does not
2892 exceed the size of the constant pool entry. */
2893 rtx sym = XVECEXP (disp, 0, 0);
2894 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2897 orig_disp = plus_constant (Pmode, orig_disp, offset);
2912 out->disp = orig_disp;
2913 out->pointer = pointer;
2914 out->literal_pool = literal_pool;
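/* A minimal illustrative sketch (hypothetical helper, not part of the
   build): decomposing a plain base + displacement address.  */
static void ATTRIBUTE_UNUSED
s390_decompose_address_example (void)
{
  struct s390_address ad;
  rtx addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 2), GEN_INT (40));

  if (s390_decompose_address (addr, &ad))
    {
      /* ad.base is (reg 2), ad.indx is NULL_RTX, ad.disp is
         (const_int 40), and ad.literal_pool is false.  */
    }
}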
2920 /* Decompose an RTL expression OP for a shift count into its components,
2921 and return the base register in BASE and the offset in OFFSET.
2923 Return true if OP is a valid shift count, false if not. */
2926 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2928 HOST_WIDE_INT off = 0;
2930 /* We can have an integer constant, an address register,
2931 or a sum of the two. */
2932 if (GET_CODE (op) == CONST_INT)
2937 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2939 off = INTVAL (XEXP (op, 1));
2942 while (op && GET_CODE (op) == SUBREG)
2943 op = SUBREG_REG (op);
2945 if (op && GET_CODE (op) != REG)
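/* A minimal illustrative sketch (hypothetical helper, not part of the
   build): the accepted shift-count shapes and their decompositions.  */
static void ATTRIBUTE_UNUSED
s390_decompose_shift_count_example (void)
{
  rtx base;
  HOST_WIDE_INT off;

  /* Plain constant: BASE becomes NULL_RTX, OFFSET 6.  */
  s390_decompose_shift_count (GEN_INT (6), &base, &off);

  /* Register plus constant: BASE becomes (reg 3), OFFSET 6.  */
  s390_decompose_shift_count (gen_rtx_PLUS (Pmode,
                                            gen_rtx_REG (Pmode, 3),
                                            GEN_INT (6)),
                              &base, &off);
}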
2957 /* Return true if OP is a valid address without index. */
2960 s390_legitimate_address_without_index_p (rtx op)
2962 struct s390_address addr;
2964 if (!s390_decompose_address (XEXP (op, 0), &addr))
2973 /* Return TRUE if ADDR is an operand valid for a load/store relative
2974 instruction. Be aware that the alignment of the operand needs to
2975 be checked separately.
2976 Valid addresses are single references or a sum of a reference and a
2977 constant integer. Return these parts in SYMREF and ADDEND. You can
2978 pass NULL in SYMREF and/or ADDEND if you are not interested in these
2979 values. Literal pool references are *not* considered symbol references. */
2983 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
2985 HOST_WIDE_INT tmpaddend = 0;
2987 if (GET_CODE (addr) == CONST)
2988 addr = XEXP (addr, 0);
2990 if (GET_CODE (addr) == PLUS)
2992 if (!CONST_INT_P (XEXP (addr, 1)))
2995 tmpaddend = INTVAL (XEXP (addr, 1));
2996 addr = XEXP (addr, 0);
2999 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3000 || (GET_CODE (addr) == UNSPEC
3001 && (XINT (addr, 1) == UNSPEC_GOTENT
3002 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3007 *addend = tmpaddend;
3014 /* Return true if the address in OP is valid for constraint letter C
3015 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3016 pool MEMs should be accepted. Only the Q, R, S, T constraint
3017 letters are allowed for C. */
3020 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3022 struct s390_address addr;
3023 bool decomposed = false;
3025 /* This check makes sure that no symbolic addresses (except literal
3026 pool references) are accepted by the R or T constraints. */
3027 if (s390_loadrelative_operand_p (op, NULL, NULL))
3030 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3033 if (!s390_decompose_address (op, &addr))
3035 if (addr.literal_pool)
3042 case 'Q': /* no index short displacement */
3043 if (!decomposed && !s390_decompose_address (op, &addr))
3047 if (!s390_short_displacement (addr.disp))
3051 case 'R': /* with index short displacement */
3052 if (TARGET_LONG_DISPLACEMENT)
3054 if (!decomposed && !s390_decompose_address (op, &addr))
3056 if (!s390_short_displacement (addr.disp))
3059 /* Any invalid address here will be fixed up by reload,
3060 so accept it for the most generic constraint. */
3063 case 'S': /* no index long displacement */
3064 if (!TARGET_LONG_DISPLACEMENT)
3066 if (!decomposed && !s390_decompose_address (op, &addr))
3070 if (s390_short_displacement (addr.disp))
3074 case 'T': /* with index long displacement */
3075 if (!TARGET_LONG_DISPLACEMENT)
3077 /* Any invalid address here will be fixed up by reload,
3078 so accept it for the most generic constraint. */
3079 if ((decomposed || s390_decompose_address (op, &addr))
3080 && s390_short_displacement (addr.disp))
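/* Illustrative examples, assuming the long displacement facility is
   available:

     4(%r1)         satisfies Q and R  (short displacement, no index)
     4(%r2,%r1)     satisfies R only   (short displacement, index)
     4096(%r1)      satisfies S and T  (long displacement, no index)
     4096(%r2,%r1)  satisfies T only   (long displacement, index)  */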
3090 /* Evaluates constraint strings described by the regular expression
3091 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
3092 the constraint given in STR, and 0 otherwise. */
3095 s390_mem_constraint (const char *str, rtx op)
3102 /* Check for offsettable variants of memory constraints. */
3103 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3105 if ((reload_completed || reload_in_progress)
3106 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3108 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3110 /* Check for non-literal-pool variants of memory constraints. */
3113 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3118 if (GET_CODE (op) != MEM)
3120 return s390_check_qrst_address (c, XEXP (op, 0), true);
3122 return (s390_check_qrst_address ('Q', op, true)
3123 || s390_check_qrst_address ('R', op, true));
3125 return (s390_check_qrst_address ('S', op, true)
3126 || s390_check_qrst_address ('T', op, true));
3128 /* Simply check for the basic form of a shift count. Reload will
3129 take care of making sure we have a proper base register. */
3130 if (!s390_decompose_shift_count (op, NULL, NULL))
3134 return s390_check_qrst_address (str[1], op, true);
3142 /* Evaluates constraint strings starting with letter O. Input
3143 parameter C is the second letter following the "O" in the constraint
3144 string. Returns 1 if VALUE meets the respective constraint and 0 otherwise. */
3148 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3156 return trunc_int_for_mode (value, SImode) == value;
3160 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3163 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3171 /* Evaluates constraint strings starting with letter N. Parameter STR
3172 contains the letters following letter "N" in the constraint string.
3173 Returns true if VALUE matches the constraint. */
3176 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3178 machine_mode mode, part_mode;
3180 int part, part_goal;
3186 part_goal = str[0] - '0';
3230 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3233 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3236 if (part_goal != -1 && part_goal != part)
3243 /* Returns true if the input parameter VALUE is a float zero. */
3246 s390_float_const_zero_p (rtx value)
3248 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3249 && value == CONST0_RTX (GET_MODE (value)));
3252 /* Implement TARGET_REGISTER_MOVE_COST. */
3255 s390_register_move_cost (machine_mode mode,
3256 reg_class_t from, reg_class_t to)
3258 /* On s390, copy between fprs and gprs is expensive. */
3260 /* It becomes somewhat faster when ldgr/lgdr are available. */
3261 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3263 /* ldgr is single cycle. */
3264 if (reg_classes_intersect_p (from, GENERAL_REGS)
3265 && reg_classes_intersect_p (to, FP_REGS))
3267 /* lgdr needs 3 cycles. */
3268 if (reg_classes_intersect_p (to, GENERAL_REGS)
3269 && reg_classes_intersect_p (from, FP_REGS))
3273 /* Otherwise copying is done via memory. */
3274 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3275 && reg_classes_intersect_p (to, FP_REGS))
3276 || (reg_classes_intersect_p (from, FP_REGS)
3277 && reg_classes_intersect_p (to, GENERAL_REGS)))
3283 /* Implement TARGET_MEMORY_MOVE_COST. */
3286 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3287 reg_class_t rclass ATTRIBUTE_UNUSED,
3288 bool in ATTRIBUTE_UNUSED)
3293 /* Compute a (partial) cost for rtx X. Return true if the complete
3294 cost has been computed, and false if subexpressions should be
3295 scanned. In either case, *TOTAL contains the cost result.
3296 OUTER_CODE contains the code of the superexpression of x. */
3299 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3300 int opno ATTRIBUTE_UNUSED,
3301 int *total, bool speed ATTRIBUTE_UNUSED)
3303 int code = GET_CODE (x);
3317 if (GET_CODE (XEXP (x, 0)) == AND
3318 && GET_CODE (XEXP (x, 1)) == ASHIFT
3319 && REG_P (XEXP (XEXP (x, 0), 0))
3320 && REG_P (XEXP (XEXP (x, 1), 0))
3321 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3322 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3323 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3324 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3326 *total = COSTS_N_INSNS (2);
3338 *total = COSTS_N_INSNS (1);
3343 *total = COSTS_N_INSNS (1);
3351 rtx left = XEXP (x, 0);
3352 rtx right = XEXP (x, 1);
3353 if (GET_CODE (right) == CONST_INT
3354 && CONST_OK_FOR_K (INTVAL (right)))
3355 *total = s390_cost->mhi;
3356 else if (GET_CODE (left) == SIGN_EXTEND)
3357 *total = s390_cost->mh;
3359 *total = s390_cost->ms; /* msr, ms, msy */
3364 rtx left = XEXP (x, 0);
3365 rtx right = XEXP (x, 1);
3368 if (GET_CODE (right) == CONST_INT
3369 && CONST_OK_FOR_K (INTVAL (right)))
3370 *total = s390_cost->mghi;
3371 else if (GET_CODE (left) == SIGN_EXTEND)
3372 *total = s390_cost->msgf;
3374 *total = s390_cost->msg; /* msgr, msg */
3376 else /* TARGET_31BIT */
3378 if (GET_CODE (left) == SIGN_EXTEND
3379 && GET_CODE (right) == SIGN_EXTEND)
3380 /* mulsidi case: mr, m */
3381 *total = s390_cost->m;
3382 else if (GET_CODE (left) == ZERO_EXTEND
3383 && GET_CODE (right) == ZERO_EXTEND
3384 && TARGET_CPU_ZARCH)
3385 /* umulsidi case: ml, mlr */
3386 *total = s390_cost->ml;
3388 /* Complex calculation is required. */
3389 *total = COSTS_N_INSNS (40);
3395 *total = s390_cost->mult_df;
3398 *total = s390_cost->mxbr;
3409 *total = s390_cost->madbr;
3412 *total = s390_cost->maebr;
3417 /* Negation of the third argument is free: FMSUB. */
3418 if (GET_CODE (XEXP (x, 2)) == NEG)
3420 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3421 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3422 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3429 if (mode == TImode) /* 128 bit division */
3430 *total = s390_cost->dlgr;
3431 else if (mode == DImode)
3433 rtx right = XEXP (x, 1);
3434 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3435 *total = s390_cost->dlr;
3436 else /* 64 by 64 bit division */
3437 *total = s390_cost->dlgr;
3439 else if (mode == SImode) /* 32 bit division */
3440 *total = s390_cost->dlr;
3447 rtx right = XEXP (x, 1);
3448 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3450 *total = s390_cost->dsgfr;
3452 *total = s390_cost->dr;
3453 else /* 64 by 64 bit division */
3454 *total = s390_cost->dsgr;
3456 else if (mode == SImode) /* 32 bit division */
3457 *total = s390_cost->dlr;
3458 else if (mode == SFmode)
3460 *total = s390_cost->debr;
3462 else if (mode == DFmode)
3464 *total = s390_cost->ddbr;
3466 else if (mode == TFmode)
3468 *total = s390_cost->dxbr;
3474 *total = s390_cost->sqebr;
3475 else if (mode == DFmode)
3476 *total = s390_cost->sqdbr;
3478 *total = s390_cost->sqxbr;
3483 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3484 || outer_code == PLUS || outer_code == MINUS
3485 || outer_code == COMPARE)
3490 *total = COSTS_N_INSNS (1);
3491 if (GET_CODE (XEXP (x, 0)) == AND
3492 && GET_CODE (XEXP (x, 1)) == CONST_INT
3493 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3495 rtx op0 = XEXP (XEXP (x, 0), 0);
3496 rtx op1 = XEXP (XEXP (x, 0), 1);
3497 rtx op2 = XEXP (x, 1);
3499 if (memory_operand (op0, GET_MODE (op0))
3500 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3502 if (register_operand (op0, GET_MODE (op0))
3503 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3513 /* Return the cost of an address rtx ADDR. */
3516 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3517 addr_space_t as ATTRIBUTE_UNUSED,
3518 bool speed ATTRIBUTE_UNUSED)
3520 struct s390_address ad;
3521 if (!s390_decompose_address (addr, &ad))
3524 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3527 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3528 otherwise return 0. */
3531 tls_symbolic_operand (rtx op)
3533 if (GET_CODE (op) != SYMBOL_REF)
3535 return SYMBOL_REF_TLS_MODEL (op);
3538 /* Split DImode access register reference REG (on 64-bit) into its constituent
3539 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3540 gen_highpart cannot be used as they assume all registers are word-sized,
3541 while our access registers have only half that size. */
3544 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3546 gcc_assert (TARGET_64BIT);
3547 gcc_assert (ACCESS_REG_P (reg));
3548 gcc_assert (GET_MODE (reg) == DImode);
3549 gcc_assert (!(REGNO (reg) & 1));
3551 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3552 *hi = gen_rtx_REG (SImode, REGNO (reg));
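/* A minimal illustrative sketch (hypothetical helper, not part of the
   build), for a 64-bit target: splitting the DImode view of the a0/a1
   access-register pair (hard regs 36/37 per regclass_map above).  */
static void ATTRIBUTE_UNUSED
s390_split_access_reg_example (void)
{
  rtx lo, hi;

  s390_split_access_reg (gen_rtx_REG (DImode, 36), &lo, &hi);
  /* hi is (reg:SI 36), the high half; lo is (reg:SI 37).  */
}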
3555 /* Return true if OP contains a symbol reference */
3558 symbolic_reference_mentioned_p (rtx op)
3563 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3566 fmt = GET_RTX_FORMAT (GET_CODE (op));
3567 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3573 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3574 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3578 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3585 /* Return true if OP contains a reference to a thread-local symbol. */
3588 tls_symbolic_reference_mentioned_p (rtx op)
3593 if (GET_CODE (op) == SYMBOL_REF)
3594 return tls_symbolic_operand (op);
3596 fmt = GET_RTX_FORMAT (GET_CODE (op));
3597 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3603 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3604 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3608 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3616 /* Return true if OP is a legitimate general operand when
3617 generating PIC code. It is given that flag_pic is on
3618 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
3621 legitimate_pic_operand_p (rtx op)
3623 /* Accept all non-symbolic constants. */
3624 if (!SYMBOLIC_CONST (op))
3627 /* Reject everything else; must be handled
3628 via emit_symbolic_move. */
3632 /* Returns true if the constant value OP is a legitimate general operand.
3633 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
3636 s390_legitimate_constant_p (machine_mode mode, rtx op)
3638 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3640 if (GET_MODE_SIZE (mode) != 16)
3643 if (!const0_operand (op, mode)
3644 && !s390_contiguous_bitmask_vector_p (op, NULL, NULL)
3645 && !s390_bytemask_vector_p (op, NULL))
3649 /* Accept all non-symbolic constants. */
3650 if (!SYMBOLIC_CONST (op))
3653 /* Accept immediate LARL operands. */
3654 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3657 /* Thread-local symbols are never legal constants. This is
3658 so that emit_call knows that computing such addresses
3659 might require a function call. */
3660 if (TLS_SYMBOLIC_CONST (op))
3663 /* In the PIC case, symbolic constants must *not* be
3664 forced into the literal pool. We accept them here,
3665 so that they will be handled by emit_symbolic_move. */
3669 /* All remaining non-PIC symbolic constants are
3670 forced into the literal pool. */
3674 /* Determine if it's legal to put X into the constant pool. This
3675 is not possible if X contains the address of a symbol that is
3676 not constant (TLS) or not known at final link time (PIC). */
3679 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3681 switch (GET_CODE (x))
3686 /* Accept all non-symbolic constants. */
3690 /* Labels are OK iff we are non-PIC. */
3691 return flag_pic != 0;
3694 /* 'Naked' TLS symbol references are never OK,
3695 non-TLS symbols are OK iff we are non-PIC. */
3696 if (tls_symbolic_operand (x))
3699 return flag_pic != 0;
3702 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3705 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3706 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3709 switch (XINT (x, 1))
3711 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3712 case UNSPEC_LTREL_OFFSET:
3720 case UNSPEC_GOTNTPOFF:
3721 case UNSPEC_INDNTPOFF:
3724 /* If the literal pool shares the code section, execute
3725 template placeholders may be put into the pool as well. */
3727 return TARGET_CPU_ZARCH;
3739 /* Returns true if the constant value OP is a legitimate general
3740 operand during and after reload. The difference to
3741 legitimate_constant_p is that this function will not accept
3742 a constant that would need to be forced to the literal pool
3743 before it can be used as operand.
3744 This function accepts all constants which can be loaded directly into a GPR. */
3748 legitimate_reload_constant_p (rtx op)
3750 /* Accept la(y) operands. */
3751 if (GET_CODE (op) == CONST_INT
3752 && DISP_IN_RANGE (INTVAL (op)))
3755 /* Accept l(g)hi/l(g)fi operands. */
3756 if (GET_CODE (op) == CONST_INT
3757 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3760 /* Accept lliXX operands. */
3762 && GET_CODE (op) == CONST_INT
3763 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3764 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3768 && GET_CODE (op) == CONST_INT
3769 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3770 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3773 /* Accept larl operands. */
3774 if (TARGET_CPU_ZARCH
3775 && larl_operand (op, VOIDmode))
3778 /* Accept floating-point zero operands that fit into a single GPR. */
3779 if (GET_CODE (op) == CONST_DOUBLE
3780 && s390_float_const_zero_p (op)
3781 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3784 /* Accept double-word operands that can be split. */
3785 if (GET_CODE (op) == CONST_INT
3786 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
3788 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3789 rtx hi = operand_subword (op, 0, 0, dword_mode);
3790 rtx lo = operand_subword (op, 1, 0, dword_mode);
3791 return legitimate_reload_constant_p (hi)
3792 && legitimate_reload_constant_p (lo);
3795 /* Everything else cannot be handled without reload. */
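/* A minimal illustrative sketch (hypothetical helper, not part of the
   build), assuming a 31-bit target where word_mode is SImode: the
   DImode constant 0x0000300000002000 fits no single instruction, but
   its two subwords 0x3000 and 0x2000 are both la(y)-loadable, so the
   double-word split above accepts the constant as a whole.  */
static void ATTRIBUTE_UNUSED
legitimate_reload_constant_example (void)
{
  rtx op = GEN_INT (HOST_WIDE_INT_C (0x0000300000002000));

  gcc_assert (legitimate_reload_constant_p (op));
}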
3799 /* Returns true if the constant value OP is a legitimate fp operand
3800 during and after reload.
3801 This function accepts all constants which can be loaded directly into an FPR. */
3805 legitimate_reload_fp_constant_p (rtx op)
3807 /* Accept floating-point zero operands if the load zero instruction
3808 can be used. Prior to z196 the load fp zero instruction caused a
3809 performance penalty if the result was used as a BFP number. */
3811 && GET_CODE (op) == CONST_DOUBLE
3812 && s390_float_const_zero_p (op))
3818 /* Returns true if the constant value OP is a legitimate vector operand
3819 during and after reload.
3820 This function accepts all constants which can be loaded directly into a VR. */
3824 legitimate_reload_vector_constant_p (rtx op)
3826 /* FIXME: Support constant vectors with all the same 16-bit unsigned
3827 operands. These can be loaded with vrepi. */
3829 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3830 && (const0_operand (op, GET_MODE (op))
3831 || constm1_operand (op, GET_MODE (op))
3832 || s390_contiguous_bitmask_vector_p (op, NULL, NULL)
3833 || s390_bytemask_vector_p (op, NULL)))
3839 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3840 return the class of reg to actually use. */
3843 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3845 switch (GET_CODE (op))
3847 /* Constants we cannot reload into general registers
3848 must be forced into the literal pool. */
3852 if (reg_class_subset_p (GENERAL_REGS, rclass)
3853 && legitimate_reload_constant_p (op))
3854 return GENERAL_REGS;
3855 else if (reg_class_subset_p (ADDR_REGS, rclass)
3856 && legitimate_reload_constant_p (op))
3858 else if (reg_class_subset_p (FP_REGS, rclass)
3859 && legitimate_reload_fp_constant_p (op))
3861 else if (reg_class_subset_p (VEC_REGS, rclass)
3862 && legitimate_reload_vector_constant_p (op))
3867 /* If a symbolic constant or a PLUS is reloaded,
3868 it is most likely being used as an address, so
3869 prefer ADDR_REGS. If RCLASS is not a superset
3870 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3872 /* Symrefs cannot be pushed into the literal pool with -fPIC
3873 so we *MUST NOT* return NO_REGS for these cases
3874 (s390_cannot_force_const_mem will return true).
3876 On the other hand we MUST return NO_REGS for symrefs with
3877 invalid addend which might have been pushed to the literal
3878 pool (no -fPIC). Usually we would expect them to be
3879 handled via secondary reload but this does not happen if
3880 they are used as literal pool slot replacement in reload
3881 inheritance (see emit_input_reload_insns). */
3882 if (TARGET_CPU_ZARCH
3883 && GET_CODE (XEXP (op, 0)) == PLUS
3884 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
3885 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
3887 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3895 if (!legitimate_reload_constant_p (op))
3899 /* load address will be used. */
3900 if (reg_class_subset_p (ADDR_REGS, rclass))
3912 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3913 multiple of ALIGNMENT and the SYMBOL_REF being naturally aligned. Otherwise return false. */
3917 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3919 HOST_WIDE_INT addend;
3922 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3925 if (addend & (alignment - 1))
3928 if (GET_CODE (symref) == SYMBOL_REF
3929 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
3932 if (GET_CODE (symref) == UNSPEC
3933 && alignment <= UNITS_PER_LONG)
3939 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3940 operand SCRATCH is used to reload the even part of the address and the odd part is added afterwards. */
3944 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3946 HOST_WIDE_INT addend;
3949 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3953 /* Easy case. The addend is even so larl will do fine. */
3954 emit_move_insn (reg, addr);
3957 /* We can leave the scratch register untouched if the target
3958 register is a valid base register. */
3959 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3960 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3963 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3964 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3967 emit_move_insn (scratch,
3968 gen_rtx_CONST (Pmode,
3969 gen_rtx_PLUS (Pmode, symref,
3970 GEN_INT (addend - 1))));
3972 emit_move_insn (scratch, symref);
3974 /* Increment the address using la in order to avoid clobbering cc. */
3975 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
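/* An illustrative sketch of the emitted code, not taken from any
   particular dump: for sym + 0x1001, REG being a valid base register,
   the sequence is roughly

     larl    %reg, sym+0x1000    ; even part, a valid larl operand
     la      %reg, 1(%reg)       ; add the odd part without clobbering CC

   with SCRATCH standing in for REG when REG cannot serve as base.  */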
3979 /* Generate what is necessary to move between REG and MEM using
3980 SCRATCH. The direction is given by TOMEM. */
3983 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3985 /* Reload might have pulled a constant out of the literal pool.
3986 Force it back in. */
3987 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3988 || GET_CODE (mem) == CONST_VECTOR
3989 || GET_CODE (mem) == CONST)
3990 mem = force_const_mem (GET_MODE (reg), mem);
3992 gcc_assert (MEM_P (mem));
3994 /* For a load from memory we can leave the scratch register
3995 untouched if the target register is a valid base register. */
3997 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3998 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3999 && GET_MODE (reg) == GET_MODE (scratch))
4002 /* Load address into scratch register. Since we can't have a
4003 secondary reload for a secondary reload we have to cover the case
4004 where larl would need a secondary reload here as well. */
4005 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4007 /* Now we can use a standard load/store to do the move. */
4009 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4011 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4014 /* Inform reload about cases where moving X with a mode MODE to a register in
4015 RCLASS requires an extra scratch or immediate register. Return the class
4016 needed for the immediate register. */
4019 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4020 machine_mode mode, secondary_reload_info *sri)
4022 enum reg_class rclass = (enum reg_class) rclass_i;
4024 /* Intermediate register needed. */
4025 if (reg_classes_intersect_p (CC_REGS, rclass))
4026 return GENERAL_REGS;
4030 /* The vst/vl vector move instructions allow only for short displacements. */
4033 && GET_CODE (XEXP (x, 0)) == PLUS
4034 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4035 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4036 && reg_class_subset_p (rclass, VEC_REGS)
4037 && (!reg_class_subset_p (rclass, FP_REGS)
4038 || (GET_MODE_SIZE (mode) > 8
4039 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4042 sri->icode = (TARGET_64BIT ?
4043 CODE_FOR_reloaddi_la_in :
4044 CODE_FOR_reloadsi_la_in);
4046 sri->icode = (TARGET_64BIT ?
4047 CODE_FOR_reloaddi_la_out :
4048 CODE_FOR_reloadsi_la_out);
4054 HOST_WIDE_INT offset;
4057 /* On z10 several optimizer steps may generate larl operands with an odd addend. */
4060 && s390_loadrelative_operand_p (x, &symref, &offset)
4062 && !SYMBOL_REF_ALIGN1_P (symref)
4063 && (offset & 1) == 1)
4064 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4065 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4067 /* Handle all the (mem (symref)) accesses we cannot use the z10
4068 instructions for. */
4070 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4072 || !reg_class_subset_p (rclass, GENERAL_REGS)
4073 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4074 || !s390_check_symref_alignment (XEXP (x, 0),
4075 GET_MODE_SIZE (mode))))
4077 #define __SECONDARY_RELOAD_CASE(M,m) \
4080 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4081 CODE_FOR_reload##m##di_tomem_z10; \
4083 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4084 CODE_FOR_reload##m##si_tomem_z10; \
4087 switch (GET_MODE (x))
4089 __SECONDARY_RELOAD_CASE (QI, qi);
4090 __SECONDARY_RELOAD_CASE (HI, hi);
4091 __SECONDARY_RELOAD_CASE (SI, si);
4092 __SECONDARY_RELOAD_CASE (DI, di);
4093 __SECONDARY_RELOAD_CASE (TI, ti);
4094 __SECONDARY_RELOAD_CASE (SF, sf);
4095 __SECONDARY_RELOAD_CASE (DF, df);
4096 __SECONDARY_RELOAD_CASE (TF, tf);
4097 __SECONDARY_RELOAD_CASE (SD, sd);
4098 __SECONDARY_RELOAD_CASE (DD, dd);
4099 __SECONDARY_RELOAD_CASE (TD, td);
4100 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4101 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4102 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4103 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4104 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4105 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4106 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4107 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4108 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4109 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4110 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4111 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4112 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4113 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4114 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4115 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4116 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4117 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4118 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4119 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4120 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4124 #undef __SECONDARY_RELOAD_CASE
4128 /* We need a scratch register when loading a PLUS expression which
4129 is not a legitimate operand of the LOAD ADDRESS instruction. */
4130 /* LRA can deal with transformation of plus op very well -- so we
4131 don't need to prompt LRA in this case. */
4132 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4133 sri->icode = (TARGET_64BIT ?
4134 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4136 /* When performing a multiword move from or to memory we have to make sure the
4137 second chunk in memory is addressable without causing a displacement
4138 overflow. If that would be the case we calculate the address in
4139 a scratch register. */
4141 && GET_CODE (XEXP (x, 0)) == PLUS
4142 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4143 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4144 + GET_MODE_SIZE (mode) - 1))
4146 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4147 in an s_operand address since we may fall back to lm/stm. So we only
4148 have to care about overflows in the b+i+d case. */
4149 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4150 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4151 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4152 /* For FP_REGS no lm/stm is available so this check is triggered
4153 for displacement overflows in b+i+d and b+d like addresses. */
4154 || (reg_classes_intersect_p (FP_REGS, rclass)
4155 && s390_class_max_nregs (FP_REGS, mode) > 1))
4158 sri->icode = (TARGET_64BIT ?
4159 CODE_FOR_reloaddi_la_in :
4160 CODE_FOR_reloadsi_la_in);
4162 sri->icode = (TARGET_64BIT ?
4163 CODE_FOR_reloaddi_la_out :
4164 CODE_FOR_reloadsi_la_out);
4168 /* A scratch address register is needed when a symbolic constant is
4169 copied to r0 when compiling with -fPIC. In other cases the target
4170 register might be used as temporary (see legitimize_pic_address). */
4171 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4172 sri->icode = (TARGET_64BIT ?
4173 CODE_FOR_reloaddi_PIC_addr :
4174 CODE_FOR_reloadsi_PIC_addr);
4176 /* Either scratch or no register needed. */
4180 /* Generate code to load SRC, which is PLUS that is not a
4181 legitimate operand for the LA instruction, into TARGET.
4182 SCRATCH may be used as scratch register. */
4185 s390_expand_plus_operand (rtx target, rtx src,
4189 struct s390_address ad;
4191 /* src must be a PLUS; get its two operands. */
4192 gcc_assert (GET_CODE (src) == PLUS);
4193 gcc_assert (GET_MODE (src) == Pmode);
4195 /* Check if either of the two operands is already scheduled
4196 for replacement by reload. This can happen e.g. when
4197 float registers occur in an address. */
4198 sum1 = find_replacement (&XEXP (src, 0));
4199 sum2 = find_replacement (&XEXP (src, 1));
4200 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4202 /* If the address is already strictly valid, there's nothing to do. */
4203 if (!s390_decompose_address (src, &ad)
4204 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4205 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4207 /* Otherwise, one of the operands cannot be an address register;
4208 we reload its value into the scratch register. */
4209 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4211 emit_move_insn (scratch, sum1);
4214 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4216 emit_move_insn (scratch, sum2);
4220 /* According to the way these invalid addresses are generated
4221 in reload.c, it should never happen (at least on s390) that
4222 *neither* of the PLUS components, after find_replacements
4223 was applied, is an address register. */
4224 if (sum1 == scratch && sum2 == scratch)
4230 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4233 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4234 is only ever performed on addresses, so we can mark the
4235 sum as legitimate for LA in any case. */
4236 s390_load_address (target, src);
4240 /* Return true if ADDR is a valid memory address.
4241 STRICT specifies whether strict register checking applies. */
4244 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4246 struct s390_address ad;
4249 && larl_operand (addr, VOIDmode)
4250 && (mode == VOIDmode
4251 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4254 if (!s390_decompose_address (addr, &ad))
4259 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4262 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4268 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4269 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4273 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4274 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4280 /* Return true if OP is a valid operand for the LA instruction.
4281 In 31-bit, we need to prove that the result is used as an
4282 address, as LA performs only a 31-bit addition. */
4285 legitimate_la_operand_p (rtx op)
4287 struct s390_address addr;
4288 if (!s390_decompose_address (op, &addr))
4291 return (TARGET_64BIT || addr.pointer);
4294 /* Return true if it is valid *and* preferable to use LA to
4295 compute the sum of OP1 and OP2. */
4298 preferred_la_operand_p (rtx op1, rtx op2)
4300 struct s390_address addr;
4302 if (op2 != const0_rtx)
4303 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4305 if (!s390_decompose_address (op1, &addr))
4307 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4309 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4312 /* Avoid LA instructions with index register on z196; it is
4313 preferable to use regular add instructions when possible.
4314 Starting with zEC12 the la with index register is "uncracked" again. */
4316 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4319 if (!TARGET_64BIT && !addr.pointer)
4325 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4326 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4332 /* Emit a forced load-address operation to load SRC into DST.
4333 This will use the LOAD ADDRESS instruction even in situations
4334 where legitimate_la_operand_p (SRC) returns false. */
4337 s390_load_address (rtx dst, rtx src)
4340 emit_move_insn (dst, src);
4342 emit_insn (gen_force_la_31 (dst, src));
4345 /* Return a legitimate reference for ORIG (an address) using the
4346 register REG. If REG is 0, a new pseudo is generated.
4348 There are two types of references that must be handled:
4350 1. Global data references must load the address from the GOT, via
4351 the PIC reg. An insn is emitted to do this load, and the reg is returned.
4354 2. Static data references, constant pool addresses, and code labels
4355 compute the address as an offset from the GOT, whose base is in
4356 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4357 differentiate them from global data objects. The returned
4358 address is the PIC reg + an unspec constant.
4360 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4361 reg also appears in the address. */
4364 legitimize_pic_address (rtx orig, rtx reg)
4367 rtx addend = const0_rtx;
4370 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4372 if (GET_CODE (addr) == CONST)
4373 addr = XEXP (addr, 0);
4375 if (GET_CODE (addr) == PLUS)
4377 addend = XEXP (addr, 1);
4378 addr = XEXP (addr, 0);
4381 if ((GET_CODE (addr) == LABEL_REF
4382 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4383 || (GET_CODE (addr) == UNSPEC &&
4384 (XINT (addr, 1) == UNSPEC_GOTENT
4385 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4386 && GET_CODE (addend) == CONST_INT)
4388 /* This can be locally addressed. */
4390 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4391 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4392 gen_rtx_CONST (Pmode, addr) : addr);
4394 if (TARGET_CPU_ZARCH
4395 && larl_operand (const_addr, VOIDmode)
4396 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4397 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4399 if (INTVAL (addend) & 1)
4401 /* LARL can't handle odd offsets, so emit a pair of LARL and LA. */
4403 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4405 if (!DISP_IN_RANGE (INTVAL (addend)))
4407 HOST_WIDE_INT even = INTVAL (addend) - 1;
4408 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4409 addr = gen_rtx_CONST (Pmode, addr);
4410 addend = const1_rtx;
4413 emit_move_insn (temp, addr);
4414 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4418 s390_load_address (reg, new_rtx);
4424 /* If the offset is even, we can just use LARL. This
4425 will happen automatically. */
4430 /* No larl - Access local symbols relative to the GOT. */
4432 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4434 if (reload_in_progress || reload_completed)
4435 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4437 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4438 if (addend != const0_rtx)
4439 addr = gen_rtx_PLUS (Pmode, addr, addend);
4440 addr = gen_rtx_CONST (Pmode, addr);
4441 addr = force_const_mem (Pmode, addr);
4442 emit_move_insn (temp, addr);
4444 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4447 s390_load_address (reg, new_rtx);
4452 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4454 /* A non-local symbol reference without addend.
4456 The symbol ref is wrapped into an UNSPEC to make sure the
4457 proper operand modifier (@GOT or @GOTENT) will be emitted.
4458 This will tell the linker to put the symbol into the GOT.
4460 Additionally the code dereferencing the GOT slot is emitted here.
4462 An addend to the symref needs to be added afterwards.
4463 legitimize_pic_address calls itself recursively to handle
4464 that case. So no need to do it here. */
4467 reg = gen_reg_rtx (Pmode);
4471 /* Use load relative if possible.
4472 lgrl <target>, sym@GOTENT */
4473 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4474 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4475 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4477 emit_move_insn (reg, new_rtx);
4480 else if (flag_pic == 1)
4482 /* Assume GOT offset is a valid displacement operand (< 4k
4483 or < 512k with z990). This is handled the same way in
4484 both 31- and 64-bit code (@GOT).
4485 lg <target>, sym@GOT(r12) */
4487 if (reload_in_progress || reload_completed)
4488 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4490 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4491 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4492 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4493 new_rtx = gen_const_mem (Pmode, new_rtx);
4494 emit_move_insn (reg, new_rtx);
4497 else if (TARGET_CPU_ZARCH)
4499 /* If the GOT offset might be >= 4k, we determine the position
4500 of the GOT entry via a PC-relative LARL (@GOTENT).
4501 larl temp, sym@GOTENT
4502 lg <target>, 0(temp) */
4504 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4506 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4507 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4509 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4510 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4511 emit_move_insn (temp, new_rtx);
4513 new_rtx = gen_const_mem (Pmode, temp);
4514 emit_move_insn (reg, new_rtx);
4520 /* If the GOT offset might be >= 4k, we have to load it
4521 from the literal pool (@GOT).
4523 lg temp, lit-litbase(r13)
4524 lg <target>, 0(temp)
4525 lit: .long sym@GOT */
4527 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4529 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4530 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4532 if (reload_in_progress || reload_completed)
4533 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4535 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4536 addr = gen_rtx_CONST (Pmode, addr);
4537 addr = force_const_mem (Pmode, addr);
4538 emit_move_insn (temp, addr);
4540 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4541 new_rtx = gen_const_mem (Pmode, new_rtx);
4542 emit_move_insn (reg, new_rtx);
4546 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4548 gcc_assert (XVECLEN (addr, 0) == 1);
4549 switch (XINT (addr, 1))
4551 /* These are addresses of symbols (or PLT slots) relative to the
4552 GOT (not GOT slots!). In general this will exceed the
4553 displacement range, so these values belong in the literal pool. */
4557 new_rtx = force_const_mem (Pmode, orig);
4560 /* For -fPIC the GOT size might exceed the displacement
4561 range so make sure the value is in the literal pool. */
4564 new_rtx = force_const_mem (Pmode, orig);
4567 /* For @GOTENT larl is used. This is handled like local symbol refs. */
4573 /* @PLT is OK as is on 64-bit, must be converted to
4574 GOT-relative @PLTOFF on 31-bit. */
4576 if (!TARGET_CPU_ZARCH)
4578 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4580 if (reload_in_progress || reload_completed)
4581 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4583 addr = XVECEXP (addr, 0, 0);
4584 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4586 if (addend != const0_rtx)
4587 addr = gen_rtx_PLUS (Pmode, addr, addend);
4588 addr = gen_rtx_CONST (Pmode, addr);
4589 addr = force_const_mem (Pmode, addr);
4590 emit_move_insn (temp, addr);
4592 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4595 s390_load_address (reg, new_rtx);
4600 /* On 64-bit larl can be used. This case is handled like
4601 local symbol refs. */
4605 /* Everything else cannot happen. */
4610 else if (addend != const0_rtx)
4612 /* Otherwise, compute the sum. */
4614 rtx base = legitimize_pic_address (addr, reg);
4615 new_rtx = legitimize_pic_address (addend,
4616 base == reg ? NULL_RTX : reg);
4617 if (GET_CODE (new_rtx) == CONST_INT)
4618 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4621 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4623 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4624 new_rtx = XEXP (new_rtx, 1);
4626 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4629 if (GET_CODE (new_rtx) == CONST)
4630 new_rtx = XEXP (new_rtx, 0);
4631 new_rtx = force_operand (new_rtx, 0);
4637 /* Load the thread pointer into a register. */
4640 s390_get_thread_pointer (void)
4642 rtx tp = gen_reg_rtx (Pmode);
4644 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4645 mark_reg_pointer (tp, BITS_PER_WORD);
4650 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4651 in s390_tls_symbol which always refers to __tls_get_offset.
4652 The returned offset is written to RESULT_REG and a USE rtx is
4653 generated for TLS_CALL. */
4655 static GTY(()) rtx s390_tls_symbol;
4658 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4663 emit_insn (s390_load_got ());
4665 if (!s390_tls_symbol)
4666 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4668 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4669 gen_rtx_REG (Pmode, RETURN_REGNUM));
4671 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4672 RTL_CONST_CALL_P (insn) = 1;
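/* An illustrative sketch, not taken from any particular dump: for the
   global-dynamic model the pieces above combine into roughly

     lg      %r2, lit-litbase(%r13)      ; lit: .quad sym@TLSGD
     brasl   %r14, __tls_get_offset@PLT  ; offset returned in %r2
     la      %reg, 0(%r2,%tp)            ; add the thread pointer

   where %tp stands for the extracted thread pointer and the GOT
   register has been loaded beforehand on 31-bit.  */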
4675 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4676 this (thread-local) address. REG may be used as temporary. */
4679 legitimize_tls_address (rtx addr, rtx reg)
4681 rtx new_rtx, tls_call, temp, base, r2, insn;
4683 if (GET_CODE (addr) == SYMBOL_REF)
4684 switch (tls_symbolic_operand (addr))
4686 case TLS_MODEL_GLOBAL_DYNAMIC:
4688 r2 = gen_rtx_REG (Pmode, 2);
4689 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4690 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4691 new_rtx = force_const_mem (Pmode, new_rtx);
4692 emit_move_insn (r2, new_rtx);
4693 s390_emit_tls_call_insn (r2, tls_call);
4694 insn = get_insns ();
4697 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4698 temp = gen_reg_rtx (Pmode);
4699 emit_libcall_block (insn, temp, r2, new_rtx);
4701 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4704 s390_load_address (reg, new_rtx);
4709 case TLS_MODEL_LOCAL_DYNAMIC:
4711 r2 = gen_rtx_REG (Pmode, 2);
4712 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4713 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4714 new_rtx = force_const_mem (Pmode, new_rtx);
4715 emit_move_insn (r2, new_rtx);
4716 s390_emit_tls_call_insn (r2, tls_call);
4717 insn = get_insns ();
4720 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4721 temp = gen_reg_rtx (Pmode);
4722 emit_libcall_block (insn, temp, r2, new_rtx);
4724 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4725 base = gen_reg_rtx (Pmode);
4726 s390_load_address (base, new_rtx);
4728 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4729 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4730 new_rtx = force_const_mem (Pmode, new_rtx);
4731 temp = gen_reg_rtx (Pmode);
4732 emit_move_insn (temp, new_rtx);
4734 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4737 s390_load_address (reg, new_rtx);
4742 case TLS_MODEL_INITIAL_EXEC:
4745 /* Assume GOT offset < 4k. This is handled the same way
4746 in both 31- and 64-bit code. */
4748 if (reload_in_progress || reload_completed)
4749 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4751 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4752 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4753 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4754 new_rtx = gen_const_mem (Pmode, new_rtx);
4755 temp = gen_reg_rtx (Pmode);
4756 emit_move_insn (temp, new_rtx);
4758 else if (TARGET_CPU_ZARCH)
4760 /* If the GOT offset might be >= 4k, we determine the position
4761 of the GOT entry via a PC-relative LARL. */
4763 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4764 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4765 temp = gen_reg_rtx (Pmode);
4766 emit_move_insn (temp, new_rtx);
4768 new_rtx = gen_const_mem (Pmode, temp);
4769 temp = gen_reg_rtx (Pmode);
4770 emit_move_insn (temp, new_rtx);
4774 /* If the GOT offset might be >= 4k, we have to load it
4775 from the literal pool. */
4777 if (reload_in_progress || reload_completed)
4778 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4780 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4781 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4782 new_rtx = force_const_mem (Pmode, new_rtx);
4783 temp = gen_reg_rtx (Pmode);
4784 emit_move_insn (temp, new_rtx);
4786 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4787 new_rtx = gen_const_mem (Pmode, new_rtx);
4789 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4790 temp = gen_reg_rtx (Pmode);
4791 emit_insn (gen_rtx_SET (temp, new_rtx));
4795 /* In position-dependent code, load the absolute address of
4796 the GOT entry from the literal pool. */
4798 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4799 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4800 new_rtx = force_const_mem (Pmode, new_rtx);
4801 temp = gen_reg_rtx (Pmode);
4802 emit_move_insn (temp, new_rtx);
4805 new_rtx = gen_const_mem (Pmode, new_rtx);
4806 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4807 temp = gen_reg_rtx (Pmode);
4808 emit_insn (gen_rtx_SET (temp, new_rtx));
4811 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4814 s390_load_address (reg, new_rtx);
4819 case TLS_MODEL_LOCAL_EXEC:
4820 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4821 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4822 new_rtx = force_const_mem (Pmode, new_rtx);
4823 temp = gen_reg_rtx (Pmode);
4824 emit_move_insn (temp, new_rtx);
4826 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4829 s390_load_address (reg, new_rtx);
4838 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
4840 switch (XINT (XEXP (addr, 0), 1))
4842 case UNSPEC_INDNTPOFF:
4843 gcc_assert (TARGET_CPU_ZARCH);
4852 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
4853 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4855 new_rtx = XEXP (XEXP (addr, 0), 0);
4856 if (GET_CODE (new_rtx) != SYMBOL_REF)
4857 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4859 new_rtx = legitimize_tls_address (new_rtx, reg);
4860 new_rtx = plus_constant (Pmode, new_rtx,
4861 INTVAL (XEXP (XEXP (addr, 0), 1)));
4862 new_rtx = force_operand (new_rtx, 0);
4866 gcc_unreachable (); /* for now ... */
4871 /* Emit insns making the address in operands[1] valid for a standard
4872 move to operands[0]. operands[1] is replaced by an address which
4873 should be used instead of the former RTX to emit the move pattern.  */
4877 emit_symbolic_move (rtx *operands)
4879 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
4881 if (GET_CODE (operands[0]) == MEM)
4882 operands[1] = force_reg (Pmode, operands[1]);
4883 else if (TLS_SYMBOLIC_CONST (operands[1]))
4884 operands[1] = legitimize_tls_address (operands[1], temp);
4886 operands[1] = legitimize_pic_address (operands[1], temp);
4889 /* Try machine-dependent ways of modifying an illegitimate address X
4890 to be legitimate. If we find one, return the new, valid address.
4892 OLDX is the address as it was before break_out_memory_refs was called.
4893 In some cases it is useful to look at this to decide what needs to be done.
4895 MODE is the mode of the operand pointed to by X.
4897 When -fpic is used, special handling is needed for symbolic references.
4898 See comments by legitimize_pic_address for details. */
4901 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
4902 machine_mode mode ATTRIBUTE_UNUSED)
4904 rtx constant_term = const0_rtx;
4906 if (TLS_SYMBOLIC_CONST (x))
4908 x = legitimize_tls_address (x, 0);
4910 if (s390_legitimate_address_p (mode, x, FALSE))
4913 else if (GET_CODE (x) == PLUS
4914 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
4915 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
4921 if (SYMBOLIC_CONST (x)
4922 || (GET_CODE (x) == PLUS
4923 && (SYMBOLIC_CONST (XEXP (x, 0))
4924 || SYMBOLIC_CONST (XEXP (x, 1)))))
4925 x = legitimize_pic_address (x, 0);
4927 if (s390_legitimate_address_p (mode, x, FALSE))
4931 x = eliminate_constant_term (x, &constant_term);
4933 /* Optimize loading of large displacements by splitting them
4934 into the multiple of 4K and the rest; this allows the
4935 former to be CSE'd if possible.
4937 Don't do this if the displacement is added to a register
4938 pointing into the stack frame, as the offsets will
4939 change later anyway. */
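  /* For example, a constant term of 0x12345 is split into
     upper == 0x12000 and lower == 0x345: the 4K multiple goes into a
     register (and can be CSE'd across references), while the remaining
     0x345 fits into the 12-bit displacement field.  */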
4941 if (GET_CODE (constant_term) == CONST_INT
4942 && !TARGET_LONG_DISPLACEMENT
4943 && !DISP_IN_RANGE (INTVAL (constant_term))
4944 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
4946 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
4947 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
4949 rtx temp = gen_reg_rtx (Pmode);
4950 rtx val = force_operand (GEN_INT (upper), temp);
4952 emit_move_insn (temp, val);
4954 x = gen_rtx_PLUS (Pmode, x, temp);
4955 constant_term = GEN_INT (lower);
4958 if (GET_CODE (x) == PLUS)
4960 if (GET_CODE (XEXP (x, 0)) == REG)
4962 rtx temp = gen_reg_rtx (Pmode);
4963 rtx val = force_operand (XEXP (x, 1), temp);
4965 emit_move_insn (temp, val);
4967 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
4970 else if (GET_CODE (XEXP (x, 1)) == REG)
4972 rtx temp = gen_reg_rtx (Pmode);
4973 rtx val = force_operand (XEXP (x, 0), temp);
4975 emit_move_insn (temp, val);
4977 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4981 if (constant_term != const0_rtx)
4982 x = gen_rtx_PLUS (Pmode, x, constant_term);
4987 /* Try a machine-dependent way of reloading an illegitimate address AD
4988 operand. If we find one, push the reload and return the new address.
4990 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4991 and TYPE is the reload type of the current reload. */
4994 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
4995 int opnum, int type)
4997 if (!optimize || TARGET_LONG_DISPLACEMENT)
5000 if (GET_CODE (ad) == PLUS)
5002 rtx tem = simplify_binary_operation (PLUS, Pmode,
5003 XEXP (ad, 0), XEXP (ad, 1));
5008 if (GET_CODE (ad) == PLUS
5009 && GET_CODE (XEXP (ad, 0)) == REG
5010 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5011 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5013 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5014 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
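      /* E.g. for a displacement of 0x12345 this yields upper == 0x12000
	 and lower == 0x345; the 4k-multiple part is reloaded into a base
	 register so that the remaining displacement fits into 12 bits.  */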
5015 rtx cst, tem, new_rtx;
5017 cst = GEN_INT (upper);
5018 if (!legitimate_reload_constant_p (cst))
5019 cst = force_const_mem (Pmode, cst);
5021 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5022 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5024 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5025 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5026 opnum, (enum reload_type) type);
5033 /* Emit code to move LEN bytes from SRC to DST.  */
5036 s390_expand_movmem (rtx dst, rtx src, rtx len)
5038 /* When tuning for z10 or higher we rely on the Glibc functions to
5039 do the right thing.  Only for constant lengths up to 64k do we
5040 generate inline code.  */
5041 if (s390_tune >= PROCESSOR_2097_Z10
5042 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5045 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5047 if (INTVAL (len) > 0)
5048 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
5051 else if (TARGET_MVCLE)
5053 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5058 rtx dst_addr, src_addr, count, blocks, temp;
5059 rtx_code_label *loop_start_label = gen_label_rtx ();
5060 rtx_code_label *loop_end_label = gen_label_rtx ();
5061 rtx_code_label *end_label = gen_label_rtx ();
5064 mode = GET_MODE (len);
5065 if (mode == VOIDmode)
5068 dst_addr = gen_reg_rtx (Pmode);
5069 src_addr = gen_reg_rtx (Pmode);
5070 count = gen_reg_rtx (mode);
5071 blocks = gen_reg_rtx (mode);
5073 convert_move (count, len, 1);
5074 emit_cmp_and_jump_insns (count, const0_rtx,
5075 EQ, NULL_RTX, mode, 1, end_label);
5077 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5078 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5079 dst = change_address (dst, VOIDmode, dst_addr);
5080 src = change_address (src, VOIDmode, src_addr);
5082 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5085 emit_move_insn (count, temp);
5087 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5090 emit_move_insn (blocks, temp);
5092 emit_cmp_and_jump_insns (blocks, const0_rtx,
5093 EQ, NULL_RTX, mode, 1, loop_end_label);
5095 emit_label (loop_start_label);
5098 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5102 /* Issue a read prefetch for the +3 cache line. */
5103 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5104 const0_rtx, const0_rtx);
5105 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5106 emit_insn (prefetch);
5108 /* Issue a write prefetch for the +3 cache line. */
5109 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5110 const1_rtx, const0_rtx);
5111 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5112 emit_insn (prefetch);
5115 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5116 s390_load_address (dst_addr,
5117 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5118 s390_load_address (src_addr,
5119 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5121 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5124 emit_move_insn (blocks, temp);
5126 emit_cmp_and_jump_insns (blocks, const0_rtx,
5127 EQ, NULL_RTX, mode, 1, loop_end_label);
5129 emit_jump (loop_start_label);
5130 emit_label (loop_end_label);
5132 emit_insn (gen_movmem_short (dst, src,
5133 convert_to_mode (Pmode, count, 1)));
5134 emit_label (end_label);
5139 /* Emit code to set LEN bytes at DST to VAL.
5140 Make use of clrmem if VAL is zero. */
5143 s390_expand_setmem (rtx dst, rtx len, rtx val)
5145 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
5148 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5150 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
5152 if (val == const0_rtx && INTVAL (len) <= 256)
5153 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
5156 /* Initialize memory by storing the first byte. */
5157 emit_move_insn (adjust_address (dst, QImode, 0), val);
5159 if (INTVAL (len) > 1)
5161 /* Initiate a 1-byte overlapping move.
5162 The first byte of DST is propagated through DSTP1.
5163 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
5164 DST is set to size 1 so the rest of the memory location
5165 does not count as source operand. */
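	  /* E.g. for LEN == 4 the MVC emitted below copies 3 bytes from
	     DST to DST+1.  Since MVC copies bytes in ascending order, the
	     byte just stored at DST is propagated, so DST..DST+3 all end
	     up holding VAL.  */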
5166 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
5167 set_mem_size (dst, 1);
5169 emit_insn (gen_movmem_short (dstp1, dst,
5170 GEN_INT (INTVAL (len) - 2)));
5175 else if (TARGET_MVCLE)
5177 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5178 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
5183 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5184 rtx_code_label *loop_start_label = gen_label_rtx ();
5185 rtx_code_label *loop_end_label = gen_label_rtx ();
5186 rtx_code_label *end_label = gen_label_rtx ();
5189 mode = GET_MODE (len);
5190 if (mode == VOIDmode)
5193 dst_addr = gen_reg_rtx (Pmode);
5194 count = gen_reg_rtx (mode);
5195 blocks = gen_reg_rtx (mode);
5197 convert_move (count, len, 1);
5198 emit_cmp_and_jump_insns (count, const0_rtx,
5199 EQ, NULL_RTX, mode, 1, end_label);
5201 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5202 dst = change_address (dst, VOIDmode, dst_addr);
5204 if (val == const0_rtx)
5205 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5209 dstp1 = adjust_address (dst, VOIDmode, 1);
5210 set_mem_size (dst, 1);
5212 /* Initialize memory by storing the first byte. */
5213 emit_move_insn (adjust_address (dst, QImode, 0), val);
5215 /* If count is 1 we are done. */
5216 emit_cmp_and_jump_insns (count, const1_rtx,
5217 EQ, NULL_RTX, mode, 1, end_label);
5219 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
5223 emit_move_insn (count, temp);
5225 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5228 emit_move_insn (blocks, temp);
5230 emit_cmp_and_jump_insns (blocks, const0_rtx,
5231 EQ, NULL_RTX, mode, 1, loop_end_label);
5233 emit_label (loop_start_label);
5236 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5238 /* Issue a write prefetch for the +4 cache line. */
5239 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5241 const1_rtx, const0_rtx);
5242 emit_insn (prefetch);
5243 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5246 if (val == const0_rtx)
5247 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5249 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
5250 s390_load_address (dst_addr,
5251 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5253 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5256 emit_move_insn (blocks, temp);
5258 emit_cmp_and_jump_insns (blocks, const0_rtx,
5259 EQ, NULL_RTX, mode, 1, loop_end_label);
5261 emit_jump (loop_start_label);
5262 emit_label (loop_end_label);
5264 if (val == const0_rtx)
5265 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5267 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
5268 emit_label (end_label);
5272 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5273 and return the result in TARGET. */
5276 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5278 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5281 /* When tuning for z10 or higher we rely on the Glibc functions to
5282 do the right thing.  Only for constant lengths up to 64k do we
5283 generate inline code.  */
5284 if (s390_tune >= PROCESSOR_2097_Z10
5285 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5288 /* As the result of CMPINT is inverted compared to what we need,
5289 we have to swap the operands. */
5290 tmp = op0; op0 = op1; op1 = tmp;
5292 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5294 if (INTVAL (len) > 0)
5296 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5297 emit_insn (gen_cmpint (target, ccreg));
5300 emit_move_insn (target, const0_rtx);
5302 else if (TARGET_MVCLE)
5304 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5305 emit_insn (gen_cmpint (target, ccreg));
5309 rtx addr0, addr1, count, blocks, temp;
5310 rtx_code_label *loop_start_label = gen_label_rtx ();
5311 rtx_code_label *loop_end_label = gen_label_rtx ();
5312 rtx_code_label *end_label = gen_label_rtx ();
5315 mode = GET_MODE (len);
5316 if (mode == VOIDmode)
5319 addr0 = gen_reg_rtx (Pmode);
5320 addr1 = gen_reg_rtx (Pmode);
5321 count = gen_reg_rtx (mode);
5322 blocks = gen_reg_rtx (mode);
5324 convert_move (count, len, 1);
5325 emit_cmp_and_jump_insns (count, const0_rtx,
5326 EQ, NULL_RTX, mode, 1, end_label);
5328 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5329 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5330 op0 = change_address (op0, VOIDmode, addr0);
5331 op1 = change_address (op1, VOIDmode, addr1);
5333 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5336 emit_move_insn (count, temp);
5338 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5341 emit_move_insn (blocks, temp);
5343 emit_cmp_and_jump_insns (blocks, const0_rtx,
5344 EQ, NULL_RTX, mode, 1, loop_end_label);
5346 emit_label (loop_start_label);
5349 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5353 /* Issue a read prefetch for the +2 cache line of operand 1. */
5354 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5355 const0_rtx, const0_rtx);
5356 emit_insn (prefetch);
5357 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5359 /* Issue a read prefetch for the +2 cache line of operand 2. */
5360 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5361 const0_rtx, const0_rtx);
5362 emit_insn (prefetch);
5363 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5366 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5367 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5368 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5369 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5370 temp = gen_rtx_SET (pc_rtx, temp);
5371 emit_jump_insn (temp);
5373 s390_load_address (addr0,
5374 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5375 s390_load_address (addr1,
5376 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5378 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5381 emit_move_insn (blocks, temp);
5383 emit_cmp_and_jump_insns (blocks, const0_rtx,
5384 EQ, NULL_RTX, mode, 1, loop_end_label);
5386 emit_jump (loop_start_label);
5387 emit_label (loop_end_label);
5389 emit_insn (gen_cmpmem_short (op0, op1,
5390 convert_to_mode (Pmode, count, 1)));
5391 emit_label (end_label);
5393 emit_insn (gen_cmpint (target, ccreg));
5398 /* Emit a conditional jump to LABEL for condition code mask MASK using
5399 comparison operator COMPARISON.  Return the emitted jump insn.  */
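/* MASK is a four-bit condition-code mask as used by BRC: bit value 8
   selects CC0, 4 selects CC1, 2 selects CC2, and 1 selects CC3.  E.g.
   s390_emit_ccraw_jump (8, EQ, label) branches to LABEL on CC0, while
   the same mask with NE branches on CC1, CC2 or CC3.  */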
5402 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5406 gcc_assert (comparison == EQ || comparison == NE);
5407 gcc_assert (mask > 0 && mask < 15);
5409 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5410 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5411 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5412 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5413 temp = gen_rtx_SET (pc_rtx, temp);
5414 return emit_jump_insn (temp);
5417 /* Emit the instructions to implement strlen of STRING and store the
5418 result in TARGET. The string has the known ALIGNMENT. This
5419 version uses vector instructions and is therefore not appropriate
5420 for targets prior to z13. */
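/* The core of the expansion is a loop of roughly this shape (an
   illustrative sketch, not the literal insn sequence):

	.Lloop:
	vl	%v16,0(%r_idx,%r_base)	# load 16 bytes of the string
	vfenezbs %v17,%v16,%v16		# search them for a zero byte
	#  branch back to .Lloop while no zero byte was found
	vlgvb	%r_len,%v17,7		# byte index of the terminator  */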
5423 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5425 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5426 int very_likely = REG_BR_PROB_BASE - 1;
5427 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5428 rtx str_reg = gen_reg_rtx (V16QImode);
5429 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5430 rtx str_idx_reg = gen_reg_rtx (Pmode);
5431 rtx result_reg = gen_reg_rtx (V16QImode);
5432 rtx is_aligned_label = gen_label_rtx ();
5433 rtx into_loop_label = NULL_RTX;
5434 rtx loop_start_label = gen_label_rtx ();
5436 rtx len = gen_reg_rtx (QImode);
5439 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5440 emit_move_insn (str_idx_reg, const0_rtx);
5442 if (INTVAL (alignment) < 16)
5444 /* Check whether the address happens to be aligned properly, so we
5445 can jump directly to the aligned loop.  */
5446 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5447 str_addr_base_reg, GEN_INT (15)),
5448 const0_rtx, EQ, NULL_RTX,
5449 Pmode, 1, is_aligned_label);
5451 temp = gen_reg_rtx (Pmode);
5452 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5453 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5454 gcc_assert (REG_P (temp));
5455 highest_index_to_load_reg =
5456 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5457 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5458 gcc_assert (REG_P (highest_index_to_load_reg));
5459 emit_insn (gen_vllv16qi (str_reg,
5460 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5461 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5463 into_loop_label = gen_label_rtx ();
5464 s390_emit_jump (into_loop_label, NULL_RTX);
5468 emit_label (is_aligned_label);
5469 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5471 /* Reaching this point we are only performing 16-byte aligned loads.  */
5473 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5475 emit_label (loop_start_label);
5476 LABEL_NUSES (loop_start_label) = 1;
5478 /* Load 16 bytes of the string into VR. */
5479 emit_move_insn (str_reg,
5480 gen_rtx_MEM (V16QImode,
5481 gen_rtx_PLUS (Pmode, str_idx_reg,
5482 str_addr_base_reg)));
5483 if (into_loop_label != NULL_RTX)
5485 emit_label (into_loop_label);
5486 LABEL_NUSES (into_loop_label) = 1;
5489 /* Increment string index by 16 bytes. */
5490 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5491 str_idx_reg, 1, OPTAB_DIRECT);
5493 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5494 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5496 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5497 REG_BR_PROB, very_likely);
5498 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5500 /* If the string pointer wasn't aligned we have loaded less than 16
5501 bytes and the remaining bytes got filled with zeros (by vll).
5502 Now we have to check whether the resulting index lies within the
5503 bytes actually part of the string. */
5505 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5506 highest_index_to_load_reg);
5507 s390_load_address (highest_index_to_load_reg,
5508 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5511 emit_insn (gen_movdicc (str_idx_reg, cond,
5512 highest_index_to_load_reg, str_idx_reg));
5514 emit_insn (gen_movsicc (str_idx_reg, cond,
5515 highest_index_to_load_reg, str_idx_reg));
5517 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5520 expand_binop (Pmode, add_optab, str_idx_reg,
5521 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5522 /* FIXME: len is already zero extended - so avoid the llgcr emitted by the conversion below.  */
5524 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5525 convert_to_mode (Pmode, len, 1),
5526 target, 1, OPTAB_DIRECT);
5528 emit_move_insn (target, temp);
5531 /* Expand conditional increment or decrement using alc/slb instructions.
5532 Should generate code setting DST to either SRC or SRC + INCREMENT,
5533 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5534 Returns true if successful, false otherwise.
5536 That makes it possible to implement some if-constructs without jumps e.g.:
5537 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5538 unsigned int a, b, c;
5539 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5540 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5541 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5542 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5544 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5545 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5546 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5547 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5548 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
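/* E.g. "if (a < b) c++;" with unsigned int operands comes out roughly
   as (illustrative register allocation):

	lhi	%r0,0		# zero for the add-with-carry below
	clr	%r_b,%r_a	# CCU: CC2 (carry) iff b > a
	alcr	%r_c,%r0	# c = c + 0 + carry  */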
5551 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5552 rtx dst, rtx src, rtx increment)
5554 machine_mode cmp_mode;
5555 machine_mode cc_mode;
5561 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5562 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5564 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5565 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5570 /* Try ADD LOGICAL WITH CARRY. */
5571 if (increment == const1_rtx)
5573 /* Determine CC mode to use. */
5574 if (cmp_code == EQ || cmp_code == NE)
5576 if (cmp_op1 != const0_rtx)
5578 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5579 NULL_RTX, 0, OPTAB_WIDEN);
5580 cmp_op1 = const0_rtx;
5583 cmp_code = cmp_code == EQ ? LEU : GTU;
5586 if (cmp_code == LTU || cmp_code == LEU)
5591 cmp_code = swap_condition (cmp_code);
5608 /* Emit comparison instruction pattern. */
5609 if (!register_operand (cmp_op0, cmp_mode))
5610 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5612 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5613 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5614 /* We use insn_invalid_p here to add clobbers if required. */
5615 ret = insn_invalid_p (emit_insn (insn), false);
5618 /* Emit ALC instruction pattern. */
5619 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5620 gen_rtx_REG (cc_mode, CC_REGNUM),
5623 if (src != const0_rtx)
5625 if (!register_operand (src, GET_MODE (dst)))
5626 src = force_reg (GET_MODE (dst), src);
5628 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
5629 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
5632 p = rtvec_alloc (2);
5634 gen_rtx_SET (dst, op_res);
5636 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5637 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5642 /* Try SUBTRACT LOGICAL WITH BORROW. */
5643 if (increment == constm1_rtx)
5645 /* Determine CC mode to use. */
5646 if (cmp_code == EQ || cmp_code == NE)
5648 if (cmp_op1 != const0_rtx)
5650 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5651 NULL_RTX, 0, OPTAB_WIDEN);
5652 cmp_op1 = const0_rtx;
5655 cmp_code = cmp_code == EQ ? LEU : GTU;
5658 if (cmp_code == GTU || cmp_code == GEU)
5663 cmp_code = swap_condition (cmp_code);
5680 /* Emit comparison instruction pattern. */
5681 if (!register_operand (cmp_op0, cmp_mode))
5682 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5684 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5685 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5686 /* We use insn_invalid_p here to add clobbers if required. */
5687 ret = insn_invalid_p (emit_insn (insn), false);
5690 /* Emit SLB instruction pattern. */
5691 if (!register_operand (src, GET_MODE (dst)))
5692 src = force_reg (GET_MODE (dst), src);
5694 op_res = gen_rtx_MINUS (GET_MODE (dst),
5695 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
5696 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5697 gen_rtx_REG (cc_mode, CC_REGNUM),
5699 p = rtvec_alloc (2);
5701 gen_rtx_SET (dst, op_res);
5703 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5704 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
5712 /* Expand code for the insv template. Return true if successful. */
5715 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
5717 int bitsize = INTVAL (op1);
5718 int bitpos = INTVAL (op2);
5719 machine_mode mode = GET_MODE (dest);
5721 int smode_bsize, mode_bsize;
5724 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
5727 /* Generate INSERT IMMEDIATE (IILL et al). */
5728 /* (set (ze (reg)) (const_int)). */
5730 && register_operand (dest, word_mode)
5731 && (bitpos % 16) == 0
5732 && (bitsize % 16) == 0
5733 && const_int_operand (src, VOIDmode))
5735 HOST_WIDE_INT val = INTVAL (src);
5736 int regpos = bitpos + bitsize;
5738 while (regpos > bitpos)
5740 machine_mode putmode;
5743 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
5748 putsize = GET_MODE_BITSIZE (putmode);
5750 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5753 gen_int_mode (val, putmode));
5756 gcc_assert (regpos == bitpos);
5760 smode = smallest_mode_for_size (bitsize, MODE_INT);
5761 smode_bsize = GET_MODE_BITSIZE (smode);
5762 mode_bsize = GET_MODE_BITSIZE (mode);
5764 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
5766 && (bitsize % BITS_PER_UNIT) == 0
5768 && (register_operand (src, word_mode)
5769 || const_int_operand (src, VOIDmode)))
5771 /* Emit standard pattern if possible. */
5772 if (smode_bsize == bitsize)
5774 emit_move_insn (adjust_address (dest, smode, 0),
5775 gen_lowpart (smode, src));
5779 /* (set (ze (mem)) (const_int)). */
5780 else if (const_int_operand (src, VOIDmode))
5782 int size = bitsize / BITS_PER_UNIT;
5783 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
5785 UNITS_PER_WORD - size);
5787 dest = adjust_address (dest, BLKmode, 0);
5788 set_mem_size (dest, size);
5789 s390_expand_movmem (dest, src_mem, GEN_INT (size));
5793 /* (set (ze (mem)) (reg)). */
5794 else if (register_operand (src, word_mode))
5797 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
5801 /* Emit st,stcmh sequence. */
5802 int stcmh_width = bitsize - 32;
5803 int size = stcmh_width / BITS_PER_UNIT;
5805 emit_move_insn (adjust_address (dest, SImode, size),
5806 gen_lowpart (SImode, src));
5807 set_mem_size (dest, size);
5808 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
5809 GEN_INT (stcmh_width),
5811 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
5817 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
5818 if ((bitpos % BITS_PER_UNIT) == 0
5819 && (bitsize % BITS_PER_UNIT) == 0
5820 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
5822 && (mode == DImode || mode == SImode)
5823 && register_operand (dest, mode))
5825 /* Emit a strict_low_part pattern if possible. */
5826 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
5828 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
5829 op = gen_rtx_SET (op, gen_lowpart (smode, src));
5830 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5831 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
5835 /* ??? There are more powerful versions of ICM that are not
5836 completely represented in the md file. */
5839 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
5840 if (TARGET_Z10 && (mode == DImode || mode == SImode))
5842 machine_mode mode_s = GET_MODE (src);
5844 if (mode_s == VOIDmode)
5846 /* For constant zero values the representation with AND
5847 appears to be folded in more situations than the (set
5848 (zero_extract) ...).
5849 We only do this when the start and end of the bitfield
5850 remain in the same SImode chunk.  That way nihf or nilf can be used.
5852 The AND patterns might still generate a risbg for this. */
5853 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
5856 src = force_reg (mode, src);
5858 else if (mode_s != mode)
5860 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
5861 src = force_reg (mode_s, src);
5862 src = gen_lowpart (mode, src);
5865 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
5866 op = gen_rtx_SET (op, src);
5870 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
5871 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
5881 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
5882 register that holds VAL of mode MODE shifted by COUNT bits. */
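/* E.g. for MODE == QImode, VAL == 0x1ff and COUNT == 16 the returned
   SImode register holds (0x1ff & 0xff) << 16 == 0x00ff0000.  */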
5885 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
5887 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
5888 NULL_RTX, 1, OPTAB_DIRECT);
5889 return expand_simple_binop (SImode, ASHIFT, val, count,
5890 NULL_RTX, 1, OPTAB_DIRECT);
5893 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
5894 the result in TARGET. */
5897 s390_expand_vec_compare (rtx target, enum rtx_code cond,
5898 rtx cmp_op1, rtx cmp_op2)
5900 machine_mode mode = GET_MODE (target);
5901 bool neg_p = false, swap_p = false;
5904 if (GET_MODE (cmp_op1) == V2DFmode)
5908 /* NE a != b -> !(a == b) */
5909 case NE: cond = EQ; neg_p = true; break;
5910 /* UNGT a u> b -> !(b >= a) */
5911 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
5912 /* UNGE a u>= b -> !(b > a) */
5913 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
5914 /* LE: a <= b -> b >= a */
5915 case LE: cond = GE; swap_p = true; break;
5916 /* UNLE: a u<= b -> !(a > b) */
5917 case UNLE: cond = GT; neg_p = true; break;
5918 /* LT: a < b -> b > a */
5919 case LT: cond = GT; swap_p = true; break;
5920 /* UNLT: a u< b -> !(a >= b) */
5921 case UNLT: cond = GE; neg_p = true; break;
5923 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
5926 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
5929 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
5932 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
5941 /* NE: a != b -> !(a == b) */
5942 case NE: cond = EQ; neg_p = true; break;
5943 /* GE: a >= b -> !(b > a) */
5944 case GE: cond = GT; neg_p = true; swap_p = true; break;
5945 /* GEU: a >= b -> !(b > a) */
5946 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
5947 /* LE: a <= b -> !(a > b) */
5948 case LE: cond = GT; neg_p = true; break;
5949 /* LEU: a <= b -> !(a > b) */
5950 case LEU: cond = GTU; neg_p = true; break;
5951 /* LT: a < b -> b > a */
5952 case LT: cond = GT; swap_p = true; break;
5953 /* LTU: a < b -> b > a */
5954 case LTU: cond = GTU; swap_p = true; break;
5961 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
5964 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
5966 cmp_op1, cmp_op2)));
5968 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
5971 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
5972 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
5973 elements in CMP1 and CMP2 fulfill the comparison. */
5975 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
5976 rtx cmp1, rtx cmp2, bool all_p)
5978 enum rtx_code new_code = code;
5979 machine_mode cmp_mode, full_cmp_mode, scratch_mode;
5980 rtx tmp_reg = gen_reg_rtx (SImode);
5981 bool swap_p = false;
5983 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
5987 case EQ: cmp_mode = CCVEQmode; break;
5988 case NE: cmp_mode = CCVEQmode; break;
5989 case GT: cmp_mode = CCVHmode; break;
5990 case GE: cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
5991 case LT: cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
5992 case LE: cmp_mode = CCVHmode; new_code = LE; break;
5993 case GTU: cmp_mode = CCVHUmode; break;
5994 case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
5995 case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
5996 case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
5997 default: gcc_unreachable ();
5999 scratch_mode = GET_MODE (cmp1);
6001 else if (GET_MODE (cmp1) == V2DFmode)
6005 case EQ: cmp_mode = CCVEQmode; break;
6006 case NE: cmp_mode = CCVEQmode; break;
6007 case GT: cmp_mode = CCVFHmode; break;
6008 case GE: cmp_mode = CCVFHEmode; break;
6009 case UNLE: cmp_mode = CCVFHmode; break;
6010 case UNLT: cmp_mode = CCVFHEmode; break;
6011 case LT: cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
6012 case LE: cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
6013 default: gcc_unreachable ();
6015 scratch_mode = V2DImode;
6023 case CCVEQmode: full_cmp_mode = CCVEQANYmode; break;
6024 case CCVHmode: full_cmp_mode = CCVHANYmode; break;
6025 case CCVHUmode: full_cmp_mode = CCVHUANYmode; break;
6026 case CCVFHmode: full_cmp_mode = CCVFHANYmode; break;
6027 case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
6028 default: gcc_unreachable ();
6031 /* The modes without ANY match the ALL modes. */
6032 full_cmp_mode = cmp_mode;
6041 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6042 gen_rtvec (2, gen_rtx_SET (
6043 gen_rtx_REG (cmp_mode, CC_REGNUM),
6044 gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
6045 gen_rtx_CLOBBER (VOIDmode,
6046 gen_rtx_SCRATCH (scratch_mode)))));
6047 emit_move_insn (target, const0_rtx);
6048 emit_move_insn (tmp_reg, const1_rtx);
6050 emit_move_insn (target,
6051 gen_rtx_IF_THEN_ELSE (SImode,
6052 gen_rtx_fmt_ee (new_code, VOIDmode,
6053 gen_rtx_REG (full_cmp_mode, CC_REGNUM),
6058 /* Generate a vector comparison expression loading either elements of
6059 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6063 s390_expand_vcond (rtx target, rtx then, rtx els,
6064 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6067 machine_mode result_mode;
6070 /* We always use an integral type vector to hold the comparison result.  */
6072 result_mode = GET_MODE (cmp_op1) == V2DFmode ? V2DImode : GET_MODE (cmp_op1);
6073 result_target = gen_reg_rtx (result_mode);
6075 /* Alternatively this could be done by reload by lowering the cmp*
6076 predicates. But it appears to be better for scheduling etc. to
6077 have that in early. */
6078 if (!REG_P (cmp_op1))
6079 cmp_op1 = force_reg (GET_MODE (target), cmp_op1);
6081 if (!REG_P (cmp_op2))
6082 cmp_op2 = force_reg (GET_MODE (target), cmp_op2);
6084 s390_expand_vec_compare (result_target, cond,
6087 /* If the results are supposed to be either -1 or 0 we are done
6088 since this is what our compare instructions generate anyway. */
6089 if (constm1_operand (then, GET_MODE (then))
6090 && const0_operand (els, GET_MODE (els)))
6092 emit_move_insn (target, gen_rtx_SUBREG (GET_MODE (target),
6097 /* Otherwise we will do a vsel afterwards. */
6098 /* This gets triggered e.g.
6099 with gcc.c-torture/compile/pr53410-1.c */
6101 then = force_reg (GET_MODE (target), then);
6104 els = force_reg (GET_MODE (target), els);
6106 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6108 CONST0_RTX (result_mode));
6110 /* We compared the result against zero above, so we have to swap the THEN and ELS arguments here.  */
6112 tmp = gen_rtx_IF_THEN_ELSE (GET_MODE (target), tmp, els, then);
6114 gcc_assert (GET_MODE (target) == GET_MODE (then));
6115 emit_insn (gen_rtx_SET (target, tmp));
6118 /* Emit the RTX necessary to initialize the vector TARGET with values in VALS.  */
6121 s390_expand_vec_init (rtx target, rtx vals)
6123 machine_mode mode = GET_MODE (target);
6124 machine_mode inner_mode = GET_MODE_INNER (mode);
6125 int n_elts = GET_MODE_NUNITS (mode);
6126 bool all_same = true, all_regs = true, all_const_int = true;
6130 for (i = 0; i < n_elts; ++i)
6132 x = XVECEXP (vals, 0, i);
6134 if (!CONST_INT_P (x))
6135 all_const_int = false;
6137 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6144 /* Use vector gen mask or vector gen byte mask if possible. */
6145 if (all_same && all_const_int
6146 && (XVECEXP (vals, 0, 0) == const0_rtx
6147 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6149 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6151 emit_insn (gen_rtx_SET (target,
6152 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6158 emit_insn (gen_rtx_SET (target,
6159 gen_rtx_VEC_DUPLICATE (mode,
6160 XVECEXP (vals, 0, 0))));
6164 if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
6166 /* Use vector load pair. */
6167 emit_insn (gen_rtx_SET (target,
6168 gen_rtx_VEC_CONCAT (mode,
6169 XVECEXP (vals, 0, 0),
6170 XVECEXP (vals, 0, 1))));
6174 /* We are about to set the vector elements one by one. Zero out the
6175 full register first in order to help the data flow framework to
6176 detect it as a full VR set.  */
6177 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6179 /* Unfortunately the vec_init expander is not allowed to fail. So
6180 we have to implement the fallback ourselves. */
6181 for (i = 0; i < n_elts; i++)
6182 emit_insn (gen_rtx_SET (target,
6183 gen_rtx_UNSPEC (mode,
6184 gen_rtvec (3, XVECEXP (vals, 0, i),
6185 GEN_INT (i), target),
6189 /* Structure to hold the initial parameters for a compare_and_swap operation
6190 in HImode and QImode. */
6192 struct alignment_context
6194 rtx memsi; /* SI aligned memory location. */
6195 rtx shift; /* Bit offset with regard to lsb. */
6196 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6197 rtx modemaski; /* ~modemask */
6198 bool aligned; /* True if memory is aligned, false else. */
6201 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6202 structure AC for transparent simplification, if the memory alignment is known
6203 to be at least 32 bits.  MEM is the memory location for the actual operation
6204 and MODE its mode. */
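/* E.g. for a QImode MEM at address 0x1003: MEMSI covers the SImode word
   at 0x1000, the byte offset is 3, and the shift count becomes
   (4 - 1 - 3) * 8 == 0 bits - the byte of interest occupies the least
   significant byte of the (big-endian) word.  */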
6207 init_alignment_context (struct alignment_context *ac, rtx mem,
6210 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6211 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6214 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6217 /* Alignment is unknown. */
6218 rtx byteoffset, addr, align;
6220 /* Force the address into a register. */
6221 addr = force_reg (Pmode, XEXP (mem, 0));
6223 /* Align it to SImode. */
6224 align = expand_simple_binop (Pmode, AND, addr,
6225 GEN_INT (-GET_MODE_SIZE (SImode)),
6226 NULL_RTX, 1, OPTAB_DIRECT);
6228 ac->memsi = gen_rtx_MEM (SImode, align);
6229 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6230 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6231 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6233 /* Calculate shiftcount. */
6234 byteoffset = expand_simple_binop (Pmode, AND, addr,
6235 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6236 NULL_RTX, 1, OPTAB_DIRECT);
6237 /* As we already have some offset, evaluate the remaining distance. */
6238 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6239 NULL_RTX, 1, OPTAB_DIRECT);
6242 /* Shift is the byte count, but we need the bitcount. */
6243 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6244 NULL_RTX, 1, OPTAB_DIRECT);
6246 /* Calculate masks. */
6247 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6248 GEN_INT (GET_MODE_MASK (mode)),
6249 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6250 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6254 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6255 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6256 perform the merge in SEQ2. */
6259 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6260 machine_mode mode, rtx val, rtx ins)
6267 tmp = copy_to_mode_reg (SImode, val);
6268 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6272 *seq2 = get_insns ();
6279 /* Failed to use insv. Generate a two part shift and mask. */
6281 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6282 *seq1 = get_insns ();
6286 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6287 *seq2 = get_insns ();
6293 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6294 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6295 value to set if CMP == MEM. */
6298 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6299 rtx cmp, rtx new_rtx, bool is_weak)
6301 struct alignment_context ac;
6302 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6303 rtx res = gen_reg_rtx (SImode);
6304 rtx_code_label *csloop = NULL, *csend = NULL;
6306 gcc_assert (MEM_P (mem));
6308 init_alignment_context (&ac, mem, mode);
6310 /* Load full word. Subsequent loads are performed by CS. */
6311 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6312 NULL_RTX, 1, OPTAB_DIRECT);
6314 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6315 possible, we try to use insv to make this happen efficiently. If
6316 that fails we'll generate code both inside and outside the loop. */
6317 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6318 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6325 /* Start CS loop. */
6328 /* Begin assuming success. */
6329 emit_move_insn (btarget, const1_rtx);
6331 csloop = gen_label_rtx ();
6332 csend = gen_label_rtx ();
6333 emit_label (csloop);
6336 /* val = "<mem>00..0<mem>"
6337 * cmp = "00..0<cmp>00..0"
6338 * new = "00..0<new>00..0"
6344 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
6346 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6351 /* Jump to end if we're done (likely?). */
6352 s390_emit_jump (csend, cc);
6354 /* Check for changes outside the mode, and loop internally if so.
6355 Arrange the moves so that the compare is adjacent to the
6356 branch so that we can generate CRJ. */
6357 tmp = copy_to_reg (val);
6358 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6360 cc = s390_emit_compare (NE, val, tmp);
6361 s390_emit_jump (csloop, cc);
6364 emit_move_insn (btarget, const0_rtx);
6368 /* Return the correct part of the bitfield. */
6369 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6370 NULL_RTX, 1, OPTAB_DIRECT), 1);
6373 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
6374 and VAL the value to play with. If AFTER is true then store the value
6375 MEM holds after the operation, if AFTER is false then store the value MEM
6376 holds before the operation. If TARGET is zero then discard that value, else
6377 store it to TARGET. */
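/* The expansion boils down to a compare-and-swap loop of roughly this
   shape (sketch only; VAL is masked and shifted into the affected bits
   of the containing word):

	l	%r_old,mem
	.Lretry:
	lr	%r_new,%r_old
	<apply CODE/VAL to the affected bits of %r_new>
	cs	%r_old,%r_new,mem	# %r_old reloaded on failure
	jl	.Lretry			# CC1: mem changed, try again  */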
6380 s390_expand_atomic (machine_mode mode, enum rtx_code code,
6381 rtx target, rtx mem, rtx val, bool after)
6383 struct alignment_context ac;
6385 rtx new_rtx = gen_reg_rtx (SImode);
6386 rtx orig = gen_reg_rtx (SImode);
6387 rtx_code_label *csloop = gen_label_rtx ();
6389 gcc_assert (!target || register_operand (target, VOIDmode));
6390 gcc_assert (MEM_P (mem));
6392 init_alignment_context (&ac, mem, mode);
6394 /* Shift val to the correct bit positions.
6395 Preserve "icm", but prevent "ex icm". */
6396 if (!(ac.aligned && code == SET && MEM_P (val)))
6397 val = s390_expand_mask_and_shift (val, mode, ac.shift);
6399 /* Further preparation insns. */
6400 if (code == PLUS || code == MINUS)
6401 emit_move_insn (orig, val);
6402 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
6403 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
6404 NULL_RTX, 1, OPTAB_DIRECT);
6406 /* Load full word. Subsequent loads are performed by CS. */
6407 cmp = force_reg (SImode, ac.memsi);
6409 /* Start CS loop. */
6410 emit_label (csloop);
6411 emit_move_insn (new_rtx, cmp);
6413 /* Patch new with val at correct position. */
6418 val = expand_simple_binop (SImode, code, new_rtx, orig,
6419 NULL_RTX, 1, OPTAB_DIRECT);
6420 val = expand_simple_binop (SImode, AND, val, ac.modemask,
6421 NULL_RTX, 1, OPTAB_DIRECT);
6424 if (ac.aligned && MEM_P (val))
6425 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
6429 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
6430 NULL_RTX, 1, OPTAB_DIRECT);
6431 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
6432 NULL_RTX, 1, OPTAB_DIRECT);
6438 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
6439 NULL_RTX, 1, OPTAB_DIRECT);
6441 case MULT: /* NAND */
6442 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
6443 NULL_RTX, 1, OPTAB_DIRECT);
6444 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
6445 NULL_RTX, 1, OPTAB_DIRECT);
6451 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
6452 ac.memsi, cmp, new_rtx));
6454 /* Return the correct part of the bitfield. */
6456 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
6457 after ? new_rtx : cmp, ac.shift,
6458 NULL_RTX, 1, OPTAB_DIRECT), 1);
6461 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6462 We need to emit DTP-relative relocations. */
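/* E.g. for SIZE == 8 and a SYMBOL_REF "foo" this prints
   "\t.quad\tfoo@DTPOFF".  */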
6464 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6467 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
6472 fputs ("\t.long\t", file);
6475 fputs ("\t.quad\t", file);
6480 output_addr_const (file, x);
6481 fputs ("@DTPOFF", file);
6484 /* Return the proper mode for REGNO being represented in the dwarf
6487 s390_dwarf_frame_reg_mode (int regno)
6489 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
6491 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
6492 if (GENERAL_REGNO_P (regno))
6495 /* The rightmost 64 bits of vector registers are call-clobbered. */
6496 if (GET_MODE_SIZE (save_mode) > 8)
6502 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
6503 /* Implement TARGET_MANGLE_TYPE. */
6506 s390_mangle_type (const_tree type)
6508 type = TYPE_MAIN_VARIANT (type);
6510 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
6511 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
6514 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
6515 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
6516 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
6517 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
6519 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
6520 && TARGET_LONG_DOUBLE_128)
6523 /* For all other types, use normal C++ mangling. */
6528 /* In the name of slightly smaller debug output, and to cater to
6529 general assembler lossage, recognize various UNSPEC sequences
6530 and turn them back into a direct symbol reference. */
6533 s390_delegitimize_address (rtx orig_x)
6537 orig_x = delegitimize_mem_from_attrs (orig_x);
6540 /* Extract the symbol ref from:
6541 (plus:SI (reg:SI 12 %r12)
6542 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
6543 UNSPEC_GOTOFF/PLTOFF)))
6545 (plus:SI (reg:SI 12 %r12)
6546 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
6547 UNSPEC_GOTOFF/PLTOFF)
6548 (const_int 4 [0x4])))) */
6549 if (GET_CODE (x) == PLUS
6550 && REG_P (XEXP (x, 0))
6551 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
6552 && GET_CODE (XEXP (x, 1)) == CONST)
6554 HOST_WIDE_INT offset = 0;
6556 /* The const operand. */
6557 y = XEXP (XEXP (x, 1), 0);
6559 if (GET_CODE (y) == PLUS
6560 && GET_CODE (XEXP (y, 1)) == CONST_INT)
6562 offset = INTVAL (XEXP (y, 1));
6566 if (GET_CODE (y) == UNSPEC
6567 && (XINT (y, 1) == UNSPEC_GOTOFF
6568 || XINT (y, 1) == UNSPEC_PLTOFF))
6569 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
6572 if (GET_CODE (x) != MEM)
6576 if (GET_CODE (x) == PLUS
6577 && GET_CODE (XEXP (x, 1)) == CONST
6578 && GET_CODE (XEXP (x, 0)) == REG
6579 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6581 y = XEXP (XEXP (x, 1), 0);
6582 if (GET_CODE (y) == UNSPEC
6583 && XINT (y, 1) == UNSPEC_GOT)
6584 y = XVECEXP (y, 0, 0);
6588 else if (GET_CODE (x) == CONST)
6590 /* Extract the symbol ref from:
6591 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
6592 UNSPEC_PLT/GOTENT))) */
6595 if (GET_CODE (y) == UNSPEC
6596 && (XINT (y, 1) == UNSPEC_GOTENT
6597 || XINT (y, 1) == UNSPEC_PLT))
6598 y = XVECEXP (y, 0, 0);
6605 if (GET_MODE (orig_x) != Pmode)
6607 if (GET_MODE (orig_x) == BLKmode)
6609 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
6616 /* Output operand OP to stdio stream FILE.
6617 OP is an address (register + offset) which is not used to address data;
6618 instead the rightmost bits are interpreted as the value. */
6621 print_shift_count_operand (FILE *file, rtx op)
6623 HOST_WIDE_INT offset;
6626 /* Extract base register and offset. */
6627 if (!s390_decompose_shift_count (op, &base, &offset))
6633 gcc_assert (GET_CODE (base) == REG);
6634 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
6635 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
6638 /* Offsets are restricted to twelve bits.  */
6639 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
6641 fprintf (file, "(%s)", reg_names[REGNO (base)]);
6644 /* Assigns the number of NOP halfwords to be emitted before and after the
6645 function label to *HW_BEFORE and *HW_AFTER. Both pointers must not be NULL.
6646 If hotpatching is disabled for the function, the values are set to zero.
6650 s390_function_num_hotpatch_hw (tree decl,
6656 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
6658 /* Handle the arguments of the hotpatch attribute. The values
6659 specified via attribute might override the cmdline argument values.  */
6663 tree args = TREE_VALUE (attr);
6665 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
6666 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
6670 /* Use the values specified by the cmdline arguments. */
6671 *hw_before = s390_hotpatch_hw_before_label;
6672 *hw_after = s390_hotpatch_hw_after_label;
6676 /* Write the extra assembler code needed to declare a function properly. */
6679 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
6682 int hw_before, hw_after;
6684 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
6687 unsigned int function_alignment;
6690 /* Add a trampoline code area before the function label and initialize it
6691 with two-byte nop instructions. This area can be overwritten with code
6692 that jumps to a patched version of the function. */
6693 asm_fprintf (asm_out_file, "\tnopr\t%%r7"
6694 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
6696 for (i = 1; i < hw_before; i++)
6697 fputs ("\tnopr\t%r7\n", asm_out_file);
6699 /* Note: The function label must be aligned so that (a) the bytes of the
6700 following nop do not cross a cacheline boundary, and (b) a jump address
6701 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
6702 stored directly before the label without crossing a cacheline
6703 boundary. All this is necessary to make sure the trampoline code can
6704 be changed atomically.
6705 This alignment is done automatically using the FUNCTION_BOUNDARY, but
6706 if there are NOPs before the function label, the alignment is placed
6707 before them.  So it is necessary to duplicate the alignment after the NOPs.  */
6709 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
6710 if (! DECL_USER_ALIGN (decl))
6711 function_alignment = MAX (function_alignment,
6712 (unsigned int) align_functions);
6713 fputs ("\t# alignment for hotpatch\n", asm_out_file);
6714 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
6717 ASM_OUTPUT_LABEL (asm_out_file, fname);
6719 asm_fprintf (asm_out_file,
6720 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
6724 /* Output machine-dependent UNSPECs occurring in address constant X
6725 in assembler syntax to stdio stream FILE. Returns true if the
6726 constant X could be recognized, false otherwise. */
6729 s390_output_addr_const_extra (FILE *file, rtx x)
6731 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
6732 switch (XINT (x, 1))
6735 output_addr_const (file, XVECEXP (x, 0, 0));
6736 fprintf (file, "@GOTENT");
6739 output_addr_const (file, XVECEXP (x, 0, 0));
6740 fprintf (file, "@GOT");
6743 output_addr_const (file, XVECEXP (x, 0, 0));
6744 fprintf (file, "@GOTOFF");
6747 output_addr_const (file, XVECEXP (x, 0, 0));
6748 fprintf (file, "@PLT");
6751 output_addr_const (file, XVECEXP (x, 0, 0));
6752 fprintf (file, "@PLTOFF");
6755 output_addr_const (file, XVECEXP (x, 0, 0));
6756 fprintf (file, "@TLSGD");
6759 assemble_name (file, get_some_local_dynamic_name ());
6760 fprintf (file, "@TLSLDM");
6763 output_addr_const (file, XVECEXP (x, 0, 0));
6764 fprintf (file, "@DTPOFF");
6767 output_addr_const (file, XVECEXP (x, 0, 0));
6768 fprintf (file, "@NTPOFF");
6770 case UNSPEC_GOTNTPOFF:
6771 output_addr_const (file, XVECEXP (x, 0, 0));
6772 fprintf (file, "@GOTNTPOFF");
6774 case UNSPEC_INDNTPOFF:
6775 output_addr_const (file, XVECEXP (x, 0, 0));
6776 fprintf (file, "@INDNTPOFF");
6780 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
6781 switch (XINT (x, 1))
6783 case UNSPEC_POOL_OFFSET:
6784 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
6785 output_addr_const (file, x);
6791 /* Output address operand ADDR in assembler syntax to
6792 stdio stream FILE. */
6795 print_operand_address (FILE *file, rtx addr)
6797 struct s390_address ad;
6799 if (s390_loadrelative_operand_p (addr, NULL, NULL))
6803 output_operand_lossage ("symbolic memory references are "
6804 "only supported on z10 or later");
6807 output_addr_const (file, addr);
6811 if (!s390_decompose_address (addr, &ad)
6812 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6813 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
6814 output_operand_lossage ("cannot decompose address");
6817 output_addr_const (file, ad.disp);
6819 fprintf (file, "0");
6821 if (ad.base && ad.indx)
6822 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
6823 reg_names[REGNO (ad.base)]);
6825 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
6828 /* Output operand X in assembler syntax to stdio stream FILE.
6829 CODE specified the format flag. The following format flags
6832 'C': print opcode suffix for branch condition.
6833 'D': print opcode suffix for inverse branch condition.
6834 'E': print opcode suffix for branch on index instruction.
6835 'G': print the size of the operand in bytes.
6836 'J': print tls_load/tls_gdcall/tls_ldcall suffix
6837 'M': print the second word of a TImode operand.
6838 'N': print the second word of a DImode operand.
6839 'O': print only the displacement of a memory reference or address.
6840 'R': print only the base register of a memory reference or address.
6841 'S': print S-type memory reference (base+displacement).
6842 'Y': print shift count operand.
6844 'b': print integer X as if it's an unsigned byte.
6845 'c': print integer X as if it's a signed byte.
6846 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
6847 'f': "end" contiguous bitmask X in SImode.
6848 'h': print integer X as if it's a signed halfword.
6849 'i': print the first nonzero HImode part of X.
6850 'j': print the first HImode part unequal to -1 of X.
6851 'k': print the first nonzero SImode part of X.
6852 'm': print the first SImode part unequal to -1 of X.
6853 'o': print integer X as if it's an unsigned 32-bit word.
6854 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
6855 't': CONST_INT: "start" of contiguous bitmask X in SImode.
6856 CONST_VECTOR: Generate a bitmask for vgbm instruction.
6857 'x': print integer X as if it's an unsigned halfword.
6858 'v': print register number as vector register (v1 instead of f1).
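   For example, with operand 0 a DImode value in the register pair
   starting at %r2, '%N0' prints %r3; for a TImode memory operand,
   '%M0' prints the memory reference at offset 8.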
6862 print_operand (FILE *file, rtx x, int code)
6869 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
6873 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
6877 if (GET_CODE (x) == LE)
6878 fprintf (file, "l");
6879 else if (GET_CODE (x) == GT)
6880 fprintf (file, "h");
6882 output_operand_lossage ("invalid comparison operator "
6883 "for 'E' output modifier");
6887 if (GET_CODE (x) == SYMBOL_REF)
6889 fprintf (file, "%s", ":tls_load:");
6890 output_addr_const (file, x);
6892 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
6894 fprintf (file, "%s", ":tls_gdcall:");
6895 output_addr_const (file, XVECEXP (x, 0, 0));
6897 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
6899 fprintf (file, "%s", ":tls_ldcall:");
6900 const char *name = get_some_local_dynamic_name ();
6902 assemble_name (file, name);
6905 output_operand_lossage ("invalid reference for 'J' output modifier");
6909 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
6914 struct s390_address ad;
6917 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
6920 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6923 output_operand_lossage ("invalid address for 'O' output modifier");
6928 output_addr_const (file, ad.disp);
6930 fprintf (file, "0");
6936 struct s390_address ad;
6939 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
6942 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6945 output_operand_lossage ("invalid address for 'R' output modifier");
6950 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
6952 fprintf (file, "0");
6958 struct s390_address ad;
6963 output_operand_lossage ("memory reference expected for "
6964 "'S' output modifier");
6967 ret = s390_decompose_address (XEXP (x, 0), &ad);
6970 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
6973 output_operand_lossage ("invalid address for 'S' output modifier");
6978 output_addr_const (file, ad.disp);
6980 fprintf (file, "0");
6983 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
6988 if (GET_CODE (x) == REG)
6989 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
6990 else if (GET_CODE (x) == MEM)
6991 x = change_address (x, VOIDmode,
6992 plus_constant (Pmode, XEXP (x, 0), 4));
6994 output_operand_lossage ("register or memory expression expected "
6995 "for 'N' output modifier");
6999 if (GET_CODE (x) == REG)
7000 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7001 else if (GET_CODE (x) == MEM)
7002 x = change_address (x, VOIDmode,
7003 plus_constant (Pmode, XEXP (x, 0), 8));
7005 output_operand_lossage ("register or memory expression expected "
7006 "for 'M' output modifier");
7010 print_shift_count_operand (file, x);
7014 switch (GET_CODE (x))
7017 /* Print FP regs as fx instead of vx when they are accessed
7018 through non-vector mode. */
7020 || VECTOR_NOFP_REG_P (x)
7021 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7022 || (VECTOR_REG_P (x)
7023 && (GET_MODE_SIZE (GET_MODE (x)) /
7024 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7025 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7027 fprintf (file, "%s", reg_names[REGNO (x)]);
7031 output_address (XEXP (x, 0));
7038 output_addr_const (file, x);
7051 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7057 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7060 ival = s390_extract_part (x, HImode, 0);
7063 ival = s390_extract_part (x, HImode, -1);
7066 ival = s390_extract_part (x, SImode, 0);
7069 ival = s390_extract_part (x, SImode, -1);
7080 len = (code == 's' || code == 'e' ? 64 : 32);
7081 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
7083 if (code == 's' || code == 't')
7084 ival = 64 - pos - len;
7086 ival = 64 - 1 - pos;
7090 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7092 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7096 gcc_assert (GET_MODE (x) == VOIDmode);
7098 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
7099 else if (code == 'x')
7100 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
7101 else if (code == 'h')
7102 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7103 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
7107 output_operand_lossage ("invalid constant - try using "
7108 "an output modifier");
7110 output_operand_lossage ("invalid constant for output modifier '%c'",
7120 int start, stop, inner_len;
7123 inner_len = GET_MODE_UNIT_BITSIZE (GET_MODE (x));
7124 ok = s390_contiguous_bitmask_vector_p (x, &start, &stop);
7126 if (code == 's' || code == 't')
7127 ival = inner_len - stop - 1;
7129 ival = inner_len - start - 1;
7130 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7136 bool ok = s390_bytemask_vector_p (x, &mask);
7138 fprintf (file, "%u", mask);
7143 output_operand_lossage ("invalid constant vector for output "
7144 "modifier '%c'", code);
7150 output_operand_lossage ("invalid expression - try using "
7151 "an output modifier");
7153 output_operand_lossage ("invalid expression for output "
7154 "modifier '%c'", code);
7159 /* Target hook for assembling integer objects. We need to define it
7160 here to work around a bug in some versions of GAS, which couldn't
7161 handle values smaller than INT_MIN when printed in decimal. */
7164 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7166 if (size == 8 && aligned_p
7167 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7169 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7173 return default_assemble_integer (x, size, aligned_p);
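/* For illustration (the constant is hypothetical): with the workaround
   above, a DImode value such as -2147483649 (INT_MIN - 1) is emitted as

       .quad 0xffffffff7fffffff

   rather than as ".quad -2147483649", which affected GAS versions
   failed to parse correctly.  */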
7176 /* Returns true if register REGNO is used for forming
7177 a memory address in expression X. */
7180 reg_used_in_mem_p (int regno, rtx x)
7182 enum rtx_code code = GET_CODE (x);
7188 if (refers_to_regno_p (regno, XEXP (x, 0)))
7191 else if (code == SET
7192 && GET_CODE (SET_DEST (x)) == PC)
7194 if (refers_to_regno_p (regno, SET_SRC (x)))
7198 fmt = GET_RTX_FORMAT (code);
7199 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7202 && reg_used_in_mem_p (regno, XEXP (x, i)))
7205 else if (fmt[i] == 'E')
7206 for (j = 0; j < XVECLEN (x, i); j++)
7207 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7213 /* Returns true if expression DEP_RTX sets an address register
7214 used by instruction INSN to address memory. */
7217 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7221 if (NONJUMP_INSN_P (dep_rtx))
7222 dep_rtx = PATTERN (dep_rtx);
7224 if (GET_CODE (dep_rtx) == SET)
7226 target = SET_DEST (dep_rtx);
7227 if (GET_CODE (target) == STRICT_LOW_PART)
7228 target = XEXP (target, 0);
7229 while (GET_CODE (target) == SUBREG)
7230 target = SUBREG_REG (target);
7232 if (GET_CODE (target) == REG)
7234 int regno = REGNO (target);
7236 if (s390_safe_attr_type (insn) == TYPE_LA)
7238 pat = PATTERN (insn);
7239 if (GET_CODE (pat) == PARALLEL)
7241 gcc_assert (XVECLEN (pat, 0) == 2);
7242 pat = XVECEXP (pat, 0, 0);
7244 gcc_assert (GET_CODE (pat) == SET);
7245 return refers_to_regno_p (regno, SET_SRC (pat));
7247 else if (get_attr_atype (insn) == ATYPE_AGEN)
7248 return reg_used_in_mem_p (regno, PATTERN (insn));
7254 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
7257 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7259 rtx dep_rtx = PATTERN (dep_insn);
7262 if (GET_CODE (dep_rtx) == SET
7263 && addr_generation_dependency_p (dep_rtx, insn))
7265 else if (GET_CODE (dep_rtx) == PARALLEL)
7267 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7269 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
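/* An illustrative (hypothetical) instance of the dependency detected
   above:

       lr   %r1,%r2        ; dep_insn sets r1
       l    %r3,0(%r1)     ; insn uses r1 for address generation

   The load needs r1 in its address-generation stage, so the scheduler
   has to honor the extra agen latency between the two insns.  */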
7277 /* A C statement (sans semicolon) to update the integer scheduling priority
7278 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7279 reduce the priority to execute INSN later. Do not define this macro if
7280 you do not need to adjust the scheduling priorities of insns.
7282 A STD instruction should be scheduled earlier,
7283 in order to use the bypass. */
7285 s390_adjust_priority (rtx_insn *insn, int priority)
7287 if (! INSN_P (insn))
7290 if (s390_tune <= PROCESSOR_2064_Z900)
7293 switch (s390_safe_attr_type (insn))
7297 priority = priority << 3;
7301 priority = priority << 1;
7310 /* The number of instructions that can be issued per cycle. */
7313 s390_issue_rate (void)
7317 case PROCESSOR_2084_Z990:
7318 case PROCESSOR_2094_Z9_109:
7319 case PROCESSOR_2094_Z9_EC:
7320 case PROCESSOR_2817_Z196:
7322 case PROCESSOR_2097_Z10:
7324 case PROCESSOR_9672_G5:
7325 case PROCESSOR_9672_G6:
7326 case PROCESSOR_2064_Z900:
7327 /* Starting with EC12 we use the sched_reorder hook to take care
7328 of instruction dispatch constraints. The algorithm only
7329 picks the best instruction and assumes only a single
7330 instruction gets issued per cycle. */
7331 case PROCESSOR_2827_ZEC12:
7332 case PROCESSOR_2964_Z13:
7339 s390_first_cycle_multipass_dfa_lookahead (void)
7344 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7345 Fix up MEMs as required. */
7348 annotate_constant_pool_refs (rtx *x)
7353 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7354 || !CONSTANT_POOL_ADDRESS_P (*x));
7356 /* Literal pool references can only occur inside a MEM ... */
7357 if (GET_CODE (*x) == MEM)
7359 rtx memref = XEXP (*x, 0);
7361 if (GET_CODE (memref) == SYMBOL_REF
7362 && CONSTANT_POOL_ADDRESS_P (memref))
7364 rtx base = cfun->machine->base_reg;
7365 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7368 *x = replace_equiv_address (*x, addr);
7372 if (GET_CODE (memref) == CONST
7373 && GET_CODE (XEXP (memref, 0)) == PLUS
7374 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7375 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7376 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7378 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7379 rtx sym = XEXP (XEXP (memref, 0), 0);
7380 rtx base = cfun->machine->base_reg;
7381 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7384 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7389 /* ... or a load-address type pattern. */
7390 if (GET_CODE (*x) == SET)
7392 rtx addrref = SET_SRC (*x);
7394 if (GET_CODE (addrref) == SYMBOL_REF
7395 && CONSTANT_POOL_ADDRESS_P (addrref))
7397 rtx base = cfun->machine->base_reg;
7398 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7401 SET_SRC (*x) = addr;
7405 if (GET_CODE (addrref) == CONST
7406 && GET_CODE (XEXP (addrref, 0)) == PLUS
7407 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7408 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7409 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7411 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7412 rtx sym = XEXP (XEXP (addrref, 0), 0);
7413 rtx base = cfun->machine->base_reg;
7414 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7417 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7422 /* Annotate LTREL_BASE as well. */
7423 if (GET_CODE (*x) == UNSPEC
7424 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7426 rtx base = cfun->machine->base_reg;
7427 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7432 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7433 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7437 annotate_constant_pool_refs (&XEXP (*x, i));
7439 else if (fmt[i] == 'E')
7441 for (j = 0; j < XVECLEN (*x, i); j++)
7442 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
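/* Schematically, the annotation performed above turns

       (mem (symbol_ref .LC0))
   into
       (mem (unspec [(symbol_ref .LC0) (reg base)] UNSPEC_LTREF))

   making the dependency of the literal pool access on the base
   register explicit for the following passes.  */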
7447 /* Split all branches that exceed the maximum distance.
7448 Returns true if this created a new literal pool entry. */
7451 s390_split_branches (void)
7453 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7454 int new_literal = 0, ret;
7459 /* We need correct insn addresses. */
7461 shorten_branches (get_insns ());
7463 /* Find all branches that exceed 64KB, and split them. */
7465 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7467 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7470 pat = PATTERN (insn);
7471 if (GET_CODE (pat) == PARALLEL)
7472 pat = XVECEXP (pat, 0, 0);
7473 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7476 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7478 label = &SET_SRC (pat);
7480 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7482 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7483 label = &XEXP (SET_SRC (pat), 1);
7484 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7485 label = &XEXP (SET_SRC (pat), 2);
7492 if (get_attr_length (insn) <= 4)
7495 /* We are going to use the return register as scratch register,
7496 make sure it will be saved/restored by the prologue/epilogue. */
7497 cfun_frame_layout.save_return_addr_p = 1;
7502 rtx mem = force_const_mem (Pmode, *label);
7503 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7505 INSN_ADDRESSES_NEW (set_insn, -1);
7506 annotate_constant_pool_refs (&PATTERN (set_insn));
7513 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7514 UNSPEC_LTREL_OFFSET);
7515 target = gen_rtx_CONST (Pmode, target);
7516 target = force_const_mem (Pmode, target);
7517 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7519 INSN_ADDRESSES_NEW (set_insn, -1);
7520 annotate_constant_pool_refs (&PATTERN (set_insn));
7522 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7523 cfun->machine->base_reg),
7525 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7528 ret = validate_change (insn, label, target, 0);
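/* Roughly, the 31-bit transformation above turns (a sketch; register
   and label names are illustrative, with %r13 assumed to be the
   literal pool base):

       brc  mask,.Lfar             ; target beyond the 64KB range
   into
       l    %r14,.Lconst(%r13)     ; fetch .Lfar's address from the pool
       bcr  mask,%r14

   which is why the return register has to be saved/restored above.  */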
7536 /* Find an annotated literal pool symbol referenced in RTX X,
7537 and store it at REF. Will abort if X contains references to
7538 more than one such pool symbol; multiple references to the same
7539 symbol are allowed, however.
7541 The rtx pointed to by REF must be initialized to NULL_RTX
7542 by the caller before calling this routine. */
7545 find_constant_pool_ref (rtx x, rtx *ref)
7550 /* Ignore LTREL_BASE references. */
7551 if (GET_CODE (x) == UNSPEC
7552 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7554 /* Likewise POOL_ENTRY insns. */
7555 if (GET_CODE (x) == UNSPEC_VOLATILE
7556 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7559 gcc_assert (GET_CODE (x) != SYMBOL_REF
7560 || !CONSTANT_POOL_ADDRESS_P (x));
7562 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7564 rtx sym = XVECEXP (x, 0, 0);
7565 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7566 && CONSTANT_POOL_ADDRESS_P (sym));
7568 if (*ref == NULL_RTX)
7571 gcc_assert (*ref == sym);
7576 fmt = GET_RTX_FORMAT (GET_CODE (x));
7577 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7581 find_constant_pool_ref (XEXP (x, i), ref);
7583 else if (fmt[i] == 'E')
7585 for (j = 0; j < XVECLEN (x, i); j++)
7586 find_constant_pool_ref (XVECEXP (x, i, j), ref);
7591 /* Replace every reference to the annotated literal pool
7592 symbol REF in X by its base plus OFFSET. */
7595 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
7600 gcc_assert (*x != ref);
7602 if (GET_CODE (*x) == UNSPEC
7603 && XINT (*x, 1) == UNSPEC_LTREF
7604 && XVECEXP (*x, 0, 0) == ref)
7606 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
7610 if (GET_CODE (*x) == PLUS
7611 && GET_CODE (XEXP (*x, 1)) == CONST_INT
7612 && GET_CODE (XEXP (*x, 0)) == UNSPEC
7613 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
7614 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
7616 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
7617 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
7621 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7622 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7626 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
7628 else if (fmt[i] == 'E')
7630 for (j = 0; j < XVECLEN (*x, i); j++)
7631 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
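/* Sketch of the replacement done above: once the pool layout is fixed,

       (unspec [(symbol_ref .LC0) (reg base)] UNSPEC_LTREF)
   becomes
       (plus (reg base) (const_int <offset of .LC0>))

   i.e. an ordinary base + displacement address.  */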
7636 /* Check whether X contains an UNSPEC_LTREL_BASE.
7637 Return its constant pool symbol if found, NULL_RTX otherwise. */
7640 find_ltrel_base (rtx x)
7645 if (GET_CODE (x) == UNSPEC
7646 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7647 return XVECEXP (x, 0, 0);
7649 fmt = GET_RTX_FORMAT (GET_CODE (x));
7650 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7654 rtx fnd = find_ltrel_base (XEXP (x, i));
7658 else if (fmt[i] == 'E')
7660 for (j = 0; j < XVECLEN (x, i); j++)
7662 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
7672 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
7675 replace_ltrel_base (rtx *x)
7680 if (GET_CODE (*x) == UNSPEC
7681 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7683 *x = XVECEXP (*x, 0, 1);
7687 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7688 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7692 replace_ltrel_base (&XEXP (*x, i));
7694 else if (fmt[i] == 'E')
7696 for (j = 0; j < XVECLEN (*x, i); j++)
7697 replace_ltrel_base (&XVECEXP (*x, i, j));
7703 /* We keep a list of constants which we have to add to internal
7704 constant tables in the middle of large functions. */
7706 #define NR_C_MODES 32
7707 machine_mode constant_modes[NR_C_MODES] =
7709 TFmode, TImode, TDmode,
7710 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
7711 V4SFmode, V2DFmode, V1TFmode,
7712 DFmode, DImode, DDmode,
7713 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
7714 SFmode, SImode, SDmode,
7715 V4QImode, V2HImode, V1SImode, V1SFmode,
7724 struct constant *next;
7726 rtx_code_label *label;
7729 struct constant_pool
7731 struct constant_pool *next;
7732 rtx_insn *first_insn;
7733 rtx_insn *pool_insn;
7735 rtx_insn *emit_pool_after;
7737 struct constant *constants[NR_C_MODES];
7738 struct constant *execute;
7739 rtx_code_label *label;
7743 /* Allocate new constant_pool structure. */
7745 static struct constant_pool *
7746 s390_alloc_pool (void)
7748 struct constant_pool *pool;
7751 pool = (struct constant_pool *) xmalloc (sizeof *pool);
7753 for (i = 0; i < NR_C_MODES; i++)
7754 pool->constants[i] = NULL;
7756 pool->execute = NULL;
7757 pool->label = gen_label_rtx ();
7758 pool->first_insn = NULL;
7759 pool->pool_insn = NULL;
7760 pool->insns = BITMAP_ALLOC (NULL);
7762 pool->emit_pool_after = NULL;
7767 /* Create new constant pool covering instructions starting at INSN
7768 and chain it to the end of POOL_LIST. */
7770 static struct constant_pool *
7771 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
7773 struct constant_pool *pool, **prev;
7775 pool = s390_alloc_pool ();
7776 pool->first_insn = insn;
7778 for (prev = pool_list; *prev; prev = &(*prev)->next)
7785 /* End range of instructions covered by POOL at INSN and emit
7786 placeholder insn representing the pool. */
7789 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
7791 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
7794 insn = get_last_insn ();
7796 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
7797 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
7800 /* Add INSN to the list of insns covered by POOL. */
7803 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
7805 bitmap_set_bit (pool->insns, INSN_UID (insn));
7808 /* Return pool out of POOL_LIST that covers INSN. */
7810 static struct constant_pool *
7811 s390_find_pool (struct constant_pool *pool_list, rtx insn)
7813 struct constant_pool *pool;
7815 for (pool = pool_list; pool; pool = pool->next)
7816 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
7822 /* Add constant VAL of mode MODE to the constant pool POOL. */
7825 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
7830 for (i = 0; i < NR_C_MODES; i++)
7831 if (constant_modes[i] == mode)
7833 gcc_assert (i != NR_C_MODES);
7835 for (c = pool->constants[i]; c != NULL; c = c->next)
7836 if (rtx_equal_p (val, c->value))
7841 c = (struct constant *) xmalloc (sizeof *c);
7843 c->label = gen_label_rtx ();
7844 c->next = pool->constants[i];
7845 pool->constants[i] = c;
7846 pool->size += GET_MODE_SIZE (mode);
7850 /* Return an rtx that represents the offset of X from the start of
7854 s390_pool_offset (struct constant_pool *pool, rtx x)
7858 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
7859 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
7860 UNSPEC_POOL_OFFSET);
7861 return gen_rtx_CONST (GET_MODE (x), x);
7864 /* Find constant VAL of mode MODE in the constant pool POOL.
7865 Return an RTX describing the distance from the start of
7866 the pool to the location of the new constant. */
7869 s390_find_constant (struct constant_pool *pool, rtx val,
7875 for (i = 0; i < NR_C_MODES; i++)
7876 if (constant_modes[i] == mode)
7878 gcc_assert (i != NR_C_MODES);
7880 for (c = pool->constants[i]; c != NULL; c = c->next)
7881 if (rtx_equal_p (val, c->value))
7886 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
7889 /* Check whether INSN is an execute. Return the label_ref to its
7890 execute target template if so, NULL_RTX otherwise. */
7893 s390_execute_label (rtx insn)
7895 if (NONJUMP_INSN_P (insn)
7896 && GET_CODE (PATTERN (insn)) == PARALLEL
7897 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
7898 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
7899 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
7904 /* Add execute target for INSN to the constant pool POOL. */
7907 s390_add_execute (struct constant_pool *pool, rtx insn)
7911 for (c = pool->execute; c != NULL; c = c->next)
7912 if (INSN_UID (insn) == INSN_UID (c->value))
7917 c = (struct constant *) xmalloc (sizeof *c);
7919 c->label = gen_label_rtx ();
7920 c->next = pool->execute;
7926 /* Find execute target for INSN in the constant pool POOL.
7927 Return an RTX describing the distance from the start of
7928 the pool to the location of the execute target. */
7931 s390_find_execute (struct constant_pool *pool, rtx insn)
7935 for (c = pool->execute; c != NULL; c = c->next)
7936 if (INSN_UID (insn) == INSN_UID (c->value))
7941 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
7944 /* For an execute INSN, extract the execute target template. */
7947 s390_execute_target (rtx insn)
7949 rtx pattern = PATTERN (insn);
7950 gcc_assert (s390_execute_label (insn));
7952 if (XVECLEN (pattern, 0) == 2)
7954 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
7958 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
7961 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
7962 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
7964 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
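/* Background sketch for the execute machinery (operands are
   hypothetical): on targets without EXRL the target template of an
   execute insn lives in the literal pool, e.g.

       ex   %r1,.Ltmpl-.LPOOL(%r13)   ; execute template, length in %r1
       ...
   .Ltmpl:
       mvc  0(1,%r4),0(%r5)           ; template dumped with the pool

   which is why s390_dump_pool emits the execute labels alongside the
   constants.  */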
7970 /* Indicate that INSN cannot be duplicated. This is the case for
7971 execute insns that carry a unique label. */
7974 s390_cannot_copy_insn_p (rtx_insn *insn)
7976 rtx label = s390_execute_label (insn);
7977 return label && label != const0_rtx;
7980 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
7981 do not emit the pool base label. */
7984 s390_dump_pool (struct constant_pool *pool, bool remote_label)
7987 rtx_insn *insn = pool->pool_insn;
7990 /* Switch to rodata section. */
7991 if (TARGET_CPU_ZARCH)
7993 insn = emit_insn_after (gen_pool_section_start (), insn);
7994 INSN_ADDRESSES_NEW (insn, -1);
7997 /* Ensure minimum pool alignment. */
7998 if (TARGET_CPU_ZARCH)
7999 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8001 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8002 INSN_ADDRESSES_NEW (insn, -1);
8004 /* Emit pool base label. */
8007 insn = emit_label_after (pool->label, insn);
8008 INSN_ADDRESSES_NEW (insn, -1);
8011 /* Dump constants in descending alignment requirement order,
8012 ensuring proper alignment for every constant. */
8013 for (i = 0; i < NR_C_MODES; i++)
8014 for (c = pool->constants[i]; c; c = c->next)
8016 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8017 rtx value = copy_rtx (c->value);
8018 if (GET_CODE (value) == CONST
8019 && GET_CODE (XEXP (value, 0)) == UNSPEC
8020 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8021 && XVECLEN (XEXP (value, 0), 0) == 1)
8022 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8024 insn = emit_label_after (c->label, insn);
8025 INSN_ADDRESSES_NEW (insn, -1);
8027 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8028 gen_rtvec (1, value),
8029 UNSPECV_POOL_ENTRY);
8030 insn = emit_insn_after (value, insn);
8031 INSN_ADDRESSES_NEW (insn, -1);
8034 /* Ensure minimum alignment for instructions. */
8035 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8036 INSN_ADDRESSES_NEW (insn, -1);
8038 /* Output in-pool execute template insns. */
8039 for (c = pool->execute; c; c = c->next)
8041 insn = emit_label_after (c->label, insn);
8042 INSN_ADDRESSES_NEW (insn, -1);
8044 insn = emit_insn_after (s390_execute_target (c->value), insn);
8045 INSN_ADDRESSES_NEW (insn, -1);
8048 /* Switch back to previous section. */
8049 if (TARGET_CPU_ZARCH)
8051 insn = emit_insn_after (gen_pool_section_end (), insn);
8052 INSN_ADDRESSES_NEW (insn, -1);
8055 insn = emit_barrier_after (insn);
8056 INSN_ADDRESSES_NEW (insn, -1);
8058 /* Remove placeholder insn. */
8059 remove_insn (pool->pool_insn);
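/* On zarch the emitted pool looks roughly like this (a sketch with
   hypothetical labels):

       .section .rodata        ; pool_section_start
       .align  8               ; minimum pool alignment
   .LPOOL:                     ; base label (unless remote_label)
   .LC0:  .quad ...            ; UNSPECV_POOL_ENTRY constants,
       ...                     ;   in descending alignment order
       .align  2               ; insn alignment for execute templates
       .previous               ; pool_section_end  */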
8062 /* Free all memory used by POOL. */
8065 s390_free_pool (struct constant_pool *pool)
8067 struct constant *c, *next;
8070 for (i = 0; i < NR_C_MODES; i++)
8071 for (c = pool->constants[i]; c; c = next)
8077 for (c = pool->execute; c; c = next)
8083 BITMAP_FREE (pool->insns);
8088 /* Collect main literal pool. Return NULL on overflow. */
8090 static struct constant_pool *
8091 s390_mainpool_start (void)
8093 struct constant_pool *pool;
8096 pool = s390_alloc_pool ();
8098 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8100 if (NONJUMP_INSN_P (insn)
8101 && GET_CODE (PATTERN (insn)) == SET
8102 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8103 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8105 /* There might be two main_pool instructions if base_reg
8106 is call-clobbered; one for shrink-wrapped code and one
8107 for the rest. We want to keep the first. */
8108 if (pool->pool_insn)
8110 insn = PREV_INSN (insn);
8111 delete_insn (NEXT_INSN (insn));
8114 pool->pool_insn = insn;
8117 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8119 s390_add_execute (pool, insn);
8121 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8123 rtx pool_ref = NULL_RTX;
8124 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8127 rtx constant = get_pool_constant (pool_ref);
8128 machine_mode mode = get_pool_mode (pool_ref);
8129 s390_add_constant (pool, constant, mode);
8133 /* If hot/cold partitioning is enabled we have to make sure that
8134 the literal pool is emitted in the same section where the
8135 initialization of the literal pool base pointer takes place.
8136 emit_pool_after is only used in the non-overflow case on
8137 non-zarch CPUs where we can emit the literal pool at the end of the
8138 function body within the text section. */
8140 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8141 && !pool->emit_pool_after)
8142 pool->emit_pool_after = PREV_INSN (insn);
8145 gcc_assert (pool->pool_insn || pool->size == 0);
8147 if (pool->size >= 4096)
8149 /* We're going to chunkify the pool, so remove the main
8150 pool placeholder insn. */
8151 remove_insn (pool->pool_insn);
8153 s390_free_pool (pool);
8157 /* If the function ends with the section where the literal pool
8158 should be emitted, set the marker to its end. */
8159 if (pool && !pool->emit_pool_after)
8160 pool->emit_pool_after = get_last_insn ();
8165 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8166 Modify the current function to output the pool constants as well as
8167 the pool register setup instruction. */
8170 s390_mainpool_finish (struct constant_pool *pool)
8172 rtx base_reg = cfun->machine->base_reg;
8174 /* If the pool is empty, we're done. */
8175 if (pool->size == 0)
8177 /* We don't actually need a base register after all. */
8178 cfun->machine->base_reg = NULL_RTX;
8180 if (pool->pool_insn)
8181 remove_insn (pool->pool_insn);
8182 s390_free_pool (pool);
8186 /* We need correct insn addresses. */
8187 shorten_branches (get_insns ());
8189 /* On zSeries, we use a LARL to load the pool register. The pool is
8190 located in the .rodata section, so we emit it after the function. */
8191 if (TARGET_CPU_ZARCH)
8193 rtx set = gen_main_base_64 (base_reg, pool->label);
8194 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8195 INSN_ADDRESSES_NEW (insn, -1);
8196 remove_insn (pool->pool_insn);
8198 insn = get_last_insn ();
8199 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8200 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8202 s390_dump_pool (pool, 0);
8205 /* On S/390, if the total size of the function's code plus literal pool
8206 does not exceed 4096 bytes, we use BASR to set up a function base
8207 pointer, and emit the literal pool at the end of the function. */
8208 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8209 + pool->size + 8 /* alignment slop */ < 4096)
8211 rtx set = gen_main_base_31_small (base_reg, pool->label);
8212 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8213 INSN_ADDRESSES_NEW (insn, -1);
8214 remove_insn (pool->pool_insn);
8216 insn = emit_label_after (pool->label, insn);
8217 INSN_ADDRESSES_NEW (insn, -1);
8219 /* emit_pool_after will be set by s390_mainpool_start to the
8220 last insn of the section where the literal pool should be emitted. */
8222 insn = pool->emit_pool_after;
8224 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8225 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8227 s390_dump_pool (pool, 1);
8230 /* Otherwise, we emit an inline literal pool and use BASR to branch
8231 over it, setting up the pool register at the same time. */
8234 rtx_code_label *pool_end = gen_label_rtx ();
8236 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8237 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8238 JUMP_LABEL (insn) = pool_end;
8239 INSN_ADDRESSES_NEW (insn, -1);
8240 remove_insn (pool->pool_insn);
8242 insn = emit_label_after (pool->label, insn);
8243 INSN_ADDRESSES_NEW (insn, -1);
8245 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8246 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8248 insn = emit_label_after (pool_end, pool->pool_insn);
8249 INSN_ADDRESSES_NEW (insn, -1);
8251 s390_dump_pool (pool, 1);
8255 /* Replace all literal pool references. */
8257 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8260 replace_ltrel_base (&PATTERN (insn));
8262 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8264 rtx addr, pool_ref = NULL_RTX;
8265 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8268 if (s390_execute_label (insn))
8269 addr = s390_find_execute (pool, insn);
8271 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8272 get_pool_mode (pool_ref));
8274 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8275 INSN_CODE (insn) = -1;
8281 /* Free the pool. */
8282 s390_free_pool (pool);
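/* For the small 31-bit case above the prologue boils down to
   (a sketch; labels are hypothetical):

       basr %r13,0      ; main_base_31_small: %r13 := address of .LPOOL
   .LPOOL:
       ...              ; body; the pool itself is dumped after the
                        ;   last insn of the section

   so every constant is addressable as .LCn-.LPOOL(%r13) as long as
   code plus pool stays below the 4096-byte displacement limit.  */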
8285 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8286 We have decided we cannot use this pool, so revert all changes
8287 to the current function that were done by s390_mainpool_start. */
8289 s390_mainpool_cancel (struct constant_pool *pool)
8291 /* We didn't actually change the instruction stream, so simply
8292 free the pool memory. */
8293 s390_free_pool (pool);
8297 /* Chunkify the literal pool. */
8299 #define S390_POOL_CHUNK_MIN 0xc00
8300 #define S390_POOL_CHUNK_MAX 0xe00
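/* These bounds follow from the 12-bit unsigned displacement of
   base + displacement addressing, which reaches at most 4095 bytes
   past the chunk's base label.  S390_POOL_CHUNK_MAX (0xe00 = 3584)
   leaves slack for alignment padding and execute templates, while
   chunks below S390_POOL_CHUNK_MIN (0xc00 = 3072) are kept open so
   that nearby literals can still share the same chunk.  */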
8302 static struct constant_pool *
8303 s390_chunkify_start (void)
8305 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8308 rtx pending_ltrel = NULL_RTX;
8311 rtx (*gen_reload_base) (rtx, rtx) =
8312 TARGET_CPU_ZARCH ? gen_reload_base_64 : gen_reload_base_31;
8315 /* We need correct insn addresses. */
8317 shorten_branches (get_insns ());
8319 /* Scan all insns and move literals to pool chunks. */
8321 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8323 bool section_switch_p = false;
8325 /* Check for pending LTREL_BASE. */
8328 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8331 gcc_assert (ltrel_base == pending_ltrel);
8332 pending_ltrel = NULL_RTX;
8336 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8339 curr_pool = s390_start_pool (&pool_list, insn);
8341 s390_add_execute (curr_pool, insn);
8342 s390_add_pool_insn (curr_pool, insn);
8344 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8346 rtx pool_ref = NULL_RTX;
8347 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8350 rtx constant = get_pool_constant (pool_ref);
8351 machine_mode mode = get_pool_mode (pool_ref);
8354 curr_pool = s390_start_pool (&pool_list, insn);
8356 s390_add_constant (curr_pool, constant, mode);
8357 s390_add_pool_insn (curr_pool, insn);
8359 /* Don't split the pool chunk between a LTREL_OFFSET load
8360 and the corresponding LTREL_BASE. */
8361 if (GET_CODE (constant) == CONST
8362 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8363 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8365 gcc_assert (!pending_ltrel);
8366 pending_ltrel = pool_ref;
8371 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8374 s390_add_pool_insn (curr_pool, insn);
8375 /* An LTREL_BASE must follow within the same basic block. */
8376 gcc_assert (!pending_ltrel);
8380 switch (NOTE_KIND (insn))
8382 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8383 section_switch_p = true;
8385 case NOTE_INSN_VAR_LOCATION:
8386 case NOTE_INSN_CALL_ARG_LOCATION:
8393 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8394 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8397 if (TARGET_CPU_ZARCH)
8399 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8402 s390_end_pool (curr_pool, NULL);
8407 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8408 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8411 /* We will later have to insert base register reload insns.
8412 Those will have an effect on code size, which we need to
8413 consider here. This calculation makes rather pessimistic
8414 worst-case assumptions. */
8418 if (chunk_size < S390_POOL_CHUNK_MIN
8419 && curr_pool->size < S390_POOL_CHUNK_MIN
8420 && !section_switch_p)
8423 /* Pool chunks can only be inserted after BARRIERs ... */
8424 if (BARRIER_P (insn))
8426 s390_end_pool (curr_pool, insn);
8431 /* ... so if we don't find one in time, create one. */
8432 else if (chunk_size > S390_POOL_CHUNK_MAX
8433 || curr_pool->size > S390_POOL_CHUNK_MAX
8434 || section_switch_p)
8436 rtx_insn *label, *jump, *barrier, *next, *prev;
8438 if (!section_switch_p)
8440 /* We can insert the barrier only after a 'real' insn. */
8441 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8443 if (get_attr_length (insn) == 0)
8445 /* Don't separate LTREL_BASE from the corresponding
8446 LTREL_OFFSET load. */
8453 next = NEXT_INSN (insn);
8457 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8458 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8462 gcc_assert (!pending_ltrel);
8464 /* The old pool has to end before the section switch
8465 note in order to make it part of the current section. */
8467 insn = PREV_INSN (insn);
8470 label = gen_label_rtx ();
8472 if (prev && NOTE_P (prev))
8473 prev = prev_nonnote_insn (prev);
8475 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8476 INSN_LOCATION (prev));
8478 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8479 barrier = emit_barrier_after (jump);
8480 insn = emit_label_after (label, barrier);
8481 JUMP_LABEL (jump) = label;
8482 LABEL_NUSES (label) = 1;
8484 INSN_ADDRESSES_NEW (jump, -1);
8485 INSN_ADDRESSES_NEW (barrier, -1);
8486 INSN_ADDRESSES_NEW (insn, -1);
8488 s390_end_pool (curr_pool, barrier);
8496 s390_end_pool (curr_pool, NULL);
8497 gcc_assert (!pending_ltrel);
8499 /* Find all labels that are branched into
8500 from an insn belonging to a different chunk. */
8502 far_labels = BITMAP_ALLOC (NULL);
8504 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8506 rtx_jump_table_data *table;
8508 /* Labels marked with LABEL_PRESERVE_P can be target
8509 of non-local jumps, so we have to mark them.
8510 The same holds for named labels.
8512 Don't do that, however, if it is the label before a jump table. */
8516 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8518 rtx_insn *vec_insn = NEXT_INSN (insn);
8519 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8520 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8522 /* Check potential targets in a table jump (casesi_jump). */
8523 else if (tablejump_p (insn, NULL, &table))
8525 rtx vec_pat = PATTERN (table);
8526 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8528 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8530 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8532 if (s390_find_pool (pool_list, label)
8533 != s390_find_pool (pool_list, insn))
8534 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8537 /* If we have a direct jump (conditional or unconditional),
8538 check all potential targets. */
8539 else if (JUMP_P (insn))
8541 rtx pat = PATTERN (insn);
8543 if (GET_CODE (pat) == PARALLEL)
8544 pat = XVECEXP (pat, 0, 0);
8546 if (GET_CODE (pat) == SET)
8548 rtx label = JUMP_LABEL (insn);
8549 if (label && !ANY_RETURN_P (label))
8551 if (s390_find_pool (pool_list, label)
8552 != s390_find_pool (pool_list, insn))
8553 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8559 /* Insert base register reload insns before every pool. */
8561 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8563 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8565 rtx_insn *insn = curr_pool->first_insn;
8566 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8569 /* Insert base register reload insns at every far label. */
8571 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8573 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8575 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8578 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8580 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
8585 BITMAP_FREE (far_labels);
8588 /* Recompute insn addresses. */
8590 init_insn_lengths ();
8591 shorten_branches (get_insns ());
8596 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8597 After we have decided to use this list, finish implementing
8598 all changes to the current function as required. */
8601 s390_chunkify_finish (struct constant_pool *pool_list)
8603 struct constant_pool *curr_pool = NULL;
8607 /* Replace all literal pool references. */
8609 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8612 replace_ltrel_base (&PATTERN (insn));
8614 curr_pool = s390_find_pool (pool_list, insn);
8618 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8620 rtx addr, pool_ref = NULL_RTX;
8621 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8624 if (s390_execute_label (insn))
8625 addr = s390_find_execute (curr_pool, insn);
8627 addr = s390_find_constant (curr_pool,
8628 get_pool_constant (pool_ref),
8629 get_pool_mode (pool_ref));
8631 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8632 INSN_CODE (insn) = -1;
8637 /* Dump out all literal pools. */
8639 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8640 s390_dump_pool (curr_pool, 0);
8642 /* Free pool list. */
8646 struct constant_pool *next = pool_list->next;
8647 s390_free_pool (pool_list);
8652 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8653 We have decided we cannot use this list, so revert all changes
8654 to the current function that were done by s390_chunkify_start. */
8657 s390_chunkify_cancel (struct constant_pool *pool_list)
8659 struct constant_pool *curr_pool = NULL;
8662 /* Remove all pool placeholder insns. */
8664 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8666 /* Did we insert an extra barrier? Remove it. */
8667 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
8668 rtx_insn *jump = barrier ? PREV_INSN (barrier) : NULL;
8669 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
8671 if (jump && JUMP_P (jump)
8672 && barrier && BARRIER_P (barrier)
8673 && label && LABEL_P (label)
8674 && GET_CODE (PATTERN (jump)) == SET
8675 && SET_DEST (PATTERN (jump)) == pc_rtx
8676 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
8677 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
8680 remove_insn (barrier);
8681 remove_insn (label);
8684 remove_insn (curr_pool->pool_insn);
8687 /* Remove all base register reload insns. */
8689 for (insn = get_insns (); insn; )
8691 rtx_insn *next_insn = NEXT_INSN (insn);
8693 if (NONJUMP_INSN_P (insn)
8694 && GET_CODE (PATTERN (insn)) == SET
8695 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
8696 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
8702 /* Free pool list. */
8706 struct constant_pool *next = pool_list->next;
8707 s390_free_pool (pool_list);
8712 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
8715 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
8719 switch (GET_MODE_CLASS (mode))
8722 case MODE_DECIMAL_FLOAT:
8723 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
8725 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
8726 assemble_real (r, mode, align);
8730 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
8731 mark_symbol_refs_as_used (exp);
8734 case MODE_VECTOR_INT:
8735 case MODE_VECTOR_FLOAT:
8738 machine_mode inner_mode;
8739 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
8741 inner_mode = GET_MODE_INNER (GET_MODE (exp));
8742 for (i = 0; i < XVECLEN (exp, 0); i++)
8743 s390_output_pool_entry (XVECEXP (exp, 0, i),
8747 : GET_MODE_BITSIZE (inner_mode));
8757 /* Return an RTL expression representing the value of the return address
8758 for the frame COUNT steps up from the current frame. FRAME is the
8759 frame pointer of that frame. */
8762 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
8767 /* Without backchain, we fail for all but the current frame. */
8769 if (!TARGET_BACKCHAIN && count > 0)
8772 /* For the current frame, we need to make sure the initial
8773 value of RETURN_REGNUM is actually saved. */
8777 /* On non-z architectures branch splitting could overwrite r14. */
8778 if (TARGET_CPU_ZARCH)
8779 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
8782 cfun_frame_layout.save_return_addr_p = true;
8783 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
8787 if (TARGET_PACKED_STACK)
8788 offset = -2 * UNITS_PER_LONG;
8790 offset = RETURN_REGNUM * UNITS_PER_LONG;
8792 addr = plus_constant (Pmode, frame, offset);
8793 addr = memory_address (Pmode, addr);
8794 return gen_rtx_MEM (Pmode, addr);
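/* Example of the offset computation above: RETURN_REGNUM is 14, so
   without the packed stack layout the return address of the current
   frame sits at frame + 112 with 64-bit (UNITS_PER_LONG == 8) and at
   frame + 56 with 31-bit (UNITS_PER_LONG == 4).  */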
8797 /* Return an RTL expression representing the back chain stored in
8798 the current stack frame. */
8801 s390_back_chain_rtx (void)
8805 gcc_assert (TARGET_BACKCHAIN);
8807 if (TARGET_PACKED_STACK)
8808 chain = plus_constant (Pmode, stack_pointer_rtx,
8809 STACK_POINTER_OFFSET - UNITS_PER_LONG);
8811 chain = stack_pointer_rtx;
8813 chain = gen_rtx_MEM (Pmode, chain);
8817 /* Find first call clobbered register unused in a function.
8818 This could be used as base register in a leaf function
8819 or for holding the return address before epilogue. */
8822 find_unused_clobbered_reg (void)
8825 for (i = 0; i < 6; i++)
8826 if (!df_regs_ever_live_p (i))
8832 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
8833 clobbered hard regs in SETREG. */
8836 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
8838 char *regs_ever_clobbered = (char *)data;
8839 unsigned int i, regno;
8840 machine_mode mode = GET_MODE (setreg);
8842 if (GET_CODE (setreg) == SUBREG)
8844 rtx inner = SUBREG_REG (setreg);
8845 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
8847 regno = subreg_regno (setreg);
8849 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
8850 regno = REGNO (setreg);
8855 i < regno + HARD_REGNO_NREGS (regno, mode);
8857 regs_ever_clobbered[i] = 1;
8860 /* Walks through all basic blocks of the current function looking
8861 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
8862 of the passed char array REGS_EVER_CLOBBERED are set to one for
8863 each of those regs. */
8866 s390_regs_ever_clobbered (char regs_ever_clobbered[])
8872 memset (regs_ever_clobbered, 0, 32);
8874 /* For non-leaf functions we have to consider all call clobbered regs to be clobbered. */
8878 for (i = 0; i < 32; i++)
8879 regs_ever_clobbered[i] = call_really_used_regs[i];
8882 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
8883 this work is done by liveness analysis (mark_regs_live_at_end).
8884 Special care is needed for functions containing landing pads. Landing pads
8885 may use the eh registers, but the code which sets these registers is not
8886 contained in that function. Hence s390_regs_ever_clobbered is not able to
8887 deal with this automatically. */
8888 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
8889 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
8890 if (crtl->calls_eh_return
8891 || (cfun->machine->has_landing_pad_p
8892 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
8893 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
8895 /* For nonlocal gotos all call-saved registers have to be saved.
8896 This flag is also set for the unwinding code in libgcc.
8897 See expand_builtin_unwind_init. For regs_ever_live this is done by
8899 if (crtl->saves_all_registers)
8900 for (i = 0; i < 32; i++)
8901 if (!call_really_used_regs[i])
8902 regs_ever_clobbered[i] = 1;
8904 FOR_EACH_BB_FN (cur_bb, cfun)
8906 FOR_BB_INSNS (cur_bb, cur_insn)
8910 if (!INSN_P (cur_insn))
8913 pat = PATTERN (cur_insn);
8915 /* Ignore GPR restore insns. */
8916 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
8918 if (GET_CODE (pat) == SET
8919 && GENERAL_REG_P (SET_DEST (pat)))
8922 if (GET_MODE (SET_SRC (pat)) == DImode
8923 && FP_REG_P (SET_SRC (pat)))
8927 if (GET_CODE (SET_SRC (pat)) == MEM)
8932 if (GET_CODE (pat) == PARALLEL
8933 && load_multiple_operation (pat, VOIDmode))
8938 s390_reg_clobbered_rtx,
8939 regs_ever_clobbered);
8944 /* Determine the frame area which actually has to be accessed
8945 in the function epilogue. The values are stored at the
8946 given pointers AREA_BOTTOM (address of the lowest used stack
8947 address) and AREA_TOP (address of the first item which does
8948 not belong to the stack frame). */
8951 s390_frame_area (int *area_bottom, int *area_top)
8958 if (cfun_frame_layout.first_restore_gpr != -1)
8960 b = (cfun_frame_layout.gprs_offset
8961 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
8962 t = b + (cfun_frame_layout.last_restore_gpr
8963 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
8966 if (TARGET_64BIT && cfun_save_high_fprs_p)
8968 b = MIN (b, cfun_frame_layout.f8_offset);
8969 t = MAX (t, (cfun_frame_layout.f8_offset
8970 + cfun_frame_layout.high_fprs * 8));
8975 if (cfun_fpr_save_p (FPR4_REGNUM))
8977 b = MIN (b, cfun_frame_layout.f4_offset);
8978 t = MAX (t, cfun_frame_layout.f4_offset + 8);
8980 if (cfun_fpr_save_p (FPR6_REGNUM))
8982 b = MIN (b, cfun_frame_layout.f4_offset + 8);
8983 t = MAX (t, cfun_frame_layout.f4_offset + 16);
8989 /* Update gpr_save_slots in the frame layout trying to make use of
8990 FPRs as GPR save slots.
8991 This is a helper routine of s390_register_info. */
8994 s390_register_info_gprtofpr ()
8996 int save_reg_slot = FPR0_REGNUM;
8999 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9002 for (i = 15; i >= 6; i--)
9004 if (cfun_gpr_save_slot (i) == 0)
9007 /* Advance to the next FP register which can be used as a save slot. */
9009 while ((!call_really_used_regs[save_reg_slot]
9010 || df_regs_ever_live_p (save_reg_slot)
9011 || cfun_fpr_save_p (save_reg_slot))
9012 && FP_REGNO_P (save_reg_slot))
9014 if (!FP_REGNO_P (save_reg_slot))
9016 /* We only want to use ldgr/lgdr if we can get rid of
9017 stm/lm entirely. So undo the gpr slot allocation in
9018 case we ran out of FPR save slots. */
9019 for (j = 6; j <= 15; j++)
9020 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9021 cfun_gpr_save_slot (j) = -1;
9024 cfun_gpr_save_slot (i) = save_reg_slot++;
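/* A sketch of the effect on z10 and newer (the register choice is
   hypothetical): instead of saving r11 through the stm/lm pair, the
   prologue can emit

       ldgr %f0,%r11     ; save r11 in call-clobbered f0
   and the epilogue
       lgdr %r11,%f0     ; restore it

   avoiding the stack access for the saved GPR entirely.  */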
9028 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to stdarg.
9030 This is a helper routine for s390_register_info. */
9033 s390_register_info_stdarg_fpr ()
9039 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9040 f0, f2, f4 and f6 for 64 bit. */
9042 || !TARGET_HARD_FLOAT
9043 || !cfun->va_list_fpr_size
9044 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9047 min_fpr = crtl->args.info.fprs;
9048 max_fpr = min_fpr + cfun->va_list_fpr_size;
9049 if (max_fpr > FP_ARG_NUM_REG)
9050 max_fpr = FP_ARG_NUM_REG;
9052 for (i = min_fpr; i < max_fpr; i++)
9053 cfun_set_fpr_save (i + FPR0_REGNUM);
9056 /* Reserve the GPR save slots for GPRs which need to be saved due to stdarg.
9058 This is a helper routine for s390_register_info. */
9061 s390_register_info_stdarg_gpr ()
9068 || !cfun->va_list_gpr_size
9069 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9072 min_gpr = crtl->args.info.gprs;
9073 max_gpr = min_gpr + cfun->va_list_gpr_size;
9074 if (max_gpr > GP_ARG_NUM_REG)
9075 max_gpr = GP_ARG_NUM_REG;
9077 for (i = min_gpr; i < max_gpr; i++)
9078 cfun_gpr_save_slot (2 + i) = -1;
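/* Worked example for the loop above (counts are hypothetical): with
   GP_ARG_NUM_REG == 5 and three named GPR arguments already consumed
   (crtl->args.info.gprs == 3), min_gpr is 3, so the save slots of r5
   and r6 are forced to -1, i.e. onto the stack where va_arg expects
   the vararg registers.  */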
9081 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9082 for registers which need to be saved in function prologue.
9083 This function can be used until the insns emitted for save/restore
9084 of the regs are visible in the RTL stream. */
9087 s390_register_info ()
9090 char clobbered_regs[32];
9092 gcc_assert (!epilogue_completed);
9094 if (reload_completed)
9095 /* After reload we rely on our own routine to determine which
9096 registers need saving. */
9097 s390_regs_ever_clobbered (clobbered_regs);
9099 /* During reload we use regs_ever_live as a base since reload
9100 does changes in there which we otherwise would not be aware
9102 for (i = 0; i < 32; i++)
9103 clobbered_regs[i] = df_regs_ever_live_p (i);
9105 for (i = 0; i < 32; i++)
9106 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9108 /* Mark the call-saved FPRs which need to be saved.
9109 This needs to be done before checking the special GPRs since the
9110 stack pointer usage depends on whether high FPRs have to be saved
9112 cfun_frame_layout.fpr_bitmap = 0;
9113 cfun_frame_layout.high_fprs = 0;
9114 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9115 if (clobbered_regs[i] && !call_really_used_regs[i])
9117 cfun_set_fpr_save (i);
9118 if (i >= FPR8_REGNUM)
9119 cfun_frame_layout.high_fprs++;
9123 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
9124 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
9126 clobbered_regs[BASE_REGNUM]
9127 |= (cfun->machine->base_reg
9128 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9130 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9131 |= !!frame_pointer_needed;
9133 /* On pre-z900 machines this might take until machine dependent reorg to decide.
9135 save_return_addr_p will only be set on non-zarch machines so
9136 there is no risk that r14 goes into an FPR instead of a stack
9138 clobbered_regs[RETURN_REGNUM]
9140 || TARGET_TPF_PROFILING
9141 || cfun->machine->split_branches_pending_p
9142 || cfun_frame_layout.save_return_addr_p
9143 || crtl->calls_eh_return);
9145 clobbered_regs[STACK_POINTER_REGNUM]
9147 || TARGET_TPF_PROFILING
9148 || cfun_save_high_fprs_p
9149 || get_frame_size () > 0
9150 || (reload_completed && cfun_frame_layout.frame_size > 0)
9151 || cfun->calls_alloca);
9153 memset (cfun_frame_layout.gpr_save_slots, 0, 16);
9155 for (i = 6; i < 16; i++)
9156 if (clobbered_regs[i])
9157 cfun_gpr_save_slot (i) = -1;
9159 s390_register_info_stdarg_fpr ();
9160 s390_register_info_gprtofpr ();
9162 /* First find the range of GPRs to be restored. Vararg regs don't
9163 need to be restored so we do it before assigning slots to the vararg GPRs. */
9165 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9166 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9167 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9168 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9170 /* stdarg functions might need to save GPRs 2 to 6. This might
9171 override the GPR->FPR save decision made above for r6 since
9172 vararg regs must go to the stack. */
9173 s390_register_info_stdarg_gpr ();
9175 /* Now the range of GPRs which need saving. */
9176 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9177 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9178 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9179 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9182 /* This function is called by s390_optimize_prologue in order to get
9183 rid of unnecessary GPR save/restore instructions. The register info
9184 for the GPRs is re-computed and the ranges are re-calculated. */
9187 s390_optimize_register_info ()
9189 char clobbered_regs[32];
9192 gcc_assert (epilogue_completed);
9193 gcc_assert (!cfun->machine->split_branches_pending_p);
9195 s390_regs_ever_clobbered (clobbered_regs);
9197 for (i = 0; i < 32; i++)
9198 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9200 /* There is still special treatment needed for cases invisible to
9201 s390_regs_ever_clobbered. */
9202 clobbered_regs[RETURN_REGNUM]
9203 |= (TARGET_TPF_PROFILING
9204 /* When expanding builtin_return_addr in ESA mode we do not
9205 know whether r14 will later be needed as scratch reg when
9206 doing branch splitting. So the builtin always accesses the
9207 r14 save slot and we need to stick to the save/restore
9208 decision for r14 even if it turns out that it didn't get
9210 || cfun_frame_layout.save_return_addr_p
9211 || crtl->calls_eh_return);
9213 memset (cfun_frame_layout.gpr_save_slots, 0, 6);
9215 for (i = 6; i < 16; i++)
9216 if (!clobbered_regs[i])
9217 cfun_gpr_save_slot (i) = 0;
9219 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9220 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9221 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9222 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9224 s390_register_info_stdarg_gpr ();
9226 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
9227 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
9228 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9229 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9232 /* Fill cfun->machine with info about frame of current function. */
9235 s390_frame_info (void)
9237 HOST_WIDE_INT lowest_offset;
9239 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9240 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9242 /* The va_arg builtin uses a constant distance of 16 *
9243 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9244 pointer. So even if we are going to save the stack pointer in an
9245 FPR we need the stack space in order to keep the offsets valid. */
9247 if (cfun->stdarg && cfun_save_arg_fprs_p)
9249 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9251 if (cfun_frame_layout.first_save_gpr_slot == -1)
9252 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9255 cfun_frame_layout.frame_size = get_frame_size ();
9256 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9257 fatal_error (input_location,
9258 "total size of local variables exceeds architecture limit");
9260 if (!TARGET_PACKED_STACK)
9262 /* Fixed stack layout. */
9263 cfun_frame_layout.backchain_offset = 0;
9264 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9265 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9266 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9267 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9270 else if (TARGET_BACKCHAIN)
9272 /* Kernel stack layout - packed stack, backchain, no float */
9273 gcc_assert (TARGET_SOFT_FLOAT);
9274 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9277 /* The distance between the backchain and the return address
9278 save slot must not change. So we always need a slot for the
9279 stack pointer which resides in between. */
9280 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9282 cfun_frame_layout.gprs_offset
9283 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9285 /* FPRs will not be saved. Nevertheless pick sane values to
9286 keep area calculations valid. */
9287 cfun_frame_layout.f0_offset =
9288 cfun_frame_layout.f4_offset =
9289 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9295 /* Packed stack layout without backchain. */
9297 /* With stdarg FPRs need their dedicated slots. */
9298 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9299 : (cfun_fpr_save_p (FPR4_REGNUM) +
9300 cfun_fpr_save_p (FPR6_REGNUM)));
9301 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9303 num_fprs = (cfun->stdarg ? 2
9304 : (cfun_fpr_save_p (FPR0_REGNUM)
9305 + cfun_fpr_save_p (FPR2_REGNUM)));
9306 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9308 cfun_frame_layout.gprs_offset
9309 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9311 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9312 - cfun_frame_layout.high_fprs * 8);
9315 if (cfun_save_high_fprs_p)
9316 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9319 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9321 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9322 sized area at the bottom of the stack. This is required also for
9323 leaf functions. When GCC generates a local stack reference it
9324 will always add STACK_POINTER_OFFSET to all these references. */
9326 && !TARGET_TPF_PROFILING
9327 && cfun_frame_layout.frame_size == 0
9328 && !cfun->calls_alloca)
9331 /* Calculate the number of bytes we have used in our own register
9332 save area. With the packed stack layout we can re-use the
9333 remaining bytes for normal stack elements. */
9335 if (TARGET_PACKED_STACK)
9336 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9337 cfun_frame_layout.f4_offset),
9338 cfun_frame_layout.gprs_offset);
9342 if (TARGET_BACKCHAIN)
9343 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9345 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9347 /* If under 31 bit an odd number of gprs has to be saved we have to
     adjust the frame size to sustain 8 byte alignment of stack
     frames.  */
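  /* For example, with STACK_BOUNDARY / BITS_PER_UNIT == 8, a raw frame
     size of 20 bytes is rounded up to ((20 + 7) & ~7) == 24 bytes.  */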
9350 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9351 STACK_BOUNDARY / BITS_PER_UNIT - 1)
				  & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
9355 /* Generate frame layout. Fills in register and frame data for the current
9356 function in cfun->machine. This routine can be called multiple times;
9357 it will re-do the complete frame layout every time. */
static void
s390_init_frame_layout (void)
{
  HOST_WIDE_INT frame_size;
  int base_used;
9365 /* After LRA the frame layout is supposed to be read-only and should
9366 not be re-computed. */
  if (reload_completed)
    return;
9370 /* On S/390 machines, we may need to perform branch splitting, which
9371 will require both base and return address register. We have no
9372 choice but to assume we're going to need them until right at the
9373 end of the machine dependent reorg phase. */
9374 if (!TARGET_CPU_ZARCH)
9375 cfun->machine->split_branches_pending_p = true;
  do
    {
      frame_size = cfun_frame_layout.frame_size;
9381 /* Try to predict whether we'll need the base register. */
9382 base_used = cfun->machine->split_branches_pending_p
9383 || crtl->uses_const_pool
9384 || (!DISP_IN_RANGE (frame_size)
9385 && !CONST_OK_FOR_K (frame_size));
9387 /* Decide which register to use as literal pool base. In small
9388 leaf functions, try to use an unused call-clobbered register
9389 as base register to avoid save/restore overhead. */
      if (!base_used)
	cfun->machine->base_reg = NULL_RTX;
9392 else if (crtl->is_leaf && !df_regs_ever_live_p (5))
9393 cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
      else
	cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
      s390_register_info ();
      s390_frame_info ();
    }
  while (frame_size != cfun_frame_layout.frame_size);
}
9403 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9404 the TX is nonescaping. A transaction is considered escaping if
9405 there is at least one path from tbegin returning CC0 to the
   function exit block without a tend.
9408 The check so far has some limitations:
9409 - only single tbegin/tend BBs are supported
9410 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9411 - when CC is copied to a GPR and the CC0 check is done with the GPR
     this is not supported.  */

static void
s390_optimize_nonescaping_tx (void)
{
9418 const unsigned int CC0 = 1 << 3;
9419 basic_block tbegin_bb = NULL;
  basic_block tend_bb = NULL;
  basic_block bb;
  rtx_insn *insn;
  bool result = true;
  int bb_index;
  rtx_insn *tbegin_insn = NULL;
  if (!cfun->machine->tbegin_p)
    return;
9430 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
      FOR_BB_INSNS (bb, insn)
	{
9439 rtx ite, cc, pat, target;
9440 unsigned HOST_WIDE_INT mask;
	  if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
	    continue;
9445 pat = PATTERN (insn);
9447 if (GET_CODE (pat) == PARALLEL)
9448 pat = XVECEXP (pat, 0, 0);
9450 if (GET_CODE (pat) != SET
	      || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
	    continue;
	  if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
	    {
	      rtx_insn *tmp;

	      tbegin_insn = insn;

	      /* Just return if the tbegin doesn't have clobbers.  */
	      if (GET_CODE (PATTERN (insn)) != PARALLEL)
		return;

	      if (tbegin_bb != NULL)
		return;
9467 /* Find the next conditional jump. */
	      for (tmp = NEXT_INSN (insn);
		   tmp != NULL_RTX;
		   tmp = NEXT_INSN (tmp))
		{
		  if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
		    return;
		  if (!JUMP_P (tmp))
		    continue;
9477 ite = SET_SRC (PATTERN (tmp));
		  if (GET_CODE (ite) != IF_THEN_ELSE)
		    return;
9481 cc = XEXP (XEXP (ite, 0), 0);
9482 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9483 || GET_MODE (cc) != CCRAWmode
		      || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
		    return;
		  if (bb->succs->length () != 2)
		    return;
9490 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
		  if (GET_CODE (XEXP (ite, 0)) == NE)
		    mask ^= 0xf;

		  if (mask == CC0)
		    target = XEXP (ite, 1);
		  else if (mask == (CC0 ^ 0xf))
		    target = XEXP (ite, 2);
		  else
		    return;
		  {
		    edge_iterator ei;
		    edge e1, e2;

		    ei = ei_start (bb->succs);
		    e1 = ei_safe_edge (ei);
		    ei_next (&ei);
		    e2 = ei_safe_edge (ei);

		    if (e2->flags & EDGE_FALLTHRU)
		      {
			e2 = e1;
			e1 = ei_safe_edge (ei);
		      }

		    if (!(e1->flags & EDGE_FALLTHRU))
		      return;

		    tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
		  }
		  if (tmp == BB_END (bb))
		    break;
		}
	    }

	  if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
	    {
	      if (tend_bb != NULL)
		return;
	      tend_bb = bb;
	    }
	}
    }
9535 /* Either we successfully remove the FPR clobbers here or we are not
     able to do anything for this TX.  Both cases don't qualify for
     another look.  */
  cfun->machine->tbegin_p = false;
  if (tbegin_bb == NULL || tend_bb == NULL)
    return;
9543 calculate_dominance_info (CDI_POST_DOMINATORS);
9544 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
  free_dominance_info (CDI_POST_DOMINATORS);

  if (!result)
    return;

  PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
			      gen_rtvec (2,
9552 XVECEXP (PATTERN (tbegin_insn), 0, 0),
9553 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
9554 INSN_CODE (tbegin_insn) = -1;
  df_insn_rescan (tbegin_insn);
}
9560 /* Return true if it is legal to put a value with MODE into REGNO. */
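/* For instance, DFmode fits in a single FPR, while TFmode (without
   vector support) must go into an even/odd FPR pair; on 31-bit targets
   DImode likewise needs an even/odd GPR pair.  This is what the
   REGNO_PAIR_OK checks below enforce.  */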
bool
s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
    return false;

  switch (REGNO_REG_CLASS (regno))
    {
    case VEC_REGS:
      return ((GET_MODE_CLASS (mode) == MODE_INT
	       && s390_class_max_nregs (VEC_REGS, mode) == 1)
	      || mode == DFmode
	      || s390_vector_mode_supported_p (mode));
    case FP_REGS:
      if (TARGET_VX
	  && ((GET_MODE_CLASS (mode) == MODE_INT
	       && s390_class_max_nregs (FP_REGS, mode) == 1)
	      || mode == DFmode
	      || s390_vector_mode_supported_p (mode)))
	return true;

      if (REGNO_PAIR_OK (regno, mode))
	{
	  if (mode == SImode || mode == DImode)
	    return true;

	  if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
	    return true;
	}
      break;
    case ADDR_REGS:
      if (FRAME_REGNO_P (regno) && mode == Pmode)
	return true;

      /* fallthrough */
    case GENERAL_REGS:
      if (REGNO_PAIR_OK (regno, mode))
	{
	  if (TARGET_ZARCH
	      || (mode != TFmode && mode != TCmode && mode != TDmode))
	    return true;
	}
      break;
    case CC_REGS:
      if (GET_MODE_CLASS (mode) == MODE_CC)
	return true;
      break;
    case ACCESS_REGS:
      if (REGNO_PAIR_OK (regno, mode))
	{
	  if (mode == SImode || mode == Pmode)
	    return true;
	}
      break;
    default:
      return false;
    }

  return false;
}
9624 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
bool
s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
{
9629 /* Once we've decided upon a register to use as base register, it must
9630 no longer be used for any other purpose. */
9631 if (cfun->machine->base_reg)
9632 if (REGNO (cfun->machine->base_reg) == old_reg
	|| REGNO (cfun->machine->base_reg) == new_reg)
      return false;
9636 /* Prevent regrename from using call-saved regs which haven't
9637 actually been saved. This is necessary since regrename assumes
9638 the backend save/restore decisions are based on
9639 df_regs_ever_live. Since we have our own routine we have to tell
9640 regrename manually about it. */
9641 if (GENERAL_REGNO_P (new_reg)
9642 && !call_really_used_regs[new_reg]
      && cfun_gpr_save_slot (new_reg) == 0)
    return false;

  return true;
}
/* Return nonzero if register REGNO can be used as a scratch register
   in peephole2.  */

static bool
s390_hard_regno_scratch_ok (unsigned int regno)
{
9655 /* See s390_hard_regno_rename_ok. */
9656 if (GENERAL_REGNO_P (regno)
9657 && !call_really_used_regs[regno]
      && cfun_gpr_save_slot (regno) == 0)
    return false;

  return true;
}
9664 /* Maximum number of registers to represent a value of mode MODE
9665 in a register of class RCLASS. */
int
s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
{
  int reg_size;
  bool reg_pair_required_p = false;

  switch (rclass)
    {
    case FP_REGS:
    case VEC_REGS:
      reg_size = TARGET_VX ? 16 : 8;
9679 /* TF and TD modes would fit into a VR but we put them into a
	 register pair since we do not have 128bit FP instructions on
	 full VRs.  */
      if (TARGET_VX
	  && SCALAR_FLOAT_MODE_P (mode)
9684 && GET_MODE_SIZE (mode) >= 16)
9685 reg_pair_required_p = true;
9687 /* Even if complex types would fit into a single FPR/VR we force
9688 them into a register pair to deal with the parts more easily.
9689 (FIXME: What about complex ints?) */
9690 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	reg_pair_required_p = true;
      break;
    case ACCESS_REGS:
      reg_size = 4;
      break;
    default:
      reg_size = UNITS_PER_WORD;
      break;
    }
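  /* Example: with TARGET_VX, reg_size is 16, so a plain TFmode would
     fit into one VR (16/16 == 1); reg_pair_required_p instead yields
     2 * ((16/2 + 15) / 16) == 2, forcing the register pair described
     above.  */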
9701 if (reg_pair_required_p)
9702 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
9707 /* Return TRUE if changing mode from FROM to TO should not be allowed
9708 for register class CLASS. */
bool
s390_cannot_change_mode_class (machine_mode from_mode,
			       machine_mode to_mode,
			       enum reg_class rclass)
{
9715 machine_mode small_mode;
9716 machine_mode big_mode;
  if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
    return false;
  if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
    {
      small_mode = from_mode;
      big_mode = to_mode;
    }
  else
    {
      small_mode = to_mode;
      big_mode = from_mode;
    }

  /* Values residing in VRs are little-endian style.  All modes are
     placed left-aligned in a VR.  This means that we cannot allow
     switching between modes with differing sizes.  Also if the vector
     facility is available we still place TFmode values in VR register
     pairs, since the only instructions we have operating on TFmode
     deal with register pairs.  Therefore we have to allow DFmode
     subregs of TFmode to enable the TFmode splitters.  */
9739 if (reg_classes_intersect_p (VEC_REGS, rclass)
9740 && (GET_MODE_SIZE (small_mode) < 8
	  || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
    return true;
9744 /* Likewise for access registers, since they have only half the
9745 word size on 64-bit. */
  if (reg_classes_intersect_p (ACCESS_REGS, rclass))
    return true;

  return false;
}
/* Return true if we use LRA instead of the reload pass.  */

static bool
s390_lra_p (void)
{
  return s390_lra_flag;
}
9759 /* Return true if register FROM can be eliminated via register TO. */
static bool
s390_can_eliminate (const int from, const int to)
{
9764 /* On zSeries machines, we have not marked the base register as fixed.
9765 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
9766 If a function requires the base register, we say here that this
9767 elimination cannot be performed. This will cause reload to free
9768 up the base register (as if it were fixed). On the other hand,
9769 if the current function does *not* require the base register, we
9770 say here the elimination succeeds, which in turn allows reload
9771 to allocate the base register for any other purpose. */
  if (from == BASE_REGNUM && to == BASE_REGNUM)
    {
      if (TARGET_CPU_ZARCH)
	{
	  s390_init_frame_layout ();
	  return cfun->machine->base_reg == NULL_RTX;
	}

      return false;
    }
9783 /* Everything else must point into the stack frame. */
9784 gcc_assert (to == STACK_POINTER_REGNUM
9785 || to == HARD_FRAME_POINTER_REGNUM);
9787 gcc_assert (from == FRAME_POINTER_REGNUM
9788 || from == ARG_POINTER_REGNUM
9789 || from == RETURN_ADDRESS_POINTER_REGNUM);
9791 /* Make sure we actually saved the return address. */
9792 if (from == RETURN_ADDRESS_POINTER_REGNUM)
    if (!crtl->calls_eh_return
	&& !cfun->stdarg
	&& !cfun_frame_layout.save_return_addr_p)
      return false;

  return true;
}
9801 /* Return offset between register FROM and TO initially after prolog. */
HOST_WIDE_INT
s390_initial_elimination_offset (int from, int to)
{
9806 HOST_WIDE_INT offset;
9808 /* ??? Why are we called for non-eliminable pairs? */
  if (!s390_can_eliminate (from, to))
    return 0;

  switch (from)
    {
9814 case FRAME_POINTER_REGNUM:
9815 offset = (get_frame_size()
9816 + STACK_POINTER_OFFSET
	      + crtl->outgoing_args_size);
      break;
9820 case ARG_POINTER_REGNUM:
9821 s390_init_frame_layout ();
      offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
      break;
9825 case RETURN_ADDRESS_POINTER_REGNUM:
9826 s390_init_frame_layout ();
9828 if (cfun_frame_layout.first_save_gpr_slot == -1)
9830 /* If it turns out that for stdarg nothing went into the reg
	     save area we also do not need the return address
	     save slot.  */
9833 if (cfun->stdarg && !cfun_save_arg_fprs_p)
9839 /* In order to make the following work it is not necessary for
9840 r14 to have a save slot. It is sufficient if one other GPR
9841 got one. Since the GPRs are always stored without gaps we
	 are able to calculate where the r14 save slot would
	 reside.  */
9844 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
		(RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
		UNITS_PER_LONG);
      break;

    default:
      gcc_unreachable ();
    }

  return offset;
}
9860 /* Emit insn to save fpr REGNUM at offset OFFSET relative
9861 to register BASE. Return generated insn. */
static rtx
save_fpr (rtx base, int offset, int regnum)
{
  rtx addr;

  addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
9869 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
9870 set_mem_alias_set (addr, get_varargs_alias_set ());
  else
    set_mem_alias_set (addr, get_frame_alias_set ());
  return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
}
9877 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
9878 to register BASE. Return generated insn. */
static rtx
restore_fpr (rtx base, int offset, int regnum)
{
  rtx addr;

  addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
9885 set_mem_alias_set (addr, get_frame_alias_set ());
  return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
}
9890 /* Return true if REGNO is a global register, but not one
9891 of the special ones that need to be saved/restored in anyway. */
static bool
global_not_special_regno_p (int regno)
{
9896 return (global_regs[regno]
9897 /* These registers are special and need to be
9898 restored in any case. */
9899 && !(regno == STACK_POINTER_REGNUM
9900 || regno == RETURN_REGNUM
9901 || regno == BASE_REGNUM
	       || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
}
9905 /* Generate insn to save registers FIRST to LAST into
9906 the register save area located at offset OFFSET
9907 relative to register BASE. */
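/* As an example, save_gprs (stack_pointer_rtx, gprs_offset, 6, 15)
   emits one store-multiple (STMG on 64 bit, STM on 31 bit) covering
   ten consecutive slots; first == last degenerates to a plain store.  */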
static rtx
save_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn, note;
  int i;
9915 addr = plus_constant (Pmode, base, offset);
9916 addr = gen_rtx_MEM (Pmode, addr);
9918 set_mem_alias_set (addr, get_frame_alias_set ());
  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
	insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
      else
	insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));

      if (!global_not_special_regno_p (first))
	RTX_FRAME_RELATED_P (insn) = 1;

      return insn;
    }
9934 insn = gen_store_multiple (addr,
9935 gen_rtx_REG (Pmode, first),
9936 GEN_INT (last - first + 1));
9938 if (first <= 6 && cfun->stdarg)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
	rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);

	if (first + i <= 6)
	  set_mem_alias_set (mem, get_varargs_alias_set ());
      }
9947 /* We need to set the FRAME_RELATED flag on all SETs
9948 inside the store-multiple pattern.
9950 However, we must not emit DWARF records for registers 2..5
9951 if they are stored for use by variable arguments ...
     ??? Unfortunately, it is not enough to simply not set the
9954 FRAME_RELATED flags for those SETs, because the first SET
9955 of the PARALLEL is always treated as if it had the flag
9956 set, even if it does not. Therefore we emit a new pattern
9957 without those registers as REG_FRAME_RELATED_EXPR note. */
      if (first >= 6 && !global_not_special_regno_p (first))
	{
	  rtx pat = PATTERN (insn);
9963 for (i = 0; i < XVECLEN (pat, 0); i++)
9964 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		&& !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
									 0, i)))))
9967 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else if (last >= 6)
	{
	  int start;

	  for (start = first >= 6 ? first : 6; start <= last; start++)
	    if (!global_not_special_regno_p (start))
	      break;

	  if (start > last)
	    return insn;

	  addr = plus_constant (Pmode, base,
				offset + (start - first) * UNITS_PER_LONG);

	  if (start == last)
	    {
	      if (TARGET_64BIT)
		note = gen_movdi (gen_rtx_MEM (Pmode, addr),
9989 gen_rtx_REG (Pmode, start));
	      else
		note = gen_movsi (gen_rtx_MEM (Pmode, addr),
9992 gen_rtx_REG (Pmode, start));
9993 note = PATTERN (note);
9995 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
	      RTX_FRAME_RELATED_P (insn) = 1;

	      return insn;
	    }
10001 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10002 gen_rtx_REG (Pmode, start),
10003 GEN_INT (last - start + 1));
10004 note = PATTERN (note);
10006 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10008 for (i = 0; i < XVECLEN (note, 0); i++)
10009 if (GET_CODE (XVECEXP (note, 0, i)) == SET
		&& !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
									 0, i)))))
10012 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  return insn;
}
10020 /* Generate insn to restore registers FIRST to LAST from
10021 the register save area located at offset OFFSET
10022 relative to register BASE. */
static rtx
restore_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn;

  addr = plus_constant (Pmode, base, offset);
10030 addr = gen_rtx_MEM (Pmode, addr);
10031 set_mem_alias_set (addr, get_frame_alias_set ());
  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
	insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
      else
	insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);

      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }
  insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
			    addr,
			    GEN_INT (last - first + 1));
  RTX_FRAME_RELATED_P (insn) = 1;
  return insn;
}
10052 /* Return insn sequence to load the GOT register. */
10054 static GTY(()) rtx got_symbol;
rtx_insn *
s390_load_got (void)
{
  rtx_insn *insns;
10060 /* We cannot use pic_offset_table_rtx here since we use this
10061 function also for non-pic if __tls_get_offset is called and in
     that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
     would not be correct.  */
  rtx got_rtx = gen_rtx_REG (Pmode, 12);

  if (!got_symbol)
    {
      got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
    }

  start_sequence ();

  if (TARGET_CPU_ZARCH)
    emit_move_insn (got_rtx, got_symbol);
  else
    {
      rtx offset;
10082 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10083 UNSPEC_LTREL_OFFSET);
10084 offset = gen_rtx_CONST (Pmode, offset);
10085 offset = force_const_mem (Pmode, offset);
10087 emit_move_insn (got_rtx, offset);
10089 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10090 UNSPEC_LTREL_BASE);
10091 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
      emit_move_insn (got_rtx, offset);
    }

  insns = get_insns ();
  end_sequence ();

  return insns;
}
10101 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10102 and the change to the stack pointer. */
static void
s390_emit_stack_tie (void)
{
10107 rtx mem = gen_frame_mem (BLKmode,
10108 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
  emit_insn (gen_stack_tie (mem));
}
10113 /* Copy GPRS into FPR save slots. */
static void
s390_save_gprs_to_fprs (void)
{
  int i;

  if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
    return;

  for (i = 6; i < 16; i++)
    {
      if (FP_REGNO_P (cfun_gpr_save_slot (i)))
	{
	  rtx_insn *insn =
	    emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
			    gen_rtx_REG (DImode, i));
10130 RTX_FRAME_RELATED_P (insn) = 1;
10131 /* This prevents dwarf2cfi from interpreting the set. Doing
	     so it might emit def_cfa_register infos setting an FPR as
	     return address register.  */
	  add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
	}
    }
}
10139 /* Restore GPRs from FPR save slots. */
static void
s390_restore_gprs_from_fprs (void)
{
  int i;

  if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
    return;

  for (i = 6; i < 16; i++)
    {
      if (FP_REGNO_P (cfun_gpr_save_slot (i)))
	{
	  rtx_insn *insn =
	    emit_move_insn (gen_rtx_REG (DImode, i),
			    gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
10156 df_set_regs_ever_live (i, true);
10157 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10158 if (i == STACK_POINTER_REGNUM)
10159 add_reg_note (insn, REG_CFA_DEF_CFA,
10160 plus_constant (Pmode, stack_pointer_rtx,
10161 STACK_POINTER_OFFSET));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
}
/* A pass run immediately before shrink-wrapping and prologue and epilogue
   generation.  */

namespace {

const pass_data pass_data_s390_early_mach =
{
10175 RTL_PASS, /* type */
10176 "early_mach", /* name */
10177 OPTGROUP_NONE, /* optinfo_flags */
10178 TV_MACH_DEP, /* tv_id */
10179 0, /* properties_required */
10180 0, /* properties_provided */
10181 0, /* properties_destroyed */
10182 0, /* todo_flags_start */
  ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
};
class pass_s390_early_mach : public rtl_opt_pass
{
public:
  pass_s390_early_mach (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
  {}
10193 /* opt_pass methods: */
10194 virtual unsigned int execute (function *);
10196 }; // class pass_s390_early_mach
unsigned int
pass_s390_early_mach::execute (function *fun)
{
  rtx_insn *insn;
10203 /* Try to get rid of the FPR clobbers. */
10204 s390_optimize_nonescaping_tx ();
10206 /* Re-compute register info. */
10207 s390_register_info ();
10209 /* If we're using a base register, ensure that it is always valid for
10210 the first non-prologue instruction. */
10211 if (fun->machine->base_reg)
10212 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10214 /* Annotate all constant pool references to let the scheduler know
10215 they implicitly use the base register. */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	annotate_constant_pool_refs (&PATTERN (insn));
	df_insn_rescan (insn);
      }

  return 0;
}
10225 } // anon namespace
10227 /* Expand the prologue into a bunch of separate insns. */
void
s390_emit_prologue (void)
{
  rtx insn, addr;
  rtx temp_reg;
  int i;
  int offset;
  int next_fpr = 0;
10238 /* Choose best register to use for temp use within prologue.
10239 See below for why TPF must use the register 1. */
  if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
      && !crtl->is_leaf
      && !TARGET_TPF_PROFILING)
    temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
  else
    temp_reg = gen_rtx_REG (Pmode, 1);
10248 s390_save_gprs_to_fprs ();
10250 /* Save call saved gprs. */
  if (cfun_frame_layout.first_save_gpr != -1)
    {
      insn = save_gprs (stack_pointer_rtx,
10254 cfun_frame_layout.gprs_offset +
10255 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10256 - cfun_frame_layout.first_save_gpr_slot),
10257 cfun_frame_layout.first_save_gpr,
			cfun_frame_layout.last_save_gpr);
      emit_insn (insn);
    }
10262 /* Dummy insn to mark literal pool slot. */
10264 if (cfun->machine->base_reg)
10265 emit_insn (gen_main_pool (cfun->machine->base_reg));
10267 offset = cfun_frame_layout.f0_offset;
10269 /* Save f0 and f2. */
  for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
    {
      if (cfun_fpr_save_p (i))
	{
	  save_fpr (stack_pointer_rtx, offset, i);
	  offset += 8;
	}
      else if (!TARGET_PACKED_STACK || cfun->stdarg)
	offset += 8;
    }
10281 /* Save f4 and f6. */
10282 offset = cfun_frame_layout.f4_offset;
  for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
    {
      if (cfun_fpr_save_p (i))
	{
	  insn = save_fpr (stack_pointer_rtx, offset, i);
	  offset += 8;

	  /* If f4 and f6 are call clobbered they are saved due to
	     stdargs and therefore are not frame related.  */
	  if (!call_really_used_regs[i])
	    RTX_FRAME_RELATED_P (insn) = 1;
	}
      else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
	offset += 8;
    }
10299 if (TARGET_PACKED_STACK
10300 && cfun_save_high_fprs_p
      && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
    {
      offset = (cfun_frame_layout.f8_offset
		+ (cfun_frame_layout.high_fprs - 1) * 8);

      for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
	if (cfun_fpr_save_p (i))
	  {
	    insn = save_fpr (stack_pointer_rtx, offset, i);

	    RTX_FRAME_RELATED_P (insn) = 1;
	    offset -= 8;
	  }
      if (offset >= cfun_frame_layout.f8_offset)
	next_fpr = i;
    }
10318 if (!TARGET_PACKED_STACK)
10319 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10321 if (flag_stack_usage_info)
10322 current_function_static_stack_size = cfun_frame_layout.frame_size;
10324 /* Decrement stack pointer. */
  if (cfun_frame_layout.frame_size > 0)
    {
      rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10329 rtx real_frame_off;
      if (s390_stack_size)
	{
	  HOST_WIDE_INT stack_guard;
10335 if (s390_stack_guard)
	    stack_guard = s390_stack_guard;
	  else
	    {
10339 /* If no value for stack guard is provided the smallest power of 2
10340 larger than the current frame size is chosen. */
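	      /* E.g. a frame size of 120000 bytes yields a stack_guard
		 of 131072 (2^17).  */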
	      stack_guard = 1;
	      while (stack_guard < cfun_frame_layout.frame_size)
		stack_guard <<= 1;
	    }
	  if (cfun_frame_layout.frame_size >= s390_stack_size)
	    {
	      warning (0, "frame size of function %qs is %wd"
		       " bytes exceeding user provided stack limit of "
		       "%d bytes.  "
		       "An unconditional trap is added.",
		       current_function_name(), cfun_frame_layout.frame_size,
		       s390_stack_size);
	      emit_insn (gen_trap ());
	    }
	  else
	    {
10359 /* stack_guard has to be smaller than s390_stack_size.
10360 Otherwise we would emit an AND with zero which would
10361 not match the test under mask pattern. */
	      if (stack_guard >= s390_stack_size)
		{
10364 warning (0, "frame size of function %qs is %wd"
10365 " bytes which is more than half the stack size. "
10366 "The dynamic check would not be reliable. "
10367 "No check emitted for this function.",
10368 current_function_name(),
			   cfun_frame_layout.frame_size);
		}
	      else
		{
10373 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10374 & ~(stack_guard - 1));
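		  /* Example: s390_stack_size == 65536 and stack_guard ==
		     4096 give a mask of 0xf000; roughly speaking, if all
		     of those stack pointer bits are zero, fewer than
		     stack_guard bytes of the allowed stack remain and
		     the conditional trap below fires.  */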
10376 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10377 GEN_INT (stack_check_mask));
		  if (TARGET_64BIT)
		    emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
							 t, const0_rtx),
					     t, const0_rtx, const0_rtx));
		  else
		    emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
							 t, const0_rtx),
					     t, const0_rtx, const0_rtx));
		}
	    }
	}
10390 if (s390_warn_framesize > 0
10391 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10392 warning (0, "frame size of %qs is %wd bytes",
10393 current_function_name (), cfun_frame_layout.frame_size);
10395 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10396 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10398 /* Save incoming stack pointer into temp reg. */
10399 if (TARGET_BACKCHAIN || next_fpr)
10400 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10402 /* Subtract frame size from stack pointer. */
      if (DISP_IN_RANGE (INTVAL (frame_off)))
	{
	  insn = gen_rtx_SET (stack_pointer_rtx,
			      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					    frame_off));
	  insn = emit_insn (insn);
	}
      else
	{
10413 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10414 frame_off = force_const_mem (Pmode, frame_off);
10416 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
	  annotate_constant_pool_refs (&PATTERN (insn));
	}
10420 RTX_FRAME_RELATED_P (insn) = 1;
10421 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10422 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10423 gen_rtx_SET (stack_pointer_rtx,
				 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					       real_frame_off)));
10427 /* Set backchain. */
      if (TARGET_BACKCHAIN)
	{
	  if (cfun_frame_layout.backchain_offset)
	    addr = gen_rtx_MEM (Pmode,
10433 plus_constant (Pmode, stack_pointer_rtx,
					       cfun_frame_layout.backchain_offset));
	  else
	    addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10437 set_mem_alias_set (addr, get_frame_alias_set ());
	  insn = emit_insn (gen_move_insn (addr, temp_reg));
	}
10441 /* If we support non-call exceptions (e.g. for Java),
10442 we need to make sure the backchain pointer is set up
10443 before any possibly trapping memory access. */
      if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
	{
	  addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
	  emit_clobber (addr);
	}
    }
10451 /* Save fprs 8 - 15 (64 bit ABI). */
  if (cfun_save_high_fprs_p && next_fpr)
    {
10455 /* If the stack might be accessed through a different register
10456 we have to make sure that the stack pointer decrement is not
10457 moved below the use of the stack slots. */
10458 s390_emit_stack_tie ();
10460 insn = emit_insn (gen_add2_insn (temp_reg,
				       GEN_INT (cfun_frame_layout.f8_offset)));

      offset = 0;
10465 for (i = FPR8_REGNUM; i <= next_fpr; i++)
	if (cfun_fpr_save_p (i))
	  {
	    rtx addr = plus_constant (Pmode, stack_pointer_rtx,
				      cfun_frame_layout.frame_size
				      + cfun_frame_layout.f8_offset
				      + offset);

	    insn = save_fpr (temp_reg, offset, i);
	    offset += 8;
10475 RTX_FRAME_RELATED_P (insn) = 1;
10476 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10477 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
				       gen_rtx_REG (DFmode, i)));
	  }
    }
10482 /* Set frame pointer, if needed. */
10484 if (frame_pointer_needed)
10486 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10487 RTX_FRAME_RELATED_P (insn) = 1;
10490 /* Set up got pointer, if needed. */
  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    {
      rtx_insn *insns = s390_load_got ();
10496 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
	annotate_constant_pool_refs (&PATTERN (insn));

      emit_insn (insns);
    }
  if (TARGET_TPF_PROFILING)
    {
10504 /* Generate a BAS instruction to serve as a function
10505 entry intercept to facilitate the use of tracing
10506 algorithms located at the branch target. */
10507 emit_insn (gen_prologue_tpf ());
10509 /* Emit a blockage here so that all code
10510 lies between the profiling mechanisms. */
      emit_insn (gen_blockage ());
    }
}
10515 /* Expand the epilogue into a bunch of separate insns. */
void
s390_emit_epilogue (bool sibcall)
{
  rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
  int area_bottom, area_top, offset = 0;
  int next_offset;
  rtvec p;
  int i;
  if (TARGET_TPF_PROFILING)
    {
10529 /* Generate a BAS instruction to serve as a function
10530 entry intercept to facilitate the use of tracing
10531 algorithms located at the branch target. */
10533 /* Emit a blockage here so that all code
10534 lies between the profiling mechanisms. */
10535 emit_insn (gen_blockage ());
      emit_insn (gen_epilogue_tpf ());
    }
10540 /* Check whether to use frame or stack pointer for restore. */
10542 frame_pointer = (frame_pointer_needed
10543 ? hard_frame_pointer_rtx : stack_pointer_rtx);
10545 s390_frame_area (&area_bottom, &area_top);
10547 /* Check whether we can access the register save area.
10548 If not, increment the frame pointer as required. */
  if (area_top <= area_bottom)
    {
      /* Nothing to restore.  */
    }
10554 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
	   && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
    {
      /* Area is in range.  */
      offset = cfun_frame_layout.frame_size;
    }
  else
    {
10562 rtx insn, frame_off, cfa;
10564 offset = area_bottom < 0 ? -area_bottom : 0;
10565 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
10567 cfa = gen_rtx_SET (frame_pointer,
10568 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
      if (DISP_IN_RANGE (INTVAL (frame_off)))
	{
	  insn = gen_rtx_SET (frame_pointer,
			      gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
	  insn = emit_insn (insn);
	}
      else
	{
10577 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10578 frame_off = force_const_mem (Pmode, frame_off);
10580 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
	  annotate_constant_pool_refs (&PATTERN (insn));
	}

      add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Restore call saved fprs.  */

  if (TARGET_64BIT)
    {
      if (cfun_save_high_fprs_p)
	{
10593 next_offset = cfun_frame_layout.f8_offset;
10594 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10596 if (cfun_fpr_save_p (i))
10598 restore_fpr (frame_pointer,
10599 offset + next_offset, i);
		  cfa_restores
		    = alloc_reg_note (REG_CFA_RESTORE,
				      gen_rtx_REG (DFmode, i), cfa_restores);
		  next_offset += 8;
		}
	    }
	}
    }
  else
    {
10611 next_offset = cfun_frame_layout.f4_offset;
      for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
	{
	  if (cfun_fpr_save_p (i))
	    {
10617 restore_fpr (frame_pointer,
10618 offset + next_offset, i);
	      cfa_restores
		= alloc_reg_note (REG_CFA_RESTORE,
				  gen_rtx_REG (DFmode, i), cfa_restores);
	      next_offset += 8;
	    }
	  else if (!TARGET_PACKED_STACK)
	    next_offset += 8;
	}
    }
10630 /* Return register. */
10632 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10634 /* Restore call saved gprs. */
  if (cfun_frame_layout.first_restore_gpr != -1)
    {
      rtx insn;
      rtx addr;
      int i;
10641 /* Check for global register and save them
10642 to stack location from where they get restored. */
10644 for (i = cfun_frame_layout.first_restore_gpr;
	   i <= cfun_frame_layout.last_restore_gpr;
	   i++)
	{
	  if (global_not_special_regno_p (i))
	    {
10650 addr = plus_constant (Pmode, frame_pointer,
10651 offset + cfun_frame_layout.gprs_offset
				    + (i - cfun_frame_layout.first_save_gpr_slot)
				    * UNITS_PER_LONG);
	      addr = gen_rtx_MEM (Pmode, addr);
10655 set_mem_alias_set (addr, get_frame_alias_set ());
	      emit_move_insn (addr, gen_rtx_REG (Pmode, i));
	    }
	  else
	    cfa_restores
	      = alloc_reg_note (REG_CFA_RESTORE,
				gen_rtx_REG (Pmode, i), cfa_restores);
	}
10666 /* Fetch return address from stack before load multiple,
	 this helps scheduling.
10669 Only do this if we already decided that r14 needs to be
10670 saved to a stack slot. (And not just because r14 happens to
10671 be in between two GPRs which need saving.) Otherwise it
10672 would be difficult to take that decision back in
10673 s390_optimize_prologue. */
      if (cfun_gpr_save_slot (RETURN_REGNUM) == -1)
	{
	  int return_regnum = find_unused_clobbered_reg ();
	  if (!return_regnum)
	    return_regnum = 4;
	  return_reg = gen_rtx_REG (Pmode, return_regnum);
10681 addr = plus_constant (Pmode, frame_pointer,
				offset + cfun_frame_layout.gprs_offset
				+ (RETURN_REGNUM
				   - cfun_frame_layout.first_save_gpr_slot)
				* UNITS_PER_LONG);
	  addr = gen_rtx_MEM (Pmode, addr);
10687 set_mem_alias_set (addr, get_frame_alias_set ());
10688 emit_move_insn (return_reg, addr);
10690 /* Once we did that optimization we have to make sure
10691 s390_optimize_prologue does not try to remove the
10692 store of r14 since we will not be able to find the
10693 load issued here. */
	  cfun_frame_layout.save_return_addr_p = true;
	}
10698 insn = restore_gprs (frame_pointer,
10699 offset + cfun_frame_layout.gprs_offset
10700 + (cfun_frame_layout.first_restore_gpr
			      - cfun_frame_layout.first_save_gpr_slot)
			   * UNITS_PER_LONG,
10703 cfun_frame_layout.first_restore_gpr,
10704 cfun_frame_layout.last_restore_gpr);
10705 insn = emit_insn (insn);
10706 REG_NOTES (insn) = cfa_restores;
10707 add_reg_note (insn, REG_CFA_DEF_CFA,
10708 plus_constant (Pmode, stack_pointer_rtx,
10709 STACK_POINTER_OFFSET));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  s390_restore_gprs_from_fprs ();

  if (! sibcall)
    {
      /* Return to caller.  */
10720 p = rtvec_alloc (2);
10722 RTVEC_ELT (p, 0) = ret_rtx;
10723 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
}
10728 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
static void
s300_set_up_by_prologue (hard_reg_set_container *regs)
{
10733 if (cfun->machine->base_reg
10734 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
    SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
}
10738 /* Return true if the function can use simple_return to return outside
   of a shrink-wrapped region.  At present shrink-wrapping is supported
   in all cases.  */

bool
s390_can_use_simple_return_insn (void)
{
  return true;
}
10748 /* Return true if the epilogue is guaranteed to contain only a return
10749 instruction and if a direct return can therefore be used instead.
10750 One of the main advantages of using direct return instructions
10751 is that we can then use conditional returns. */
bool
s390_can_use_return_insn (void)
{
  int i;

  if (!reload_completed)
    return false;
  if (TARGET_TPF_PROFILING)
    return false;
10767 for (i = 0; i < 16; i++)
    if (cfun_gpr_save_slot (i))
      return false;
10771 /* For 31 bit this is not covered by the frame_size check below
10772 since f4, f6 are saved in the register save area without needing
     additional stack space.  */
  if (!TARGET_64BIT
      && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
    return false;
10778 if (cfun->machine->base_reg
      && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
    return false;
  return cfun_frame_layout.frame_size == 0;
}
10785 /* The VX ABI differs for vararg functions. Therefore we need the
   prototype of the callee to be available when passing vector type
   values.  */
10788 static const char *
10789 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
  return ((TARGET_VX_ABI
	   && typelist == 0
	   && VECTOR_TYPE_P (TREE_TYPE (val))
10794 && (funcdecl == NULL_TREE
10795 || (TREE_CODE (funcdecl) == FUNCTION_DECL
10796 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
	  ? N_("Vector argument passed to unprototyped function")
	  : NULL);
}
10802 /* Return the size in bytes of a function argument of
10803 type TYPE and/or mode MODE. At least one of TYPE or
10804 MODE must be specified. */
static int
s390_function_arg_size (machine_mode mode, const_tree type)
{
  if (type)
    return int_size_in_bytes (type);
10812 /* No type info available for some library calls ... */
10813 if (mode != BLKmode)
10814 return GET_MODE_SIZE (mode);
  /* If we have neither type nor mode, abort.  */
  gcc_unreachable ();
}
10820 /* Return true if a function argument of type TYPE and mode MODE
10821 is to be passed in a vector register, if available. */
static bool
s390_function_arg_vector (machine_mode mode, const_tree type)
{
  if (!TARGET_VX_ABI)
    return false;
  if (s390_function_arg_size (mode, type) > 16)
    return false;
10832 /* No type info available for some library calls ... */
  if (!type)
    return VECTOR_MODE_P (mode);
10836 /* The ABI says that record types with a single member are treated
10837 just like that member would be. */
  while (TREE_CODE (type) == RECORD_TYPE)
    {
      tree field, single = NULL_TREE;
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	{
	  if (TREE_CODE (field) != FIELD_DECL)
	    continue;

	  if (single == NULL_TREE)
	    single = TREE_TYPE (field);
	  else
	    return false;
	}
      if (single == NULL_TREE)
	return false;
      else
	{
	  /* If the field declaration adds extra bytes due to
	     e.g. padding, this is not accepted as a vector type.  */
	  if (int_size_in_bytes (single) <= 0
	      || int_size_in_bytes (single) != int_size_in_bytes (type))
	    return false;
	  type = single;
	}
    }
  return VECTOR_TYPE_P (type);
}
10869 /* Return true if a function argument of type TYPE and mode MODE
10870 is to be passed in a floating-point register, if available. */
static bool
s390_function_arg_float (machine_mode mode, const_tree type)
{
  if (s390_function_arg_size (mode, type) > 8)
    return false;
10878 /* Soft-float changes the ABI: no floating-point registers are used. */
  if (TARGET_SOFT_FLOAT)
    return false;
10882 /* No type info available for some library calls ... */
  if (!type)
    return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
10886 /* The ABI says that record types with a single member are treated
10887 just like that member would be. */
  while (TREE_CODE (type) == RECORD_TYPE)
    {
      tree field, single = NULL_TREE;
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	{
	  if (TREE_CODE (field) != FIELD_DECL)
	    continue;

	  if (single == NULL_TREE)
	    single = TREE_TYPE (field);
	  else
	    return false;
	}
      if (single == NULL_TREE)
	return false;

      type = single;
    }
  return TREE_CODE (type) == REAL_TYPE;
}
10912 /* Return true if a function argument of type TYPE and mode MODE
10913 is to be passed in an integer register, or a pair of integer
10914 registers, if available. */
static bool
s390_function_arg_integer (machine_mode mode, const_tree type)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return false;

  /* No type info available for some library calls ...  */
  if (!type)
    return GET_MODE_CLASS (mode) == MODE_INT
10926 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
10928 /* We accept small integral (and similar) types. */
10929 if (INTEGRAL_TYPE_P (type)
10930 || POINTER_TYPE_P (type)
10931 || TREE_CODE (type) == NULLPTR_TYPE
10932 || TREE_CODE (type) == OFFSET_TYPE
      || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
    return true;
10936 /* We also accept structs of size 1, 2, 4, 8 that are not
10937 passed in floating-point registers. */
10938 if (AGGREGATE_TYPE_P (type)
10939 && exact_log2 (size) >= 0
      && !s390_function_arg_float (mode, type))
    return true;

  return false;
}
10946 /* Return 1 if a function argument of type TYPE and mode MODE
10947 is to be passed by reference. The ABI specifies that only
10948 structures of size 1, 2, 4, or 8 bytes are passed by value,
   all other structures (and complex numbers) are passed by
   reference.  */

static bool
s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
10954 machine_mode mode, const_tree type,
			bool named ATTRIBUTE_UNUSED)
{
10957 int size = s390_function_arg_size (mode, type);
  if (s390_function_arg_vector (mode, type))
    return false;

  if (size > 8)
    return true;

  if (type)
    {
      if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
	return true;
10970 if (TREE_CODE (type) == COMPLEX_TYPE
	  || TREE_CODE (type) == VECTOR_TYPE)
	return true;
    }

  return false;
}
10978 /* Update the data in CUM to advance over an argument of mode MODE and
10979 data type TYPE. (TYPE is null for libcalls where that information
10980 may not be available.). The boolean NAMED specifies whether the
10981 argument is a named argument (as opposed to an unnamed argument
10982 matching an ellipsis). */
static void
s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			   const_tree type, bool named)
{
10988 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10990 if (s390_function_arg_vector (mode, type))
10992 /* We are called for unnamed vector stdarg arguments which are
10993 passed on the stack. In this case this hook does not have to
	 do anything since stack arguments are tracked by common
	 code.  */
      if (!named)
	return;

      cum->vrs += 1;
    }
  else if (s390_function_arg_float (mode, type))
    {
      cum->fprs += 1;
    }
  else if (s390_function_arg_integer (mode, type))
    {
11006 int size = s390_function_arg_size (mode, type);
      cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
    }
  else
    gcc_unreachable ();
}
11013 /* Define where to put the arguments to a function.
11014 Value is zero to push the argument on the stack,
11015 or a hard register in which to store the argument.
11017 MODE is the argument's machine mode.
11018 TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
11021 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11022 the preceding args and about the function being called.
11023 NAMED is nonzero if this argument is a named parameter
11024 (otherwise it is an extra parameter matching an ellipsis).
11026 On S/390, we use general purpose registers 2 through 6 to
11027 pass integer, pointer, and certain structure arguments, and
11028 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11029 to pass floating point arguments. All remaining arguments
11030 are pushed to the stack. */
static rtx
s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
		   const_tree type, bool named)
{
11036 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11039 s390_check_type_for_vector_abi (type, true, false);
  if (s390_function_arg_vector (mode, type))
    {
      /* Vector arguments being part of the ellipsis are passed on the
	 stack.  */
      if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
	return NULL_RTX;

      return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
    }
  else if (s390_function_arg_float (mode, type))
    {
      if (cum->fprs + 1 > FP_ARG_NUM_REG)
	return NULL_RTX;
      else
	return gen_rtx_REG (mode, cum->fprs + 16);
    }
  else if (s390_function_arg_integer (mode, type))
    {
11059 int size = s390_function_arg_size (mode, type);
11060 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
      if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
	return NULL_RTX;
      else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11065 return gen_rtx_REG (mode, cum->gprs + 2);
      else if (n_gprs == 2)
	{
11068 rtvec p = rtvec_alloc (2);
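	  /* E.g. a DImode argument under -m31 -mzarch is passed as a
	     PARALLEL of two SImode pieces: cum->gprs + 2 carries the
	     high word at byte offset 0, cum->gprs + 3 the low word at
	     offset 4.  */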
	  RTVEC_ELT (p, 0)
	    = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
				 const0_rtx);
	  RTVEC_ELT (p, 1)
	    = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
				 GEN_INT (4));

	  return gen_rtx_PARALLEL (mode, p);
	}
    }
11081 /* After the real arguments, expand_call calls us once again
11082 with a void_type_node type. Whatever we return here is
11083 passed as operand 2 to the call expanders.
11085 We don't need this feature ... */
  else if (type == void_type_node)
    return const0_rtx;

  gcc_unreachable ();
}
11092 /* Return true if return values of type TYPE should be returned
11093 in a memory buffer whose address is passed by the caller as
11094 hidden first argument. */
static bool
s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
{
11099 /* We accept small integral (and similar) types. */
11100 if (INTEGRAL_TYPE_P (type)
11101 || POINTER_TYPE_P (type)
11102 || TREE_CODE (type) == OFFSET_TYPE
11103 || TREE_CODE (type) == REAL_TYPE)
11104 return int_size_in_bytes (type) > 8;
  /* Vector types which fit into a VR.  */
  if (TARGET_VX_ABI
      && VECTOR_TYPE_P (type)
      && int_size_in_bytes (type) <= 16)
    return false;
  /* Aggregates and similar constructs are always returned
     in memory.  */
11114 if (AGGREGATE_TYPE_P (type)
11115 || TREE_CODE (type) == COMPLEX_TYPE
      || VECTOR_TYPE_P (type))
    return true;
11119 /* ??? We get called on all sorts of random stuff from
11120 aggregate_value_p. We can't abort, but it's not clear
     what's safe to return.  Pretend it's a struct I guess.  */
  return true;
}
11125 /* Function arguments and return values are promoted to word size. */
11127 static machine_mode
s390_promote_function_mode (const_tree type, machine_mode mode,
			    int *punsignedp,
			    const_tree fntype ATTRIBUTE_UNUSED,
			    int for_return ATTRIBUTE_UNUSED)
{
11133 if (INTEGRAL_MODE_P (mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
    {
      if (type != NULL_TREE && POINTER_TYPE_P (type))
	*punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }

  return mode;
}
11144 /* Define where to return a (scalar) value of type RET_TYPE.
11145 If RET_TYPE is null, define where to return a (scalar)
   value of mode MODE from a libcall.  */

static rtx
s390_function_and_libcall_value (machine_mode mode,
11150 const_tree ret_type,
11151 const_tree fntype_or_decl,
				 bool outgoing ATTRIBUTE_UNUSED)
{
11154 /* For vector return types it is important to use the RET_TYPE
11155 argument whenever available since the middle-end might have
11156 changed the mode to a scalar mode. */
11157 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11158 || (!ret_type && VECTOR_MODE_P (mode)));
  /* For normal functions perform the promotion as
     promote_function_mode would do.  */
  if (ret_type)
    {
      int unsignedp = TYPE_UNSIGNED (ret_type);
      mode = promote_function_mode (ret_type, mode, &unsignedp,
				    fntype_or_decl, 1);
    }
11169 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11170 || SCALAR_FLOAT_MODE_P (mode)
11171 || (TARGET_VX_ABI && vector_ret_type_p));
11172 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11174 if (TARGET_VX_ABI && vector_ret_type_p)
11175 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11176 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11177 return gen_rtx_REG (mode, 16);
11178 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11179 || UNITS_PER_LONG == UNITS_PER_WORD)
11180 return gen_rtx_REG (mode, 2);
  else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
    {
11183 /* This case is triggered when returning a 64 bit value with
11184 -m31 -mzarch. Although the value would fit into a single
11185 register it has to be forced into a 32 bit register pair in
11186 order to match the ABI. */
11187 rtvec p = rtvec_alloc (2);
11190 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11192 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
      return gen_rtx_PARALLEL (mode, p);
    }

  gcc_unreachable ();
}
11200 /* Define where to return a scalar return value of type RET_TYPE. */
static rtx
s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
		     bool outgoing)
{
11206 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
					  fn_decl_or_type, outgoing);
}
/* Define where to return a scalar libcall return value of mode
   MODE.  */

static rtx
s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return s390_function_and_libcall_value (mode, NULL_TREE,
					  NULL_TREE, true);
}
11221 /* Create and return the va_list datatype.
11223 On S/390, va_list is an array type equivalent to
     typedef struct __va_list_tag
       {
	 long __gpr;
	 long __fpr;
	 void *__overflow_arg_area;
	 void *__reg_save_area;
       } va_list[1];
11233 where __gpr and __fpr hold the number of general purpose
11234 or floating point arguments used up to now, respectively,
11235 __overflow_arg_area points to the stack location of the
11236 next argument passed on the stack, and __reg_save_area
11237 always points to the start of the register area in the
11238 call frame of the current function. The function prologue
11239 saves all registers used for argument passing into this
11240 area if the function uses variable arguments. */
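/* Note: with 8 byte longs and pointers (64 bit) each va_list element
   laid out below is a 32 byte record, two counters followed by two
   pointers; va_list itself is an array of one such record.  */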
static tree
s390_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
  record = lang_hooks.types.make_type (RECORD_TYPE);

  type_decl =
    build_decl (BUILTINS_LOCATION,
11251 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11253 f_gpr = build_decl (BUILTINS_LOCATION,
11254 FIELD_DECL, get_identifier ("__gpr"),
11255 long_integer_type_node);
11256 f_fpr = build_decl (BUILTINS_LOCATION,
11257 FIELD_DECL, get_identifier ("__fpr"),
11258 long_integer_type_node);
11259 f_ovf = build_decl (BUILTINS_LOCATION,
11260 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11262 f_sav = build_decl (BUILTINS_LOCATION,
11263 FIELD_DECL, get_identifier ("__reg_save_area"),
11266 va_list_gpr_counter_field = f_gpr;
11267 va_list_fpr_counter_field = f_fpr;
11269 DECL_FIELD_CONTEXT (f_gpr) = record;
11270 DECL_FIELD_CONTEXT (f_fpr) = record;
11271 DECL_FIELD_CONTEXT (f_ovf) = record;
11272 DECL_FIELD_CONTEXT (f_sav) = record;
11274 TYPE_STUB_DECL (record) = type_decl;
11275 TYPE_NAME (record) = type_decl;
11276 TYPE_FIELDS (record) = f_gpr;
11277 DECL_CHAIN (f_gpr) = f_fpr;
11278 DECL_CHAIN (f_fpr) = f_ovf;
11279 DECL_CHAIN (f_ovf) = f_sav;
11281 layout_type (record);
11283 /* The correct type is an array type of one element. */
  return build_array_type (record, build_index_type (size_zero_node));
}
11287 /* Implement va_start by filling the va_list structure VALIST.
11288 STDARG_P is always true, and ignored.
11289 NEXTARG points to the first anonymous stack argument.
11291 The following global variables are used to initialize
11292 the va_list structure:
     crtl->args.info:
       holds number of gprs and fprs used for named arguments.
11296 crtl->args.arg_offset_rtx:
11297 holds the offset of the first anonymous stack argument
11298 (relative to the virtual arg pointer). */
static void
s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT n_gpr, n_fpr;
  int off;
11305 tree f_gpr, f_fpr, f_ovf, f_sav;
11306 tree gpr, fpr, ovf, sav, t;
11308 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11309 f_fpr = DECL_CHAIN (f_gpr);
11310 f_ovf = DECL_CHAIN (f_fpr);
11311 f_sav = DECL_CHAIN (f_ovf);
11313 valist = build_simple_mem_ref (valist);
11314 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11315 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11316 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11317 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11319 /* Count number of gp and fp argument registers used. */
11321 n_gpr = crtl->args.info.gprs;
11322 n_fpr = crtl->args.info.fprs;
11324 if (cfun->va_list_gpr_size)
11326 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11327 build_int_cst (NULL_TREE, n_gpr));
11328 TREE_SIDE_EFFECTS (t) = 1;
11329 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11332 if (cfun->va_list_fpr_size)
11334 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11335 build_int_cst (NULL_TREE, n_fpr));
11336 TREE_SIDE_EFFECTS (t) = 1;
11337 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11340 /* Find the overflow area.
11341 FIXME: This currently is too pessimistic when the vector ABI is
     enabled.  In that case we *always* set up the overflow area
     pointer.  */
  if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
      || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
      || TARGET_VX_ABI)
    {
      t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11350 off = INTVAL (crtl->args.arg_offset_rtx);
11351 off = off < 0 ? 0 : off;
11352 if (TARGET_DEBUG_ARG)
11353 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
11354 (int)n_gpr, (int)n_fpr, off);
11356 t = fold_build_pointer_plus_hwi (t, off);
11358 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11359 TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
11363 /* Find the register save area. */
11364 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
      || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
    {
11367 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
11368 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
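      /* return_address_pointer_rtx points at the r14 save slot, i.e.
	 RETURN_REGNUM slots past the start of the register save area;
	 backing off RETURN_REGNUM * UNITS_PER_LONG recovers the area's
	 base for __reg_save_area.  */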
11370 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11371 TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
}
11376 /* Implement va_arg by updating the va_list structure
11377 VALIST as required to retrieve an argument of type
11378 TYPE, and returning that argument.
11380 Generates code equivalent to:
   if (integral value) {
     if (size  <= 4 && args.gpr < 5 ||
	 size  > 4 && args.gpr < 4 )
       ret = args.reg_save_area[args.gpr+8]
     else
       ret = *args.overflow_arg_area++;
   } else if (vector value) {
       ret = *args.overflow_arg_area;
       args.overflow_arg_area += size / 8;
   } else if (float value) {
     if (args.fgpr < 2)
       ret = args.reg_save_area[args.fpr+64]
     else
       ret = *args.overflow_arg_area++;
   } else if (aggregate value) {
     if (args.gpr < 5)
       ret = *args.reg_save_area[args.gpr]
     else
       ret = **args.overflow_arg_area++;
   } */
static tree
s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
		      gimple_seq *post_p ATTRIBUTE_UNUSED)
{
11407 tree f_gpr, f_fpr, f_ovf, f_sav;
11408 tree gpr, fpr, ovf, sav, reg, t, u;
11409 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
11410 tree lab_false, lab_over;
11411 tree addr = create_tmp_var (ptr_type_node, "addr");
  bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
			its slot.  */
11415 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11416 f_fpr = DECL_CHAIN (f_gpr);
11417 f_ovf = DECL_CHAIN (f_fpr);
11418 f_sav = DECL_CHAIN (f_ovf);
11420 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11421 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11422 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11424 /* The tree for args* cannot be shared between gpr/fpr and ovf since
11425 both appear on a lhs. */
11426 valist = unshare_expr (valist);
11427 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11429 size = int_size_in_bytes (type);
11431 s390_check_type_for_vector_abi (type, true, false);
  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "va_arg: aggregate type");
	  debug_tree (type);
	}

      /* Aggregates are passed by reference.  */
      indirect_p = 1;
      reg = gpr;
      n_reg = 1;
11446 /* kernel stack layout on 31 bit: It is assumed here that no padding
11447 will be added by s390_frame_info because for va_args always an even
11448 number of gprs has to be saved r15-r2 = 14 regs. */
11449 sav_ofs = 2 * UNITS_PER_LONG;
11450 sav_scale = UNITS_PER_LONG;
11451 size = UNITS_PER_LONG;
11452 max_reg = GP_ARG_NUM_REG - n_reg;
11453 left_align_p = false;
    }
  else if (s390_function_arg_vector (TYPE_MODE (type), type))
    {
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "va_arg: vector type");
	  debug_tree (type);
	}

      indirect_p = 0;
      reg = NULL_TREE;
      n_reg = 0;
      sav_ofs = 0;
      sav_scale = 8;
      max_reg = 0;
      left_align_p = true;
    }
  else if (s390_function_arg_float (TYPE_MODE (type), type))
    {
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "va_arg: float type");
	  debug_tree (type);
	}

      /* FP args go in FP registers, if present.  */
      indirect_p = 0;
      reg = fpr;
      n_reg = 1;
      sav_ofs = 16 * UNITS_PER_LONG;
      sav_scale = 8;
      max_reg = FP_ARG_NUM_REG - n_reg;
      left_align_p = false;
    }
  else
    {
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "va_arg: other type");
	  debug_tree (type);
	}

      /* Otherwise into GP registers.  */
      indirect_p = 0;
      reg = gpr;
      n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11499 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11501 /* kernel stack layout on 31 bit: It is assumed here that no padding
11502 will be added by s390_frame_info because for va_args always an even
11503 number of gprs has to be saved r15-r2 = 14 regs. */
11504 sav_ofs = 2 * UNITS_PER_LONG;
11506 if (size < UNITS_PER_LONG)
11507 sav_ofs += UNITS_PER_LONG - size;
11509 sav_scale = UNITS_PER_LONG;
11510 max_reg = GP_ARG_NUM_REG - n_reg;
11511 left_align_p = false;
11514 /* Pull the value out of the saved registers ... */
  if (reg != NULL_TREE)
    {
      /*
	if (reg > ((typeof (reg))max_reg))
	  goto lab_false;

	addr = sav + sav_ofs + reg * save_scale;

	goto lab_over;

	lab_false:
      */
11529 lab_false = create_artificial_label (UNKNOWN_LOCATION);
11530 lab_over = create_artificial_label (UNKNOWN_LOCATION);
11532 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
11533 t = build2 (GT_EXPR, boolean_type_node, reg, t);
11534 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11535 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11536 gimplify_and_add (t, pre_p);
11538 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11539 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
11540 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
11541 t = fold_build_pointer_plus (t, u);
11543 gimplify_assign (addr, t, pre_p);
11545 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
      gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
    }
11550 /* ... Otherwise out of the overflow area. */
  t = ovf;
  if (size < UNITS_PER_LONG && !left_align_p)
    t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
11556 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11558 gimplify_assign (addr, t, pre_p);
  if (size < UNITS_PER_LONG && left_align_p)
    t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
  else
    t = fold_build_pointer_plus_hwi (t, size);
11565 gimplify_assign (ovf, t, pre_p);
11567 if (reg != NULL_TREE)
11568 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
11571 /* Increment register save count. */
  if (n_reg > 0)
    {
      u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
		  fold_convert (TREE_TYPE (reg), size_int (n_reg)));
      gimplify_and_add (u, pre_p);
    }

  if (indirect_p)
    {
      t = build_pointer_type_for_mode (build_pointer_type (type),
				       ptr_mode, true);
      addr = fold_convert (t, addr);
      addr = build_va_arg_indirect_ref (addr);
    }
  else
    {
11589 t = build_pointer_type_for_mode (type, ptr_mode, true);
      addr = fold_convert (t, addr);
    }

  return build_va_arg_indirect_ref (addr);
}
/* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
   expanders.
   DEST  - Register location where CC will be stored.
   TDB   - Pointer to a 256 byte area where to store the transaction
	   diagnostic block.  NULL if TDB is not needed.
   RETRY - Retry count value.  If non-NULL a retry loop for CC2
	   is generated.
   CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
		    of the tbegin instruction pattern.  */
11607 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
11609 rtx retry_plus_two = gen_reg_rtx (SImode);
11610 rtx retry_reg = gen_reg_rtx (SImode);
11611 rtx_code_label *retry_label = NULL;
11613 if (retry != NULL_RTX)
11615 emit_move_insn (retry_reg, retry);
11616 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
11617 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
11618 retry_label = gen_label_rtx ();
11619 emit_label (retry_label);
11622 if (clobber_fprs_p)
11625 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11628 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11632 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11635 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
11636 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
11638 UNSPEC_CC_TO_INT));
11639 if (retry != NULL_RTX)
11641 const int CC0 = 1 << 3;
11642 const int CC1 = 1 << 2;
11643 const int CC3 = 1 << 0;
11645 rtx count = gen_reg_rtx (SImode);
11646 rtx_code_label *leave_label = gen_label_rtx ();
11648 /* Exit for success and permanent failures. */
11649 jump = s390_emit_jump (leave_label,
11650 gen_rtx_EQ (VOIDmode,
11651 gen_rtx_REG (CCRAWmode, CC_REGNUM),
11652 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
11653 LABEL_NUSES (leave_label) = 1;
11655 /* CC2 - transient failure. Perform retry with ppa. */
11656 emit_move_insn (count, retry_plus_two);
11657 emit_insn (gen_subsi3 (count, count, retry_reg));
11658 emit_insn (gen_tx_assist (count));
11659 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
11662 JUMP_LABEL (jump) = retry_label;
11663 LABEL_NUSES (retry_label) = 1;
11664 emit_label (leave_label);
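/* Editorial usage sketch: this expander backs the transactional-execution
   builtins.  Assuming the documented s390 HTM intrinsics from
   <htmintrin.h>, a retrying transaction looks like this.  */
#if 0
#include <htmintrin.h>

int
htm_increment_sketch (int *counter)
{
  struct __htm_tdb tdb;

  /* CC2 (transient failure) triggers up to 5 PPA-assisted retries
     via the retry loop emitted above.  */
  if (__builtin_tbegin_retry (&tdb, 5) == _HTM_TBEGIN_STARTED)
    {
      (*counter)++;
      __builtin_tend ();
      return 0;
    }
  return -1;  /* Persistent failure; fall back to a lock.  */
}
#endif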
11669 /* Return the decl for the target specific builtin with the function
11673 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
11675 if (fcode >= S390_BUILTIN_MAX)
11676 return error_mark_node;
11678 return s390_builtin_decls[fcode];
11681 /* We call mcount before the function prologue. So a profiled leaf
11682 function should stay a leaf function. */
11685 s390_keep_leaf_when_profiled ()
11690 /* Output assembly code for the trampoline template to
11691 stdio stream FILE.
11693 On S/390, we use gpr 1 internally in the trampoline code;
11694 gpr 0 is used to hold the static chain. */
11697 s390_asm_trampoline_template (FILE *file)
11700 op[0] = gen_rtx_REG (Pmode, 0);
11701 op[1] = gen_rtx_REG (Pmode, 1);
11705 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11706 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
11707 output_asm_insn ("br\t%1", op); /* 2 byte */
11708 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
11712 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11713 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
11714 output_asm_insn ("br\t%1", op); /* 2 byte */
11715 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
11719 /* Emit RTL insns to initialize the variable parts of a trampoline.
11720 FNADDR is an RTX for the address of the function's pure code.
11721 CXT is an RTX for the static chain value for the function. */
11724 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
11726 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
11729 emit_block_move (m_tramp, assemble_trampoline_template (),
11730 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
11732 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
11733 emit_move_insn (mem, cxt);
11734 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
11735 emit_move_insn (mem, fnaddr);
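/* Editorial layout sketch derived from the two functions above
   (64 bit, UNITS_PER_LONG == 8):

     offset  0  basr %r1,0            template, 2 bytes
     offset  2  lmg  %r0,%r1,14(%r1)  template, 6 bytes
     offset  8  br   %r1              template, 2 bytes
     offset 10  padding up to offset 16
     offset 16  static chain value    written by s390_trampoline_init
     offset 24  target function address

   The basr leaves the address of the lmg (offset 2) in %r1, so
   14(%r1) addresses offset 16 and the lmg loads the static chain
   into %r0 and the branch target into %r1.  */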
11738 /* Output assembler code to FILE to increment profiler label # LABELNO
11739 for profiling a function entry. */
11742 s390_function_profiler (FILE *file, int labelno)
11747 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
11749 fprintf (file, "# function profiler \n");
11751 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
11752 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
11753 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
11755 op[2] = gen_rtx_REG (Pmode, 1);
11756 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
11757 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
11759 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
11762 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
11763 op[4] = gen_rtx_CONST (Pmode, op[4]);
11768 output_asm_insn ("stg\t%0,%1", op);
11769 output_asm_insn ("larl\t%2,%3", op);
11770 output_asm_insn ("brasl\t%0,%4", op);
11771 output_asm_insn ("lg\t%0,%1", op);
11773 else if (!flag_pic)
11775 op[6] = gen_label_rtx ();
11777 output_asm_insn ("st\t%0,%1", op);
11778 output_asm_insn ("bras\t%2,%l6", op);
11779 output_asm_insn (".long\t%4", op);
11780 output_asm_insn (".long\t%3", op);
11781 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
11782 output_asm_insn ("l\t%0,0(%2)", op);
11783 output_asm_insn ("l\t%2,4(%2)", op);
11784 output_asm_insn ("basr\t%0,%0", op);
11785 output_asm_insn ("l\t%0,%1", op);
11789 op[5] = gen_label_rtx ();
11790 op[6] = gen_label_rtx ();
11792 output_asm_insn ("st\t%0,%1", op);
11793 output_asm_insn ("bras\t%2,%l6", op);
11794 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
11795 output_asm_insn (".long\t%4-%l5", op);
11796 output_asm_insn (".long\t%3-%l5", op);
11797 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
11798 output_asm_insn ("lr\t%0,%2", op);
11799 output_asm_insn ("a\t%0,0(%2)", op);
11800 output_asm_insn ("a\t%2,4(%2)", op);
11801 output_asm_insn ("basr\t%0,%0", op);
11802 output_asm_insn ("l\t%0,%1", op);
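/* Editorial sketch: for TARGET_64BIT the branch above emits a sequence
   along the lines of

     stg   %r14,8(%r15)    # save the return address
     larl  %r1,.LP0        # address of the count label
     brasl %r14,_mcount    # call the profiler (via PLT when PIC)
     lg    %r14,8(%r15)    # restore the return address
*/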
11806 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
11807 into its SYMBOL_REF_FLAGS. */
11810 s390_encode_section_info (tree decl, rtx rtl, int first)
11812 default_encode_section_info (decl, rtl, first);
11814 if (TREE_CODE (decl) == VAR_DECL)
11816 /* If a variable has a forced alignment to < 2 bytes, mark it
11817 with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
11818 operand. */
11819 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
11820 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
11821 if (!DECL_SIZE (decl)
11822 || !DECL_ALIGN (decl)
11823 || !tree_fits_shwi_p (DECL_SIZE (decl))
11824 || (DECL_ALIGN (decl) <= 64
11825 && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl))))
11826 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
11829 /* Literal pool references don't have a decl so they are handled
11830 differently here. We rely on the information in the MEM_ALIGN
11831 entry to decide upon natural alignment. */
11833 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
11834 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
11835 && (MEM_ALIGN (rtl) == 0
11836 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
11837 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
11838 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
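/* Editorial example (hypothetical declaration): for

     extern char buf[6];

   DECL_SIZE is 48 bits while DECL_ALIGN is 8, so the symbol is flagged
   SYMBOL_FLAG_NOT_NATURALLY_ALIGNED and PC-relative instructions that
   require naturally aligned operands will not access it directly.  */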
11841 /* Output thunk to FILE that implements a C++ virtual function call (with
11842 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
11843 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
11844 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
11845 relative to the resulting this pointer. */
11848 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
11849 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11855 /* Make sure unwind info is emitted for the thunk if needed. */
11856 final_start_function (emit_barrier (), file, 1);
11858 /* Operand 0 is the target function. */
11859 op[0] = XEXP (DECL_RTL (function), 0);
11860 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
11863 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
11864 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
11865 op[0] = gen_rtx_CONST (Pmode, op[0]);
11868 /* Operand 1 is the 'this' pointer. */
11869 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11870 op[1] = gen_rtx_REG (Pmode, 3);
11872 op[1] = gen_rtx_REG (Pmode, 2);
11874 /* Operand 2 is the delta. */
11875 op[2] = GEN_INT (delta);
11877 /* Operand 3 is the vcall_offset. */
11878 op[3] = GEN_INT (vcall_offset);
11880 /* Operand 4 is the temporary register. */
11881 op[4] = gen_rtx_REG (Pmode, 1);
11883 /* Operands 5 to 8 can be used as labels. */
11889 /* Operand 9 can be used as a temporary register. */
11892 /* Generate code. */
11895 /* Set up the literal pool pointer if required. */
11896 if ((!DISP_IN_RANGE (delta)
11897 && !CONST_OK_FOR_K (delta)
11898 && !CONST_OK_FOR_Os (delta))
11899 || (!DISP_IN_RANGE (vcall_offset)
11900 && !CONST_OK_FOR_K (vcall_offset)
11901 && !CONST_OK_FOR_Os (vcall_offset)))
11903 op[5] = gen_label_rtx ();
11904 output_asm_insn ("larl\t%4,%5", op);
11907 /* Add DELTA to this pointer. */
11910 if (CONST_OK_FOR_J (delta))
11911 output_asm_insn ("la\t%1,%2(%1)", op);
11912 else if (DISP_IN_RANGE (delta))
11913 output_asm_insn ("lay\t%1,%2(%1)", op);
11914 else if (CONST_OK_FOR_K (delta))
11915 output_asm_insn ("aghi\t%1,%2", op);
11916 else if (CONST_OK_FOR_Os (delta))
11917 output_asm_insn ("agfi\t%1,%2", op);
11920 op[6] = gen_label_rtx ();
11921 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
11925 /* Perform vcall adjustment. */
11928 if (DISP_IN_RANGE (vcall_offset))
11930 output_asm_insn ("lg\t%4,0(%1)", op);
11931 output_asm_insn ("ag\t%1,%3(%4)", op);
11933 else if (CONST_OK_FOR_K (vcall_offset))
11935 output_asm_insn ("lghi\t%4,%3", op);
11936 output_asm_insn ("ag\t%4,0(%1)", op);
11937 output_asm_insn ("ag\t%1,0(%4)", op);
11939 else if (CONST_OK_FOR_Os (vcall_offset))
11941 output_asm_insn ("lgfi\t%4,%3", op);
11942 output_asm_insn ("ag\t%4,0(%1)", op);
11943 output_asm_insn ("ag\t%1,0(%4)", op);
11947 op[7] = gen_label_rtx ();
11948 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
11949 output_asm_insn ("ag\t%4,0(%1)", op);
11950 output_asm_insn ("ag\t%1,0(%4)", op);
11954 /* Jump to target. */
11955 output_asm_insn ("jg\t%0", op);
11957 /* Output literal pool if required. */
11960 output_asm_insn (".align\t4", op);
11961 targetm.asm_out.internal_label (file, "L",
11962 CODE_LABEL_NUMBER (op[5]));
11966 targetm.asm_out.internal_label (file, "L",
11967 CODE_LABEL_NUMBER (op[6]));
11968 output_asm_insn (".long\t%2", op);
11972 targetm.asm_out.internal_label (file, "L",
11973 CODE_LABEL_NUMBER (op[7]));
11974 output_asm_insn (".long\t%3", op);
11979 /* Set up the base pointer if required. */
11981 || (!DISP_IN_RANGE (delta)
11982 && !CONST_OK_FOR_K (delta)
11983 && !CONST_OK_FOR_Os (delta))
11984 || (!DISP_IN_RANGE (vcall_offset)
11985 && !CONST_OK_FOR_K (vcall_offset)
11986 && !CONST_OK_FOR_Os (vcall_offset)))
11988 op[5] = gen_label_rtx ();
11989 output_asm_insn ("basr\t%4,0", op);
11990 targetm.asm_out.internal_label (file, "L",
11991 CODE_LABEL_NUMBER (op[5]));
11994 /* Add DELTA to this pointer. */
11997 if (CONST_OK_FOR_J (delta))
11998 output_asm_insn ("la\t%1,%2(%1)", op);
11999 else if (DISP_IN_RANGE (delta))
12000 output_asm_insn ("lay\t%1,%2(%1)", op);
12001 else if (CONST_OK_FOR_K (delta))
12002 output_asm_insn ("ahi\t%1,%2", op);
12003 else if (CONST_OK_FOR_Os (delta))
12004 output_asm_insn ("afi\t%1,%2", op);
12007 op[6] = gen_label_rtx ();
12008 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12012 /* Perform vcall adjustment. */
12015 if (CONST_OK_FOR_J (vcall_offset))
12017 output_asm_insn ("l\t%4,0(%1)", op);
12018 output_asm_insn ("a\t%1,%3(%4)", op);
12020 else if (DISP_IN_RANGE (vcall_offset))
12022 output_asm_insn ("l\t%4,0(%1)", op);
12023 output_asm_insn ("ay\t%1,%3(%4)", op);
12025 else if (CONST_OK_FOR_K (vcall_offset))
12027 output_asm_insn ("lhi\t%4,%3", op);
12028 output_asm_insn ("a\t%4,0(%1)", op);
12029 output_asm_insn ("a\t%1,0(%4)", op);
12031 else if (CONST_OK_FOR_Os (vcall_offset))
12033 output_asm_insn ("iilf\t%4,%3", op);
12034 output_asm_insn ("a\t%4,0(%1)", op);
12035 output_asm_insn ("a\t%1,0(%4)", op);
12039 op[7] = gen_label_rtx ();
12040 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12041 output_asm_insn ("a\t%4,0(%1)", op);
12042 output_asm_insn ("a\t%1,0(%4)", op);
12045 /* We had to clobber the base pointer register.
12046 Set it up again (with a different base). */
12047 op[5] = gen_label_rtx ();
12048 output_asm_insn ("basr\t%4,0", op);
12049 targetm.asm_out.internal_label (file, "L",
12050 CODE_LABEL_NUMBER (op[5]));
12053 /* Jump to target. */
12054 op[8] = gen_label_rtx ();
12057 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12058 else if (!nonlocal)
12059 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12060 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12061 else if (flag_pic == 1)
12063 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12064 output_asm_insn ("l\t%4,%0(%4)", op);
12066 else if (flag_pic == 2)
12068 op[9] = gen_rtx_REG (Pmode, 0);
12069 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12070 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12071 output_asm_insn ("ar\t%4,%9", op);
12072 output_asm_insn ("l\t%4,0(%4)", op);
12075 output_asm_insn ("br\t%4", op);
12077 /* Output literal pool. */
12078 output_asm_insn (".align\t4", op);
12080 if (nonlocal && flag_pic == 2)
12081 output_asm_insn (".long\t%0", op);
12084 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12085 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12088 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12090 output_asm_insn (".long\t%0", op);
12092 output_asm_insn (".long\t%0-%5", op);
12096 targetm.asm_out.internal_label (file, "L",
12097 CODE_LABEL_NUMBER (op[6]));
12098 output_asm_insn (".long\t%2", op);
12102 targetm.asm_out.internal_label (file, "L",
12103 CODE_LABEL_NUMBER (op[7]));
12104 output_asm_insn (".long\t%3", op);
12107 final_end_function ();
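/* Editorial sketch: for a 64-bit thunk with a small DELTA and a zero
   VCALL_OFFSET the code above reduces to

     la %r2,<delta>(%r2)   # adjust the this pointer
     jg <function>         # tail call the real method
*/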
12111 s390_valid_pointer_mode (machine_mode mode)
12113 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12116 /* Checks whether the given CALL_EXPR would use a caller-
12117 saved register. This is used to decide whether sibling call
12118 optimization could be performed on the respective function
12119 call. */
12122 s390_call_saved_register_used (tree call_expr)
12124 CUMULATIVE_ARGS cum_v;
12125 cumulative_args_t cum;
12132 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12133 cum = pack_cumulative_args (&cum_v);
12135 for (i = 0; i < call_expr_nargs (call_expr); i++)
12137 parameter = CALL_EXPR_ARG (call_expr, i);
12138 gcc_assert (parameter);
12140 /* For an undeclared variable passed as parameter we will get
12141 an ERROR_MARK node here. */
12142 if (TREE_CODE (parameter) == ERROR_MARK)
12145 type = TREE_TYPE (parameter);
12148 mode = TYPE_MODE (type);
12151 /* We assume that in the target function all parameters are
12152 named. This only has an impact on vector argument register
12153 usage, none of which is call-saved. */
12154 if (pass_by_reference (&cum_v, mode, type, true))
12157 type = build_pointer_type (type);
12160 parm_rtx = s390_function_arg (cum, mode, type, true);
12162 s390_function_arg_advance (cum, mode, type, true);
12167 if (REG_P (parm_rtx))
12170 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12172 if (!call_used_regs[reg + REGNO (parm_rtx)])
12176 if (GET_CODE (parm_rtx) == PARALLEL)
12180 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12182 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12184 gcc_assert (REG_P (r));
12187 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12189 if (!call_used_regs[reg + REGNO (r)])
12198 /* Return true if the given call expression can be
12199 turned into a sibling call.
12200 DECL holds the declaration of the function to be called whereas
12201 EXP is the call expression itself. */
12204 s390_function_ok_for_sibcall (tree decl, tree exp)
12206 /* The TPF epilogue uses register 1. */
12207 if (TARGET_TPF_PROFILING)
12210 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12211 which would have to be restored before the sibcall. */
12212 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12215 /* Register 6 on s390 is available as an argument register but is
12216 call-saved. This makes functions needing this register for
12217 arguments not suitable for sibcalls. */
12218 return !s390_call_saved_register_used (exp);
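/* Editorial example: integer arguments are passed in %r2-%r6, and %r6
   is call-saved, so s390_call_saved_register_used rejects a sibcall
   for the five-argument call below while a four-argument call would
   be fine.  */
#if 0
extern void callee5 (int, int, int, int, int);

void
caller (void)
{
  callee5 (0, 1, 2, 3, 4);  /* The fifth argument lands in %r6.  */
}
#endif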
12221 /* Return the fixed registers used for condition codes. */
12224 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12226 *p1 = CC_REGNUM;
12227 *p2 = INVALID_REGNUM;
12232 /* This function is used by the call expanders of the machine description.
12233 It emits the call insn itself together with the necessary operations
12234 to adjust the target address and returns the emitted insn.
12235 ADDR_LOCATION is the target address rtx
12236 TLS_CALL the location of the thread-local symbol
12237 RESULT_REG the register where the result of the call should be stored
12238 RETADDR_REG the register where the return address should be stored
12239 If this parameter is NULL_RTX the call is considered
12240 to be a sibling call. */
12243 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12246 bool plt_call = false;
12252 /* Direct function calls need special treatment. */
12253 if (GET_CODE (addr_location) == SYMBOL_REF)
12255 /* When calling a global routine in PIC mode, we must
12256 replace the symbol itself with the PLT stub. */
12257 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12259 if (retaddr_reg != NULL_RTX)
12261 addr_location = gen_rtx_UNSPEC (Pmode,
12262 gen_rtvec (1, addr_location),
12264 addr_location = gen_rtx_CONST (Pmode, addr_location);
12268 /* For -fpic code the PLT entries might use r12 which is
12269 call-saved. Therefore we cannot do a sibcall when
12270 calling directly using a symbol ref. When reaching
12271 this point we decided (in s390_function_ok_for_sibcall)
12272 to do a sibcall for a function pointer but one of the
12273 optimizers was able to get rid of the function pointer
12274 by propagating the symbol ref into the call. This
12275 optimization is illegal for S/390 so we turn the direct
12276 call into an indirect call again. */
12277 addr_location = force_reg (Pmode, addr_location);
12280 /* Unless we can use the bras(l) insn, force the
12281 routine address into a register. */
12282 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12285 addr_location = legitimize_pic_address (addr_location, 0);
12287 addr_location = force_reg (Pmode, addr_location);
12291 /* If it is already an indirect call or the code above moved the
12292 SYMBOL_REF to somewhere else, make sure the address can be found in
12293 register 1. */
12294 if (retaddr_reg == NULL_RTX
12295 && GET_CODE (addr_location) != SYMBOL_REF
12298 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12299 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12302 addr_location = gen_rtx_MEM (QImode, addr_location);
12303 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12305 if (result_reg != NULL_RTX)
12306 call = gen_rtx_SET (result_reg, call);
12308 if (retaddr_reg != NULL_RTX)
12310 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12312 if (tls_call != NULL_RTX)
12313 vec = gen_rtvec (3, call, clobber,
12314 gen_rtx_USE (VOIDmode, tls_call));
12316 vec = gen_rtvec (2, call, clobber);
12318 call = gen_rtx_PARALLEL (VOIDmode, vec);
12321 insn = emit_call_insn (call);
12323 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12324 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12326 /* s390_function_ok_for_sibcall should
12327 have denied sibcalls in this case. */
12328 gcc_assert (retaddr_reg != NULL_RTX);
12329 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12334 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12337 s390_conditional_register_usage (void)
12343 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12344 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12346 if (TARGET_CPU_ZARCH)
12348 fixed_regs[BASE_REGNUM] = 0;
12349 call_used_regs[BASE_REGNUM] = 0;
12350 fixed_regs[RETURN_REGNUM] = 0;
12351 call_used_regs[RETURN_REGNUM] = 0;
12355 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
12356 call_used_regs[i] = call_really_used_regs[i] = 0;
12360 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
12361 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
12364 if (TARGET_SOFT_FLOAT)
12366 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
12367 call_used_regs[i] = fixed_regs[i] = 1;
12370 /* Disable v16 - v31 for non-vector target. */
12373 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
12374 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
12378 /* Corresponding function to eh_return expander. */
12380 static GTY(()) rtx s390_tpf_eh_return_symbol;
12382 s390_emit_tpf_eh_return (rtx target)
12387 if (!s390_tpf_eh_return_symbol)
12388 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
12390 reg = gen_rtx_REG (Pmode, 2);
12391 orig_ra = gen_rtx_REG (Pmode, 3);
12393 emit_move_insn (reg, target);
12394 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
12395 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
12396 gen_rtx_REG (Pmode, RETURN_REGNUM));
12397 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
12398 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
12400 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
12403 /* Rework the prologue/epilogue to avoid saving/restoring
12404 registers unnecessarily. */
12407 s390_optimize_prologue (void)
12409 rtx_insn *insn, *new_insn, *next_insn;
12411 /* Do a final recompute of the frame-related data. */
12412 s390_optimize_register_info ();
12414 /* If all special registers are in fact used, there's nothing we
12415 can do, so no point in walking the insn list. */
12417 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
12418 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
12419 && (TARGET_CPU_ZARCH
12420 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
12421 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
12424 /* Search for prologue/epilogue insns and replace them. */
12426 for (insn = get_insns (); insn; insn = next_insn)
12428 int first, last, off;
12429 rtx set, base, offset;
12432 next_insn = NEXT_INSN (insn);
12434 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
12437 pat = PATTERN (insn);
12439 /* Remove ldgr/lgdr instructions used for saving and restoring
12440 GPRs if possible. */
12442 && GET_CODE (pat) == SET
12443 && GET_MODE (SET_SRC (pat)) == DImode
12444 && REG_P (SET_SRC (pat))
12445 && REG_P (SET_DEST (pat)))
12447 int src_regno = REGNO (SET_SRC (pat));
12448 int dest_regno = REGNO (SET_DEST (pat));
12452 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
12453 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
12456 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
12457 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
12459 /* GPR must be call-saved, FPR must be call-clobbered. */
12460 if (!call_really_used_regs[fpr_regno]
12461 || call_really_used_regs[gpr_regno])
12464 /* It must not happen that what we once saved in an FPR now
12465 needs a stack slot. */
12466 gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);
12468 if (cfun_gpr_save_slot (gpr_regno) == 0)
12470 remove_insn (insn);
12475 if (GET_CODE (pat) == PARALLEL
12476 && store_multiple_operation (pat, VOIDmode))
12478 set = XVECEXP (pat, 0, 0);
12479 first = REGNO (SET_SRC (set));
12480 last = first + XVECLEN (pat, 0) - 1;
12481 offset = const0_rtx;
12482 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12483 off = INTVAL (offset);
12485 if (GET_CODE (base) != REG || off < 0)
12487 if (cfun_frame_layout.first_save_gpr != -1
12488 && (cfun_frame_layout.first_save_gpr < first
12489 || cfun_frame_layout.last_save_gpr > last))
12491 if (REGNO (base) != STACK_POINTER_REGNUM
12492 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12494 if (first > BASE_REGNUM || last < BASE_REGNUM)
12497 if (cfun_frame_layout.first_save_gpr != -1)
12499 rtx s_pat = save_gprs (base,
12500 off + (cfun_frame_layout.first_save_gpr
12501 - first) * UNITS_PER_LONG,
12502 cfun_frame_layout.first_save_gpr,
12503 cfun_frame_layout.last_save_gpr);
12504 new_insn = emit_insn_before (s_pat, insn);
12505 INSN_ADDRESSES_NEW (new_insn, -1);
12508 remove_insn (insn);
12512 if (cfun_frame_layout.first_save_gpr == -1
12513 && GET_CODE (pat) == SET
12514 && GENERAL_REG_P (SET_SRC (pat))
12515 && GET_CODE (SET_DEST (pat)) == MEM)
12518 first = REGNO (SET_SRC (set));
12519 offset = const0_rtx;
12520 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12521 off = INTVAL (offset);
12523 if (GET_CODE (base) != REG || off < 0)
12525 if (REGNO (base) != STACK_POINTER_REGNUM
12526 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12529 remove_insn (insn);
12533 if (GET_CODE (pat) == PARALLEL
12534 && load_multiple_operation (pat, VOIDmode))
12536 set = XVECEXP (pat, 0, 0);
12537 first = REGNO (SET_DEST (set));
12538 last = first + XVECLEN (pat, 0) - 1;
12539 offset = const0_rtx;
12540 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12541 off = INTVAL (offset);
12543 if (GET_CODE (base) != REG || off < 0)
12546 if (cfun_frame_layout.first_restore_gpr != -1
12547 && (cfun_frame_layout.first_restore_gpr < first
12548 || cfun_frame_layout.last_restore_gpr > last))
12550 if (REGNO (base) != STACK_POINTER_REGNUM
12551 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12553 if (first > BASE_REGNUM || last < BASE_REGNUM)
12556 if (cfun_frame_layout.first_restore_gpr != -1)
12558 rtx rpat = restore_gprs (base,
12559 off + (cfun_frame_layout.first_restore_gpr
12560 - first) * UNITS_PER_LONG,
12561 cfun_frame_layout.first_restore_gpr,
12562 cfun_frame_layout.last_restore_gpr);
12564 /* Remove REG_CFA_RESTOREs for registers that we no
12565 longer need to save. */
12566 REG_NOTES (rpat) = REG_NOTES (insn);
12567 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
12568 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
12569 && ((int) REGNO (XEXP (*ptr, 0))
12570 < cfun_frame_layout.first_restore_gpr))
12571 *ptr = XEXP (*ptr, 1);
12573 ptr = &XEXP (*ptr, 1);
12574 new_insn = emit_insn_before (rpat, insn);
12575 RTX_FRAME_RELATED_P (new_insn) = 1;
12576 INSN_ADDRESSES_NEW (new_insn, -1);
12579 remove_insn (insn);
12583 if (cfun_frame_layout.first_restore_gpr == -1
12584 && GET_CODE (pat) == SET
12585 && GENERAL_REG_P (SET_DEST (pat))
12586 && GET_CODE (SET_SRC (pat)) == MEM)
12589 first = REGNO (SET_DEST (set));
12590 offset = const0_rtx;
12591 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12592 off = INTVAL (offset);
12594 if (GET_CODE (base) != REG || off < 0)
12597 if (REGNO (base) != STACK_POINTER_REGNUM
12598 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12601 remove_insn (insn);
12607 /* On z10 and later the dynamic branch prediction must see the
12608 backward jump within a certain window. If not, it falls back to
12609 the static prediction. This function rearranges the loop backward
12610 branch in a way which makes the static prediction always correct.
12611 The function returns true if it added an instruction. */
12613 s390_fix_long_loop_prediction (rtx_insn *insn)
12615 rtx set = single_set (insn);
12616 rtx code_label, label_ref, new_label;
12617 rtx_insn *uncond_jump;
12618 rtx_insn *cur_insn;
12622 /* This will exclude branch on count and branch on index patterns
12623 since these are correctly statically predicted. */
12625 || SET_DEST (set) != pc_rtx
12626 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
12629 /* Skip conditional returns. */
12630 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
12631 && XEXP (SET_SRC (set), 2) == pc_rtx)
12634 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
12635 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
12637 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
12639 code_label = XEXP (label_ref, 0);
12641 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
12642 || INSN_ADDRESSES (INSN_UID (insn)) == -1
12643 || (INSN_ADDRESSES (INSN_UID (insn))
12644 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
12647 for (distance = 0, cur_insn = PREV_INSN (insn);
12648 distance < PREDICT_DISTANCE - 6;
12649 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
12650 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
12653 new_label = gen_label_rtx ();
12654 uncond_jump = emit_jump_insn_after (
12655 gen_rtx_SET (pc_rtx,
12656 gen_rtx_LABEL_REF (VOIDmode, code_label)),
12658 emit_label_after (new_label, uncond_jump);
12660 tmp = XEXP (SET_SRC (set), 1);
12661 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
12662 XEXP (SET_SRC (set), 2) = tmp;
12663 INSN_CODE (insn) = -1;
12665 XEXP (label_ref, 0) = new_label;
12666 JUMP_LABEL (insn) = new_label;
12667 JUMP_LABEL (uncond_jump) = code_label;
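/* Editorial before/after sketch of the rewrite above:

     before:                        after:
     .Lloop:                        .Lloop:
       ...                            ...
       jne  .Lloop  # backward        je   .Lnew   # inverted, forward
                                      j    .Lloop  # uncond backward
                                    .Lnew:

   The unconditional backward branch is always predicted correctly by
   the static predictor, whatever its distance to .Lloop.  */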
12672 /* Returns 1 if INSN reads the value of REG for purposes not related
12673 to addressing of memory, and 0 otherwise. */
12675 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
12677 return reg_referenced_p (reg, PATTERN (insn))
12678 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
12681 /* Starting from INSN find_cond_jump looks downwards in the insn
12682 stream for a single jump insn which is the last user of the
12683 condition code set in INSN. */
12685 find_cond_jump (rtx_insn *insn)
12687 for (; insn; insn = NEXT_INSN (insn))
12691 if (LABEL_P (insn))
12694 if (!JUMP_P (insn))
12696 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
12701 /* This will be triggered by a return. */
12702 if (GET_CODE (PATTERN (insn)) != SET)
12705 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
12706 ite = SET_SRC (PATTERN (insn));
12708 if (GET_CODE (ite) != IF_THEN_ELSE)
12711 cc = XEXP (XEXP (ite, 0), 0);
12712 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
12715 if (find_reg_note (insn, REG_DEAD, cc))
12723 /* Swap the condition in COND and the operands in OP0 and OP1 so that
12724 the semantics do not change. If NULL_RTX is passed as COND the
12725 function tries to find the conditional jump starting with INSN. */
12727 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
12731 if (cond == NULL_RTX)
12733 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
12734 rtx set = jump ? single_set (jump) : NULL_RTX;
12736 if (set == NULL_RTX)
12739 cond = XEXP (SET_SRC (set), 0);
12744 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
12747 /* On z10, instructions of the compare-and-branch family have the
12748 property to access the register occurring as second operand with
12749 its bits complemented. If such a compare is grouped with a second
12750 instruction that accesses the same register non-complemented, and
12751 if that register's value is delivered via a bypass, then the
12752 pipeline recycles, thereby causing significant performance decline.
12753 This function locates such situations and exchanges the two
12754 operands of the compare. The function returns true whenever it
12755 added an insn. */
12757 s390_z10_optimize_cmp (rtx_insn *insn)
12759 rtx_insn *prev_insn, *next_insn;
12760 bool insn_added_p = false;
12761 rtx cond, *op0, *op1;
12763 if (GET_CODE (PATTERN (insn)) == PARALLEL)
12765 /* Handle compare and branch and branch on count
12766 insns. */
12767 rtx pattern = single_set (insn);
12770 || SET_DEST (pattern) != pc_rtx
12771 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
12774 cond = XEXP (SET_SRC (pattern), 0);
12775 op0 = &XEXP (cond, 0);
12776 op1 = &XEXP (cond, 1);
12778 else if (GET_CODE (PATTERN (insn)) == SET)
12782 /* Handle normal compare instructions. */
12783 src = SET_SRC (PATTERN (insn));
12784 dest = SET_DEST (PATTERN (insn));
12787 || !CC_REGNO_P (REGNO (dest))
12788 || GET_CODE (src) != COMPARE)
12791 /* s390_swap_cmp will try to find the conditional
12792 jump when passing NULL_RTX as condition. */
12794 op0 = &XEXP (src, 0);
12795 op1 = &XEXP (src, 1);
12800 if (!REG_P (*op0) || !REG_P (*op1))
12803 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
12806 /* Swap the COMPARE arguments and its mask if there is a
12807 conflicting access in the previous insn. */
12808 prev_insn = prev_active_insn (insn);
12809 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
12810 && reg_referenced_p (*op1, PATTERN (prev_insn)))
12811 s390_swap_cmp (cond, op0, op1, insn);
12813 /* Check if there is a conflict with the next insn. If there
12814 was no conflict with the previous insn, then swap the
12815 COMPARE arguments and its mask. If we already swapped
12816 the operands, or if swapping them would cause a conflict
12817 with the previous insn, issue a NOP after the COMPARE in
12818 order to separate the two instructions. */
12819 next_insn = next_active_insn (insn);
12820 if (next_insn != NULL_RTX && INSN_P (next_insn)
12821 && s390_non_addr_reg_read_p (*op1, next_insn))
12823 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
12824 && s390_non_addr_reg_read_p (*op0, prev_insn))
12826 if (REGNO (*op1) == 0)
12827 emit_insn_after (gen_nop1 (), insn);
12829 emit_insn_after (gen_nop (), insn);
12830 insn_added_p = true;
12833 s390_swap_cmp (cond, op0, op1, insn);
12835 return insn_added_p;
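/* Editorial example: given

     lr   %r4,%r9           # previous insn delivers %r4 via bypass
     crj  %r3,%r4,2,.Lx     # branch if %r3 > %r4; %r4 read complemented

   the compare operands and the branch mask are swapped to

     crj  %r4,%r3,4,.Lx     # branch if %r4 < %r3

   so %r4 is no longer accessed in complemented form.  */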
12838 /* Perform machine-dependent processing. */
12843 bool pool_overflow = false;
12844 int hw_before, hw_after;
12846 /* Make sure all splits have been performed; splits after
12847 machine_dependent_reorg might confuse insn length counts. */
12848 split_all_insns_noflow ();
12850 /* Install the main literal pool and the associated base
12851 register load insns.
12853 In addition, there are two problematic situations we need
12856 - the literal pool might be > 4096 bytes in size, so that
12857 some of its elements cannot be directly accessed
12859 - a branch target might be > 64K away from the branch, so that
12860 it is not possible to use a PC-relative instruction.
12862 To fix those, we split the single literal pool into multiple
12863 pool chunks, reloading the pool base register at various
12864 points throughout the function to ensure it always points to
12865 the pool chunk the following code expects, and / or replace
12866 PC-relative branches by absolute branches.
12868 However, the two problems are interdependent: splitting the
12869 literal pool can move a branch further away from its target,
12870 causing the 64K limit to overflow, and on the other hand,
12871 replacing a PC-relative branch by an absolute branch means
12872 we need to put the branch target address into the literal
12873 pool, possibly causing it to overflow.
12875 So, we loop trying to fix up both problems until we manage
12876 to satisfy both conditions at the same time. Note that the
12877 loop is guaranteed to terminate as every pass of the loop
12878 strictly decreases the total number of PC-relative branches
12879 in the function. (This is not completely true as there
12880 might be branch-over-pool insns introduced by chunkify_start.
12881 Those never need to be split however.) */
12885 struct constant_pool *pool = NULL;
12887 /* Collect the literal pool. */
12888 if (!pool_overflow)
12890 pool = s390_mainpool_start ();
12892 pool_overflow = true;
12895 /* If literal pool overflowed, start to chunkify it. */
12897 pool = s390_chunkify_start ();
12899 /* Split out-of-range branches. If this has created new
12900 literal pool entries, cancel current chunk list and
12901 recompute it. zSeries machines have large branch
12902 instructions, so we never need to split a branch. */
12903 if (!TARGET_CPU_ZARCH && s390_split_branches ())
12906 s390_chunkify_cancel (pool);
12908 s390_mainpool_cancel (pool);
12913 /* If we made it up to here, both conditions are satisfied.
12914 Finish up literal pool related changes. */
12916 s390_chunkify_finish (pool);
12918 s390_mainpool_finish (pool);
12920 /* We're done splitting branches. */
12921 cfun->machine->split_branches_pending_p = false;
12925 /* Generate out-of-pool execute target insns. */
12926 if (TARGET_CPU_ZARCH)
12928 rtx_insn *insn, *target;
12931 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12933 label = s390_execute_label (insn);
12937 gcc_assert (label != const0_rtx);
12939 target = emit_label (XEXP (label, 0));
12940 INSN_ADDRESSES_NEW (target, -1);
12942 target = emit_insn (s390_execute_target (insn));
12943 INSN_ADDRESSES_NEW (target, -1);
12947 /* Try to optimize prologue and epilogue further. */
12948 s390_optimize_prologue ();
12950 /* Walk over the insns and do some >=z10 specific changes. */
12951 if (s390_tune >= PROCESSOR_2097_Z10)
12954 bool insn_added_p = false;
12956 /* The insn lengths and addresses have to be up to date for the
12957 following manipulations. */
12958 shorten_branches (get_insns ());
12960 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12962 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
12966 insn_added_p |= s390_fix_long_loop_prediction (insn);
12968 if ((GET_CODE (PATTERN (insn)) == PARALLEL
12969 || GET_CODE (PATTERN (insn)) == SET)
12970 && s390_tune == PROCESSOR_2097_Z10)
12971 insn_added_p |= s390_z10_optimize_cmp (insn);
12974 /* Adjust branches if we added new instructions. */
12976 shorten_branches (get_insns ());
12979 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
12984 /* Insert NOPs for hotpatching. */
12985 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12986 /* Emit NOPs
12987 1. inside the area covered by debug information to allow setting
12988 breakpoints at the NOPs,
12989 2. before any insn which results in an asm instruction,
12990 3. before in-function labels to avoid jumping to the NOPs, for
12991 example as part of a loop,
12992 4. before any barrier in case the function is completely empty
12993 (__builtin_unreachable ()) and has neither internal labels nor
12994 active insns. */
12996 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
12998 /* Output a series of NOPs before the first active insn. */
12999 while (insn && hw_after > 0)
13001 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13003 emit_insn_before (gen_nop_6_byte (), insn);
13006 else if (hw_after >= 2)
13008 emit_insn_before (gen_nop_4_byte (), insn);
13013 emit_insn_before (gen_nop_2_byte (), insn);
13020 /* Return true if INSN is an fp load insn writing register REGNO. */
13022 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13025 enum attr_type flag = s390_safe_attr_type (insn);
13027 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13030 set = single_set (insn);
13032 if (set == NULL_RTX)
13035 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13038 if (REGNO (SET_DEST (set)) != regno)
13044 /* This value describes the distance to be avoided between an
13045 arithmetic fp instruction and an fp load writing the same register.
13046 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
13047 fine; only the exact value has to be avoided. Otherwise the FP
13048 pipeline will throw an exception causing a major penalty. */
13049 #define Z10_EARLYLOAD_DISTANCE 7
13051 /* Rearrange the ready list in order to avoid the situation described
13052 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13053 moved to the very end of the ready list. */
13055 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13057 unsigned int regno;
13058 int nready = *nready_p;
13063 enum attr_type flag;
13066 /* Skip DISTANCE - 1 active insns. */
13067 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13068 distance > 0 && insn != NULL_RTX;
13069 distance--, insn = prev_active_insn (insn))
13070 if (CALL_P (insn) || JUMP_P (insn))
13073 if (insn == NULL_RTX)
13076 set = single_set (insn);
13078 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13079 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13082 flag = s390_safe_attr_type (insn);
13084 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13087 regno = REGNO (SET_DEST (set));
13090 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13097 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
13102 /* The s390_sched_state variable tracks the state of the current or
13103 the last instruction group.
13105 0,1,2 number of instructions scheduled in the current group
13106 3 the last group is complete - normal insns
13107 4 the last group was a cracked/expanded insn */
13109 static int s390_sched_state;
13111 #define S390_OOO_SCHED_STATE_NORMAL 3
13112 #define S390_OOO_SCHED_STATE_CRACKED 4
13114 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
13115 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
13116 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
13117 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
13119 static unsigned int
13120 s390_get_sched_attrmask (rtx_insn *insn)
13122 unsigned int mask = 0;
13124 if (get_attr_ooo_cracked (insn))
13125 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
13126 if (get_attr_ooo_expanded (insn))
13127 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
13128 if (get_attr_ooo_endgroup (insn))
13129 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
13130 if (get_attr_ooo_groupalone (insn))
13131 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
13135 /* Return the scheduling score for INSN. The higher the score the
13136 better. The score is calculated from the OOO scheduling attributes
13137 of INSN and the scheduling state s390_sched_state. */
13139 s390_sched_score (rtx_insn *insn)
13141 unsigned int mask = s390_get_sched_attrmask (insn);
13144 switch (s390_sched_state)
13147 /* Try to put insns into the first slot which would otherwise
13148 break a group. */
13149 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13150 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13152 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13155 /* Prefer non-cracked insns while trying to put together a
13156 group. */
13157 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13158 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13159 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13161 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
13165 /* Prefer non-cracked insns while trying to put together a
13166 group. */
13167 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13168 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13169 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13171 /* Prefer endgroup insns in the last slot. */
13172 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
13175 case S390_OOO_SCHED_STATE_NORMAL:
13176 /* Prefer non-cracked insns if the last insn was not cracked. */
13177 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13178 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
13180 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13183 case S390_OOO_SCHED_STATE_CRACKED:
13184 /* Try to keep cracked insns together to prevent them from
13185 interrupting groups. */
13186 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13187 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13194 /* This function is called via hook TARGET_SCHED_REORDER before
13195 issuing one insn from list READY which contains *NREADYP entries.
13196 For target z10 it reorders load instructions to avoid early load
13197 conflicts in the floating point pipeline. */
13199 s390_sched_reorder (FILE *file, int verbose,
13200 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13202 if (s390_tune == PROCESSOR_2097_Z10
13203 && reload_completed
13205 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13207 if (s390_tune >= PROCESSOR_2827_ZEC12
13208 && reload_completed
13212 int last_index = *nreadyp - 1;
13213 int max_index = -1;
13214 int max_score = -1;
13217 /* Just move the insn with the highest score to the top (the
13218 end) of the list. A full sort is not needed since a conflict
13219 in the hazard recognition cannot happen. So the top insn in
13220 the ready list will always be taken. */
13221 for (i = last_index; i >= 0; i--)
13225 if (recog_memoized (ready[i]) < 0)
13228 score = s390_sched_score (ready[i]);
13229 if (score > max_score)
13236 if (max_index != -1)
13238 if (max_index != last_index)
13240 tmp = ready[max_index];
13241 ready[max_index] = ready[last_index];
13242 ready[last_index] = tmp;
13246 "move insn %d to the top of list\n",
13247 INSN_UID (ready[last_index]));
13249 else if (verbose > 5)
13251 "best insn %d already on top\n",
13252 INSN_UID (ready[last_index]));
13257 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13260 for (i = last_index; i >= 0; i--)
13262 if (recog_memoized (ready[i]) < 0)
13264 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
13265 s390_sched_score (ready[i]));
13266 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
13267 PRINT_OOO_ATTR (ooo_cracked);
13268 PRINT_OOO_ATTR (ooo_expanded);
13269 PRINT_OOO_ATTR (ooo_endgroup);
13270 PRINT_OOO_ATTR (ooo_groupalone);
13271 #undef PRINT_OOO_ATTR
13272 fprintf (file, "\n");
13277 return s390_issue_rate ();
13281 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
13282 the scheduler has issued INSN. It stores the last issued insn into
13283 last_scheduled_insn in order to make it available for
13284 s390_sched_reorder. */
13286 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
13288 last_scheduled_insn = insn;
13290 if (s390_tune >= PROCESSOR_2827_ZEC12
13291 && reload_completed
13292 && recog_memoized (insn) >= 0)
13294 unsigned int mask = s390_get_sched_attrmask (insn);
13296 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13297 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13298 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
13299 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
13300 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13301 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13304 /* Only normal insns are left (mask == 0). */
13305 switch (s390_sched_state)
13310 case S390_OOO_SCHED_STATE_NORMAL:
13311 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
13312 s390_sched_state = 1;
13314 s390_sched_state++;
13317 case S390_OOO_SCHED_STATE_CRACKED:
13318 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13324 fprintf (file, "insn %d: ", INSN_UID (insn));
13325 #define PRINT_OOO_ATTR(ATTR) \
13326 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
13327 PRINT_OOO_ATTR (ooo_cracked);
13328 PRINT_OOO_ATTR (ooo_expanded);
13329 PRINT_OOO_ATTR (ooo_endgroup);
13330 PRINT_OOO_ATTR (ooo_groupalone);
13331 #undef PRINT_OOO_ATTR
13332 fprintf (file, "\n");
13333 fprintf (file, "sched state: %d\n", s390_sched_state);
13337 if (GET_CODE (PATTERN (insn)) != USE
13338 && GET_CODE (PATTERN (insn)) != CLOBBER)
13345 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
13346 int verbose ATTRIBUTE_UNUSED,
13347 int max_ready ATTRIBUTE_UNUSED)
13349 last_scheduled_insn = NULL;
13350 s390_sched_state = 0;
13353 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
13354 the number of times struct loop *loop should be unrolled when tuning
13355 for cpus with a built-in stride prefetcher.
13356 The loop is analyzed for memory accesses by calling check_dpu for
13357 each rtx of the loop. Depending on the loop depth and the number of
13358 memory accesses, a new unroll factor <= nunroll is returned to
13359 improve the behaviour of the hardware prefetch unit. */
13361 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
13366 unsigned mem_count = 0;
13368 if (s390_tune < PROCESSOR_2097_Z10)
13371 /* Count the number of memory references within the loop body. */
13372 bbs = get_loop_body (loop);
13373 subrtx_iterator::array_type array;
13374 for (i = 0; i < loop->num_nodes; i++)
13375 FOR_BB_INSNS (bbs[i], insn)
13376 if (INSN_P (insn) && INSN_CODE (insn) != -1)
13377 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
13382 /* Prevent division by zero; in this case we do not need to adjust nunroll. */
13383 if (mem_count == 0)
13386 switch (loop_depth (loop))
13389 return MIN (nunroll, 28 / mem_count);
13391 return MIN (nunroll, 22 / mem_count);
13393 return MIN (nunroll, 16 / mem_count);
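/* Editorial worked example: a depth-1 loop containing 4 memory accesses
   yields MIN (nunroll, 28 / 4), so a requested unroll factor of 16 is
   capped at 7 to keep the stride prefetcher effective.  */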
13398 s390_option_override (void)
13401 cl_deferred_option *opt;
13402 vec<cl_deferred_option> *v =
13403 (vec<cl_deferred_option> *) s390_deferred_options;
13406 FOR_EACH_VEC_ELT (*v, i, opt)
13408 switch (opt->opt_index)
13410 case OPT_mhotpatch_:
13417 strncpy (s, opt->arg, 256);
13418 s[255] = 0;
13419 t = strchr (s, ',');
13424 val1 = integral_argument (s);
13425 val2 = integral_argument (t);
13432 if (val1 == -1 || val2 == -1)
13434 /* argument is not a plain number */
13435 error ("arguments to %qs should be non-negative integers",
13439 else if (val1 > s390_hotpatch_hw_max
13440 || val2 > s390_hotpatch_hw_max)
13442 error ("argument to %qs is too large (max. %d)",
13443 "-mhotpatch=n,m", s390_hotpatch_hw_max);
13446 s390_hotpatch_hw_before_label = val1;
13447 s390_hotpatch_hw_after_label = val2;
13451 gcc_unreachable ();
13455 /* Set up function hooks. */
13456 init_machine_status = s390_init_machine_status;
13458 /* Architecture mode defaults according to ABI. */
13459 if (!(target_flags_explicit & MASK_ZARCH))
13462 target_flags |= MASK_ZARCH;
13464 target_flags &= ~MASK_ZARCH;
13467 /* Set the march default in case it hasn't been specified on
13468 the command line. */
13469 if (s390_arch == PROCESSOR_max)
13471 s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
13472 s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
13473 s390_arch_flags = processor_flags_table[(int)s390_arch];
13476 /* Determine processor to tune for. */
13477 if (s390_tune == PROCESSOR_max)
13479 s390_tune = s390_arch;
13480 s390_tune_flags = s390_arch_flags;
13483 /* Sanity checks. */
13484 if (s390_arch == PROCESSOR_NATIVE || s390_tune == PROCESSOR_NATIVE)
13485 gcc_unreachable ();
13486 if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
13487 error ("z/Architecture mode not supported on %s", s390_arch_string);
13488 if (TARGET_64BIT && !TARGET_ZARCH)
13489 error ("64-bit ABI not supported in ESA/390 mode");
13491 /* Use hardware DFP if available and not explicitly disabled by
13492 the user, e.g. with -m31 -march=z10 -mzarch. */
13493 if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
13494 target_flags |= MASK_HARD_DFP;
13496 /* Enable hardware transactions if available and not explicitly
13497 disabled by the user, e.g. with -m31 -march=zEC12 -mzarch. */
13498 if (!(target_flags_explicit & MASK_OPT_HTM) && TARGET_CPU_HTM && TARGET_ZARCH)
13499 target_flags |= MASK_OPT_HTM;
13501 if (target_flags_explicit & MASK_OPT_VX)
13505 if (!TARGET_CPU_VX)
13506 error ("hardware vector support not available on %s",
13508 if (TARGET_SOFT_FLOAT)
13509 error ("hardware vector support not available with -msoft-float");
13512 else if (TARGET_CPU_VX)
13513 /* Enable vector support if available and not explicitly disabled
13514 by the user, e.g. with -m31 -march=z13 -mzarch. */
13515 target_flags |= MASK_OPT_VX;
13517 if (TARGET_HARD_DFP && !TARGET_DFP)
13519 if (target_flags_explicit & MASK_HARD_DFP)
13521 if (!TARGET_CPU_DFP)
13522 error ("hardware decimal floating point instructions"
13523 " not available on %s", s390_arch_string);
13525 error ("hardware decimal floating point instructions"
13526 " not available in ESA/390 mode");
13529 target_flags &= ~MASK_HARD_DFP;
13532 if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
13534 if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
13535 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
13537 target_flags &= ~MASK_HARD_DFP;
13540 /* Set processor cost function. */
13543 case PROCESSOR_2084_Z990:
13544 s390_cost = &z990_cost;
13546 case PROCESSOR_2094_Z9_109:
13547 case PROCESSOR_2094_Z9_EC:
13548 s390_cost = &z9_109_cost;
13550 case PROCESSOR_2097_Z10:
13551 s390_cost = &z10_cost;
13553 case PROCESSOR_2817_Z196:
13554 s390_cost = &z196_cost;
13556 case PROCESSOR_2827_ZEC12:
13557 case PROCESSOR_2964_Z13:
13558 s390_cost = &zEC12_cost;
13561 s390_cost = &z900_cost;
13564 if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
13565 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
13568 if (s390_stack_size)
13570 if (s390_stack_guard >= s390_stack_size)
13571 error ("stack size must be greater than the stack guard value");
13572 else if (s390_stack_size > 1 << 16)
13573 error ("stack size must not be greater than 64k");
13575 else if (s390_stack_guard)
13576 error ("-mstack-guard implies use of -mstack-size");
13578 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
13579 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
13580 target_flags |= MASK_LONG_DOUBLE_128;
13583 if (s390_tune >= PROCESSOR_2097_Z10)
13585 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
13586 global_options.x_param_values,
13587 global_options_set.x_param_values);
13588 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
13589 global_options.x_param_values,
13590 global_options_set.x_param_values);
13591 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
13592 global_options.x_param_values,
13593 global_options_set.x_param_values);
13594 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
13595 global_options.x_param_values,
13596 global_options_set.x_param_values);
13599 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
13600 global_options.x_param_values,
13601 global_options_set.x_param_values);
13602 /* Values for loop prefetching. */
13603 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
13604 global_options.x_param_values,
13605 global_options_set.x_param_values);
13606 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
13607 global_options.x_param_values,
13608 global_options_set.x_param_values);
13609 /* s390 has more than 2 cache levels and their sizes are much larger.
13610 Since we are always running virtualized, assume that we only get a
13611 small part of the caches above L1. */
13612 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
13613 global_options.x_param_values,
13614 global_options_set.x_param_values);
13615 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
13616 global_options.x_param_values,
13617 global_options_set.x_param_values);
13618 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
13619 global_options.x_param_values,
13620 global_options_set.x_param_values);
13622 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
13623 requires the arch flags to be evaluated already. Since prefetching
13624 is beneficial on s390, we enable it if available. */
13625 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
13626 flag_prefetch_loop_arrays = 1;
13628 /* Use the alternative scheduling-pressure algorithm by default. */
13629 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
13630 global_options.x_param_values,
13631 global_options_set.x_param_values);
13635 /* Don't emit DWARF3/4 unless specifically selected. The TPF
13636 debuggers do not yet support DWARF 3/4. */
13637 if (!global_options_set.x_dwarf_strict)
13639 if (!global_options_set.x_dwarf_version)
13643 /* Register a target-specific optimization-and-lowering pass
13644 to run immediately before prologue and epilogue generation.
13646 Registering the pass must be done at start up. It's
13647 convenient to do it here. */
13648 opt_pass *new_pass = new pass_s390_early_mach (g);
13649 struct register_pass_info insert_pass_s390_early_mach =
13651 new_pass, /* pass */
13652 "pro_and_epilogue", /* reference_pass_name */
13653 1, /* ref_pass_instance_number */
13654 PASS_POS_INSERT_BEFORE /* po_op */
13656 register_pass (&insert_pass_s390_early_mach);
13659 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
13662 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
13663 unsigned int align ATTRIBUTE_UNUSED,
13664 enum by_pieces_operation op ATTRIBUTE_UNUSED,
13665 bool speed_p ATTRIBUTE_UNUSED)
13667 return (size == 1 || size == 2
13668 || size == 4 || (TARGET_ZARCH && size == 8));

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
  tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
  tree call_efpc = build_call_expr (efpc, 0);
  tree fenv_var = create_tmp_var (unsigned_type_node);

#define FPC_EXCEPTION_MASK	 HOST_WIDE_INT_UC (0xf8000000)
#define FPC_FLAGS_MASK		 HOST_WIDE_INT_UC (0x00f80000)
#define FPC_DXC_MASK		 HOST_WIDE_INT_UC (0x0000ff00)
#define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
#define FPC_FLAGS_SHIFT		 HOST_WIDE_INT_UC (16)
#define FPC_DXC_SHIFT		 HOST_WIDE_INT_UC (8)

  /* Generates the equivalent of feholdexcept (&fenv_var)

       fenv_var = __builtin_s390_efpc ();
       __builtin_s390_sfpc (fenv_var & mask)  */
  tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
  tree new_fpc =
    build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
	    build_int_cst (unsigned_type_node,
			   ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
			     FPC_EXCEPTION_MASK)));
  tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
  *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)

       __builtin_s390_sfpc (__builtin_s390_efpc () & mask)  */
  new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
		    build_int_cst (unsigned_type_node,
				   ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
  *clear = build_call_expr (sfpc, 1, new_fpc);

  /* Generates the equivalent of feupdateenv (fenv_var)

       old_fpc = __builtin_s390_efpc ();
       __builtin_s390_sfpc (fenv_var);
       __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT)  */

  old_fpc = create_tmp_var (unsigned_type_node);
  tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
			       old_fpc, call_efpc);

  set_new_fpc = build_call_expr (sfpc, 1, fenv_var);

  tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
				  build_int_cst (unsigned_type_node,
						 FPC_FLAGS_MASK));
  raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
			     build_int_cst (unsigned_type_node,
					    FPC_FLAGS_SHIFT));
  tree atomic_feraiseexcept
    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  raise_old_except = build_call_expr (atomic_feraiseexcept,
				      1, raise_old_except);

  *update = build2 (COMPOUND_EXPR, void_type_node,
		    build2 (COMPOUND_EXPR, void_type_node,
			    store_old_fpc, set_new_fpc),
		    raise_old_except);

#undef FPC_EXCEPTION_MASK
#undef FPC_FLAGS_MASK
#undef FPC_DXC_MASK
#undef FPC_EXCEPTION_MASK_SHIFT
#undef FPC_FLAGS_SHIFT
#undef FPC_DXC_SHIFT
}
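
/* Editor's note (illustrative): the front ends use the three sequences
   built above when expanding a C11 atomic compound assignment on a
   floating-point object, e.g.

     _Atomic double d;
     d += 1.0;

   roughly as: emit *hold once before the compare-and-swap loop, emit
   *clear inside the loop before each retry, and emit *update after the
   exchange finally succeeds, so that FP exceptions raised by discarded
   iterations do not remain visible in the environment.  */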

/* Return the vector mode to be used for inner mode MODE when doing
   vectorization.  */
static machine_mode
s390_preferred_simd_mode (machine_mode mode)
{
  if (TARGET_VX)
    switch (mode)
      {
      case DFmode:
	return V2DFmode;
      case DImode:
	return V2DImode;
      case SImode:
	return V4SImode;
      case HImode:
	return V8HImode;
      case QImode:
	return V16QImode;
      default:;
      }
  return word_mode;
}

/* Our hardware does not require vectors to be strictly aligned.  */
static bool
s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
				  const_tree type ATTRIBUTE_UNUSED,
				  int misalignment ATTRIBUTE_UNUSED,
				  bool is_packed ATTRIBUTE_UNUSED)
{
  if (TARGET_VX)
    return true;

  return default_builtin_support_vector_misalignment (mode, type, misalignment,
						      is_packed);
}

/* The vector ABI requires vector types to be aligned on an 8 byte
   boundary (our stack alignment).  However, we allow this to be
   overridden by the user, even though doing so breaks the ABI.  */
static HOST_WIDE_INT
s390_vector_alignment (const_tree type)
{
  if (!TARGET_VX_ABI)
    return default_vector_alignment (type);

  if (TYPE_USER_ALIGN (type))
    return TYPE_ALIGN (type);

  return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
}
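
/* Editor's sketch: the returned value is a bit alignment.  For a
   16-byte vector type such as V4SI, TYPE_SIZE is 128 bits, so the
   function returns MIN (64, 128) = 64 bits, i.e. the 8-byte stack
   alignment the vector ABI guarantees.  A user-aligned type such as

     typedef int v4si __attribute__ ((vector_size (16), aligned (16)));

   has TYPE_USER_ALIGN set and keeps its requested 16-byte alignment
   instead.  */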

/* Implement TARGET_ASM_FILE_END.  */
static void
s390_asm_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  varpool_node *vnode;
  cgraph_node *cnode;

  FOR_EACH_VARIABLE (vnode)
    if (TREE_PUBLIC (vnode->decl))
      s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);

  FOR_EACH_FUNCTION (cnode)
    if (TREE_PUBLIC (cnode->decl))
      s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
#endif

  if (s390_vector_abi != 0)
    fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
	     s390_vector_abi);

  file_end_indicate_exec_stack ();
}
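
/* Editor's note (illustrative): when a vector ABI is in effect this
   appends an object attribute such as

	.gnu_attribute 8, 2

   to the assembly output; GNU attribute tag 8 records which s390
   vector ABI variant the object was built against, so tooling can
   detect mismatching objects at link time.  */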

/* Return true if TYPE is a vector bool type.  */
static inline bool
s390_vector_bool_type_p (const_tree type)
{
  return TYPE_VECTOR_OPAQUE (type);
}

/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */
static const char*
s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
{
  bool bool1_p, bool2_p;
  bool plusminus_p;
  bool muldiv_p;
  bool compare_p;
  machine_mode mode1, mode2;

  if (!TARGET_ZVECTOR)
    return NULL;

  if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
    return NULL;

  bool1_p = s390_vector_bool_type_p (type1);
  bool2_p = s390_vector_bool_type_p (type2);

  /* Mixing signed and unsigned types is forbidden for all
     vector operations.  */
  if (!bool1_p && !bool2_p
      && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
    return N_("types differ in signedness");

  plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
  muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
	      || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
	      || op == ROUND_DIV_EXPR);
  compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
	       || op == EQ_EXPR || op == NE_EXPR);

  if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
    return N_("binary operator does not support two vector bool operands");

  if (bool1_p != bool2_p && (muldiv_p || compare_p))
    return N_("binary operator does not support vector bool operand");

  mode1 = TYPE_MODE (type1);
  mode2 = TYPE_MODE (type2);

  if (bool1_p != bool2_p && plusminus_p
      && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
	  || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
    return N_("binary operator does not support mixing vector "
	      "bool with floating point vector operands");

  return NULL;
}

/* Initialize GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER s390_assemble_integer

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""

#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE s390_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL s390_builtin_decl

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true

#undef TARGET_KEEP_LEAF_WHEN_PROFILED
#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif

#undef TARGET_DWARF_FRAME_REG_MODE
#define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE s390_attribute_table

#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  s390_use_by_pieces_infrastructure_p

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode

#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment

#undef TARGET_VECTOR_ALIGNMENT
#define TARGET_VECTOR_ALIGNMENT s390_vector_alignment

#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP s390_invalid_binary_op

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END s390_asm_file_end

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"