/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2016 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "target-globals.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "conditions.h"
#include "insn-attr.h"
#include "cfgcleanup.h"
#include "langhooks.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-pass.h"
#include "tm-constrs.h"

/* This file should be included last.  */
#include "target-def.h"
/* Remember the last target of s390_set_current_function.  */
static GTY(()) tree s390_previous_fndecl;
/* Define the specific costs for a given cpu.  */

struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int mxbr;     /* cost of multiplication in TFmode.  */
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
  /* division */
  const int dxbr;
  const int ddbr;
  const int debr;
  const int dlgr;
  const int dlr;
  const int dr;
  const int dsgfr;
  const int dsgr;
};

#define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
static const
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),     /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (4),     /* MHI   */
  COSTS_N_INSNS (5),     /* ML    */
  COSTS_N_INSNS (5),     /* MR    */
  COSTS_N_INSNS (4),     /* MS    */
  COSTS_N_INSNS (15),    /* MSG   */
  COSTS_N_INSNS (7),     /* MSGF  */
  COSTS_N_INSNS (7),     /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (7),     /* multiplication in DFmode */
  COSTS_N_INSNS (13),    /* MXBR */
  COSTS_N_INSNS (136),   /* SQXBR */
  COSTS_N_INSNS (44),    /* SQDBR */
  COSTS_N_INSNS (35),    /* SQEBR */
  COSTS_N_INSNS (18),    /* MADBR */
  COSTS_N_INSNS (13),    /* MAEBR */
  COSTS_N_INSNS (134),   /* DXBR */
  COSTS_N_INSNS (30),    /* DDBR */
  COSTS_N_INSNS (27),    /* DEBR */
  COSTS_N_INSNS (220),   /* DLGR */
  COSTS_N_INSNS (34),    /* DLR */
  COSTS_N_INSNS (34),    /* DR */
  COSTS_N_INSNS (32),    /* DSGFR */
  COSTS_N_INSNS (32),    /* DSGR */
};
static const
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (176),   /* DLGR */
  COSTS_N_INSNS (31),    /* DLR */
  COSTS_N_INSNS (31),    /* DR */
  COSTS_N_INSNS (31),    /* DSGFR */
  COSTS_N_INSNS (31),    /* DSGR */
};
static const
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (30),    /* DLGR */
  COSTS_N_INSNS (23),    /* DLR */
  COSTS_N_INSNS (23),    /* DR */
  COSTS_N_INSNS (24),    /* DSGFR */
  COSTS_N_INSNS (24),    /* DSGR */
};
static const
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),    /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (10),    /* MH    */
  COSTS_N_INSNS (10),    /* MHI   */
  COSTS_N_INSNS (10),    /* ML    */
  COSTS_N_INSNS (10),    /* MR    */
  COSTS_N_INSNS (10),    /* MS    */
  COSTS_N_INSNS (10),    /* MSG   */
  COSTS_N_INSNS (10),    /* MSGF  */
  COSTS_N_INSNS (10),    /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (10),    /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (50),    /* MXBR */
  COSTS_N_INSNS (120),   /* SQXBR */
  COSTS_N_INSNS (52),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (111),   /* DXBR */
  COSTS_N_INSNS (39),    /* DDBR */
  COSTS_N_INSNS (32),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR */
  COSTS_N_INSNS (71),    /* DLR */
  COSTS_N_INSNS (71),    /* DR */
  COSTS_N_INSNS (71),    /* DSGFR */
  COSTS_N_INSNS (71),    /* DSGR */
};
static const
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (101),   /* DXBR B+101 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
static const
struct processor_costs zEC12_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (131),   /* DXBR B+131 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
static const struct
{
  const char *const name;
  const enum processor_type processor;
  const struct processor_costs *cost;
}
const processor_table[] =
{
  { "g5",     PROCESSOR_9672_G5,     &z900_cost },
  { "g6",     PROCESSOR_9672_G6,     &z900_cost },
  { "z900",   PROCESSOR_2064_Z900,   &z900_cost },
  { "z990",   PROCESSOR_2084_Z990,   &z990_cost },
  { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
  { "z9-ec",  PROCESSOR_2094_Z9_EC,  &z9_109_cost },
  { "z10",    PROCESSOR_2097_Z10,    &z10_cost },
  { "z196",   PROCESSOR_2817_Z196,   &z196_cost },
  { "zEC12",  PROCESSOR_2827_ZEC12,  &zEC12_cost },
  { "z13",    PROCESSOR_2964_Z13,    &zEC12_cost },
  { "native", PROCESSOR_NATIVE,      NULL }
};
extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx_insn *last_scheduled_insn;
/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
          base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Numbers of the first and last gpr for which slots in the
     register save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
       0 - does not need to be saved at all
      -1 - stack slot  */
#define SAVE_SLOT_NONE   0
#define SAVE_SLOT_STACK  -1
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored.  */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;

  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     FPRs.  */
  bool tbegin_p;
};
435 #define cfun_frame_layout (cfun->machine->frame_layout)
436 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
437 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
438 ? cfun_frame_layout.fpr_bitmap & 0x0f \
439 : cfun_frame_layout.fpr_bitmap & 0x03))
440 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
441 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
442 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
443 (1 << (REGNO - FPR0_REGNUM)))
444 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
445 (1 << (REGNO - FPR0_REGNUM))))
446 #define cfun_gpr_save_slot(REGNO) \
447 cfun->machine->frame_layout.gpr_save_slots[REGNO]
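/* Worked example (illustration only): cfun_set_fpr_save (FPR8_REGNUM)
   sets bit FPR8_REGNUM - FPR0_REGNUM == 8 of fpr_bitmap, which is the
   f8 entry in the bit/fpr table of struct s390_frame_layout above;
   cfun_fpr_save_p (FPR8_REGNUM) tests exactly that bit.  */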
/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
#define VEC_ARG_NUM_REG 8

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE) \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))

/* That's the read-ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
/* Indicate which ABI has been used for passing vector args.
   0 - no vector type arguments have been passed where the ABI is relevant
   1 - the old ABI has been used
   2 - a vector type argument has been passed either in a vector register
       or on the stack by value  */
static int s390_vector_abi = 0;
/* Set the vector ABI marker if TYPE is subject to the vector ABI
   switch.  The vector ABI affects only vector data types.  There are
   two aspects of the vector ABI relevant here:

   1. vectors >= 16 bytes have an alignment of 8 bytes with the new
      ABI and natural alignment with the old.

   2. vectors <= 16 bytes are passed in VRs or by value on the stack
      with the new ABI but by reference on the stack with the old.

   If ARG_P is true TYPE is used for a function argument or return
   value.  The ABI marker then is set for all vector data types.  If
   ARG_P is false only type 1 vectors are being checked.  */
static void
s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
{
  static hash_set<const_tree> visited_types_hash;

  if (s390_vector_abi)
    return;

  if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
    return;

  if (visited_types_hash.contains (type))
    return;

  visited_types_hash.add (type);

  if (VECTOR_TYPE_P (type))
    {
      int type_size = int_size_in_bytes (type);

      /* Outside of arguments, only the alignment changes, and this
         happens only for vector types >= 16 bytes.  */
      if (!arg_p && type_size < 16)
        return;

      /* In arguments vector types > 16 bytes are passed as before (GCC
         never enforced the bigger alignment for arguments which was
         required by the old vector ABI).  However, it might still be
         ABI relevant due to the changed alignment if it is a struct
         member.  */
      if (arg_p && type_size > 16 && !in_struct_p)
        return;

      s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
    }
  else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
    {
      /* ARRAY_TYPE: Since with neither of the ABIs we have more than
         natural alignment there will never be ABI dependent padding
         in an array type.  That's why we do not set in_struct_p to
         true here.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
    {
      tree arg_chain;

      /* Check the return type.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);

      for (arg_chain = TYPE_ARG_TYPES (type);
           arg_chain;
           arg_chain = TREE_CHAIN (arg_chain))
        s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
    }
  else if (RECORD_OR_UNION_TYPE_P (type))
    {
      tree field;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
        {
          if (TREE_CODE (field) != FIELD_DECL)
            continue;

          s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
        }
    }
}
/* System z builtins.  */

#include "s390-builtins.h"

const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int
opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(...)
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, FNTYPE) FLAGS,
#include "s390-builtins.def"
    0
  };

tree s390_builtin_types[BT_MAX];
tree s390_builtin_fn_types[BT_FN_MAX];
tree s390_builtin_decls[S390_BUILTIN_MAX +
                        S390_OVERLOADED_BUILTIN_MAX +
                        S390_OVERLOADED_BUILTIN_VAR_MAX];

static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
  CODE_FOR_nothing
};
static void
s390_init_builtins (void)
{
  /* These definitions are being used in s390-builtins.def.  */
  tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
                                       NULL, NULL);
  tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
  tree c_uint64_type_node;

  /* The uint64_type_node from tree.c is not compatible with the C99
     uint64_t data type.  What we want is c_uint64_type_node from
     c-common.c.  But since backend code is not supposed to interface
     with the frontend we recreate it here.  */
  if (TARGET_64BIT)
    c_uint64_type_node = long_unsigned_type_node;
  else
    c_uint64_type_node = long_long_unsigned_type_node;

#undef DEF_TYPE
#define DEF_TYPE(INDEX, BFLAGS, NODE, CONST_P)          \
  if (s390_builtin_types[INDEX] == NULL)                \
    s390_builtin_types[INDEX] = (!CONST_P) ?            \
      (NODE) : build_type_variant ((NODE), 1, 0);

#undef DEF_POINTER_TYPE
#define DEF_POINTER_TYPE(INDEX, BFLAGS, INDEX_BASE)             \
  if (s390_builtin_types[INDEX] == NULL)                        \
    s390_builtin_types[INDEX] =                                 \
      build_pointer_type (s390_builtin_types[INDEX_BASE]);

#undef DEF_DISTINCT_TYPE
#define DEF_DISTINCT_TYPE(INDEX, BFLAGS, INDEX_BASE)            \
  if (s390_builtin_types[INDEX] == NULL)                        \
    s390_builtin_types[INDEX] =                                 \
      build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);

#undef DEF_VECTOR_TYPE
#define DEF_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS)    \
  if (s390_builtin_types[INDEX] == NULL)                        \
    s390_builtin_types[INDEX] =                                 \
      build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_OPAQUE_VECTOR_TYPE
#define DEF_OPAQUE_VECTOR_TYPE(INDEX, BFLAGS, INDEX_BASE, ELEMENTS)     \
  if (s390_builtin_types[INDEX] == NULL)                                \
    s390_builtin_types[INDEX] =                                         \
      build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_FN_TYPE
#define DEF_FN_TYPE(INDEX, BFLAGS, args...)     \
  if (s390_builtin_fn_types[INDEX] == NULL)     \
    s390_builtin_fn_types[INDEX] =              \
      build_function_type_list (args, NULL_TREE);
#undef DEF_OV_TYPE
#define DEF_OV_TYPE(...)
#include "s390-builtin-types.def"

#undef B_DEF
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE)            \
  if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL)                  \
    s390_builtin_decls[S390_BUILTIN_##NAME] =                           \
      add_builtin_function ("__builtin_" #NAME,                         \
                            s390_builtin_fn_types[FNTYPE],              \
                            S390_BUILTIN_##NAME,                        \
                            BUILT_IN_MD,                                \
                            NULL,                                       \
                            ATTRS);
#undef OB_DEF
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE)     \
  if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
      == NULL)                                                          \
    s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
      add_builtin_function ("__builtin_" #NAME,                         \
                            s390_builtin_fn_types[FNTYPE],              \
                            S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
                            BUILT_IN_MD,                                \
                            NULL,                                       \
                            0);
#undef OB_DEF_VAR
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
}
/* Return true if ARG is appropriate as argument number ARGNUM of
   builtin DECL.  The operand flags from s390-builtins.def have to
   be passed as OP_FLAGS.  */
bool
s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
{
  if (O_UIMM_P (op_flags))
    {
      int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_U1];

      if (!tree_fits_uhwi_p (arg)
          || tree_to_uhwi (arg) > ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1)
        {
          error ("constant argument %d for builtin %qF is out of range (0.."
                 HOST_WIDE_INT_PRINT_UNSIGNED ")",
                 argnum, decl,
                 ((unsigned HOST_WIDE_INT)1 << bitwidth) - 1);
          return false;
        }
    }

  if (O_SIMM_P (op_flags))
    {
      int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_S2];

      if (!tree_fits_shwi_p (arg)
          || tree_to_shwi (arg) < -((HOST_WIDE_INT)1 << (bitwidth - 1))
          || tree_to_shwi (arg) > (((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1))
        {
          error ("constant argument %d for builtin %qF is out of range ("
                 HOST_WIDE_INT_PRINT_DEC ".."
                 HOST_WIDE_INT_PRINT_DEC ")",
                 argnum, decl,
                 -((HOST_WIDE_INT)1 << (bitwidth - 1)),
                 ((HOST_WIDE_INT)1 << (bitwidth - 1)) - 1);
          return false;
        }
    }
  return true;
}
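/* Worked example (illustration only, assuming the O_U and O_S operand
   flag enumerators are consecutive in the order of the tables above,
   as the indexing implies): an O_U4 operand gets bitwidth 4 and thus
   accepts 0..15, while an O_S8 operand gets bitwidth 8 and accepts
   -128..127.  */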
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 6

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;
  tree arg;
  call_expr_arg_iterator iter;
  unsigned int all_op_flags = opflags_for_builtin (fcode);
  machine_mode last_vec_mode = VOIDmode;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr,
               "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
               (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
               bflags_for_builtin (fcode));
    }

  if (S390_USE_TARGET_ATTRIBUTE)
    {
      unsigned int bflags;

      bflags = bflags_for_builtin (fcode);
      if ((bflags & B_HTM) && !TARGET_HTM)
        {
          error ("Builtin %qF is not supported without -mhtm "
                 "(default with -march=zEC12 and higher).", fndecl);
          return const0_rtx;
        }
      if ((bflags & B_VX) && !TARGET_VX)
        {
          error ("Builtin %qF is not supported without -mvx "
                 "(default with -march=z13 and higher).", fndecl);
          return const0_rtx;
        }
    }
  if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
      && fcode < S390_ALL_BUILTIN_MAX)
    {
      gcc_unreachable ();
    }
  else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
    {
      icode = code_for_builtin[fcode];
      /* Set a flag in the machine specific cfun part in order to support
         saving/restoring of FPRs.  */
      if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
        cfun->machine->tbegin_p = true;
    }
  else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
    {
      error ("Unresolved overloaded builtin");
      return const0_rtx;
    }
  else
    internal_error ("bad builtin fcode");

  if (icode == 0)
    internal_error ("bad builtin icode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      /* There are builtins (e.g. vec_promote) with no vector
         arguments but an element selector.  So we have to also look
         at the vector return type when emitting the modulo
         operation.  */
      if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
        last_vec_mode = insn_data[icode].operand[0].mode;
    }

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      const struct insn_operand_data *insn_op;
      unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);

      all_op_flags = all_op_flags >> O_SHIFT;

      if (arg == error_mark_node)
        return NULL_RTX;
      if (arity >= MAX_ARGS)
        return NULL_RTX;

      if (O_IMM_P (op_flags)
          && TREE_CODE (arg) != INTEGER_CST)
        {
          error ("constant value required for builtin %qF argument %d",
                 fndecl, arity + 1);
          return const0_rtx;
        }

      if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
        return const0_rtx;

      insn_op = &insn_data[icode].operand[arity + nonvoid];
      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      /* expand_expr truncates constants to the target mode only if it
         is "convenient".  However, our checks below rely on this
         being done.  */
      if (CONST_INT_P (op[arity])
          && SCALAR_INT_MODE_P (insn_op->mode)
          && GET_MODE (op[arity]) != insn_op->mode)
        op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
                                                 insn_op->mode));

      /* Wrap the expanded RTX for pointer types into a MEM expr with
         the proper mode.  This allows us to use e.g. (match_operand
         "memory_operand"..) in the insn patterns instead of (mem
         (match_operand "address_operand"..)).  This is helpful for
         patterns not just accepting MEMs.  */
      if (POINTER_TYPE_P (TREE_TYPE (arg))
          && insn_op->predicate != address_operand)
        op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);

      /* Expand the modulo operation required on element selectors.  */
      if (op_flags == O_ELEM)
        {
          gcc_assert (last_vec_mode != VOIDmode);
          op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
                                             op[arity],
                                             GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
                                             NULL_RTX, 1, OPTAB_DIRECT);
        }
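
      /* Example of the masking above (illustration only): for a V4SI
         vector GET_MODE_NUNITS is 4, so an element selector is ANDed
         with 3 and a constant selector of e.g. 5 wraps around to
         element 1.  */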
      /* Record the vector mode used for an element selector.  This assumes:
         1. There is no builtin with two different vector modes and an element selector
         2. The element selector comes after the vector type it is referring to.
         This is currently true for all the builtins but FIXME: we
         should better check for that.  */
      if (VECTOR_MODE_P (insn_op->mode))
        last_vec_mode = insn_op->mode;

      if (insn_op->predicate (op[arity], insn_op->mode))
        {
          arity++;
          continue;
        }

      if (MEM_P (op[arity])
          && insn_op->predicate == memory_operand
          && (GET_MODE (XEXP (op[arity], 0)) == Pmode
              || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
        {
          op[arity] = replace_equiv_address (op[arity],
                                             copy_to_mode_reg (Pmode,
                                               XEXP (op[arity], 0)));
        }
      else if (GET_MODE (op[arity]) == insn_op->mode
               || GET_MODE (op[arity]) == VOIDmode
               || (insn_op->predicate == address_operand
                   && GET_MODE (op[arity]) == Pmode))
        {
          /* An address_operand usually has VOIDmode in the expander
             so we cannot use this.  */
          machine_mode target_mode =
            (insn_op->predicate == address_operand
             ? Pmode : insn_op->mode);
          op[arity] = copy_to_mode_reg (target_mode, op[arity]);
        }

      if (!insn_op->predicate (op[arity], insn_op->mode))
        {
          error ("Invalid argument %d for builtin %qF", arity + 1, fndecl);
          return const0_rtx;
        }
      arity++;
    }

  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0]);
      else
        pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1]);
      else
        pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 3:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 4:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    case 5:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      break;
    case 6:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;
  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
static const int s390_hotpatch_hw_max = 1000000;
static int s390_hotpatch_hw_before_label = 0;
static int s390_hotpatch_hw_after_label = 0;

/* Check whether the hotpatch attribute is applied to a function and, if it has
   an argument, the argument is valid.  */

static tree
s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree expr;
  tree expr2;
  int err;

  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }
  if (args != NULL && TREE_CHAIN (args) != NULL)
    {
      expr = TREE_VALUE (args);
      expr2 = TREE_VALUE (TREE_CHAIN (args));
    }
  if (args == NULL || TREE_CHAIN (args) == NULL)
    err = 1;
  else if (TREE_CODE (expr) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
           || wi::gtu_p (expr, s390_hotpatch_hw_max))
    err = 1;
  else if (TREE_CODE (expr2) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
           || wi::gtu_p (expr2, s390_hotpatch_hw_max))
    err = 1;
  else
    err = 0;
  if (err)
    {
      error ("requested %qE attribute is not a comma separated pair of"
             " non-negative integer constants or too large (max. %d)", name,
             s390_hotpatch_hw_max);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Expand the s390_vector_bool type attribute.  */

static tree
s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  machine_mode mode;

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);
  switch (mode)
    {
    case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
    case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
    case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
    case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
      break;
    default:
      break;
    }

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
static const struct attribute_spec s390_attribute_table[] = {
  { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
  { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
  /* End element.  */
  { NULL, 0, 0, false, false, false, NULL, false }
};
/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label.  */
int
s390_label_align (rtx label)
{
  rtx_insn *prev_insn = prev_active_insn (label);
  rtx set, src;

  if (prev_insn == NULL_RTX)
    goto old;

  set = single_set (prev_insn);

  if (set == NULL_RTX)
    goto old;

  src = SET_SRC (set);

  /* Don't align literal pool base labels.  */
  if (GET_CODE (src) == UNSPEC
      && XINT (src, 1) == UNSPEC_MAIN_BASE)
    return 0;

 old:
  return align_labels_log;
}
static machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE.  */
static bool
s390_scalar_mode_supported_p (machine_mode mode)
{
  /* In contrast to the default implementation reject TImode constants on
     31-bit TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
}
1159 /* Return true if the back end supports vector mode MODE. */
1161 s390_vector_mode_supported_p (machine_mode mode)
1165 if (!VECTOR_MODE_P (mode)
1167 || GET_MODE_SIZE (mode) > 16)
1170 inner = GET_MODE_INNER (mode);
/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static machine_mode
s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;

  switch (m1)
    {
    case CCZmode:
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
          || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
        return m2;
      return VOIDmode;

    default:
      return VOIDmode;
    }
}
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

static bool
s390_match_ccmode_set (rtx set, machine_mode req_mode)
{
  machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return true;

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCSmode:
    case CCSRmode:
    case CCUmode:
    case CCURmode:
    case CCLmode:
    case CCL1mode:
    case CCL2mode:
    case CCL3mode:
    case CCT1mode:
    case CCT2mode:
    case CCT3mode:
      if (req_mode != set_mode)
        return false;
      break;

    case CCZmode:
      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
          && req_mode != CCSRmode && req_mode != CCURmode)
        return false;
      break;

    case CCAPmode:
    case CCANmode:
      if (req_mode != CCAmode)
        return false;
      break;

    default:
      gcc_unreachable ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

static bool
s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
        rtx set = XVECEXP (PATTERN (insn), 0, i);
        if (GET_CODE (set) == SET)
          if (!s390_match_ccmode_set (set, req_mode))
            return false;
      }

  return true;
}
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM).  */

machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_WIDE_INT as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     int a;
     if ((a & (16 + 128)) == 16)  -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2  */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
        return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
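
/* Worked example for the CCT1/CCT2 case (illustration only), matching
   the comment above: for if ((a & (16 + 128)) == 16) we get
   bit1 = exact_log2 (16) == 4 and bit0 = exact_log2 (144 ^ 16) == 7,
   so bit0 > bit1 yields CCT1mode.  */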
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
  if (TARGET_VX
      && register_operand (op0, DFmode)
      && register_operand (op1, DFmode))
    {
      /* LT, LE, UNGT, UNGE require swapping OP0 and OP1.  Either
         s390_emit_compare or s390_canonicalize_comparison will take
         care of it.  */
      switch (code)
        {
        case EQ:
        case NE:
          return CCVEQmode;
        case GT:
        case UNLE:
          return CCVFHmode;
        case GE:
        case UNLT:
          return CCVFHEmode;
        default:
          ;
        }
    }

  switch (code)
    {
    case EQ:
    case NE:
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCAPmode;
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
        return CCAPmode;
      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
           || GET_CODE (op1) == NEG)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCLmode;

      if (GET_CODE (op0) == AND)
        {
          /* Check whether we can potentially do it via TM.  */
          machine_mode ccmode;
          ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
          if (ccmode != VOIDmode)
            {
              /* Relax CCTmode to CCZmode to allow fall-back to AND
                 if that turns out to be beneficial.  */
              return ccmode == CCTmode ? CCZmode : ccmode;
            }
        }

      if (register_operand (op0, HImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
        return CCT3mode;
      if (register_operand (op0, QImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
        return CCT3mode;

      return CCZmode;

    case LE:
    case LT:
    case GE:
    case GT:
      /* The only overflow condition of NEG and ABS happens when
         INT_MIN is used as parameter; its negation or absolute value
         cannot be represented and stays negative.  So we have an
         overflow from a positive value to a negative.
         Using CCAP mode the resulting cc can be used for comparisons.  */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCAPmode;

      /* If constants are involved in an add instruction it is possible to use
         the resulting cc for comparisons with zero.  Knowing the sign of the
         constant the overflow behavior gets predictable.  e.g.:
           int a, b; if ((b = a + c) > 0)
         with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
              || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
                  /* Avoid INT32_MIN on 32 bit.  */
                  && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
        {
          if (INTVAL (XEXP (op0, 1)) < 0)
            return CCANmode;
          else
            return CCAPmode;
        }
      /* Fall through.  */
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
    case LTGT:
      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCSRmode;
      return CCSmode;

    case LTU:
    case GEU:
      if (GET_CODE (op0) == PLUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCL1mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCURmode;
      return CCUmode;

    case LEU:
    case GTU:
      if (GET_CODE (op0) == MINUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCL2mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCURmode;
      return CCUmode;

    default:
      gcc_unreachable ();
    }
}
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

static void
s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
                              bool op0_preserve_value)
{
  if (op0_preserve_value)
    return;

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
          && pos >= 0 && pos + len <= modesize
          && modesize <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT block;
          block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
          block <<= modesize - pos - len;

          *op0 = gen_rtx_AND (GET_MODE (inner), inner,
                              gen_int_mode (block, GET_MODE (inner)));
        }
    }
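
  /* Worked example (illustration only): (eq (zero_extract:SI x 1 7) 0)
     has modesize 32, len 1 and pos 7, so block becomes
     1 << (32 - 7 - 1), i.e. the test turns into
     (eq (and:SI x 0x01000000) 0), which the TM patterns can match.  */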
  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
          && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
          && (GET_MODE_SIZE (GET_MODE (inner))
              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
          && ((INTVAL (mask)
               & GET_MODE_MASK (GET_MODE (inner))
               & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
              == 0))
        inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
        {
          int part = s390_single_part (XEXP (*op0, 1),
                                       GET_MODE (inner), QImode, 0);
          if (part >= 0)
            {
              mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
              inner = adjust_address_nv (inner, QImode, part);
              *op0 = gen_rtx_AND (QImode, inner, mask);
            }
        }
    }

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
          & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }
  /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
        {
        case EQ: new_code = EQ;  break;
        case NE: new_code = NE;  break;
        case LT: new_code = GTU; break;
        case GT: new_code = LTU; break;
        case LE: new_code = GEU; break;
        case GE: new_code = LEU; break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          *op0 = XVECEXP (*op0, 0, 0);
          *code = (int) new_code;
        }
    }

  /* Remove redundant UNSPEC_CC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && CONST_INT_P (*op1))
    {
      enum rtx_code new_code = UNKNOWN;
      switch (GET_MODE (XVECEXP (*op0, 0, 0)))
        {
        case CCZmode:
        case CCRAWmode:
          switch (*code)
            {
            case EQ: new_code = EQ; break;
            case NE: new_code = NE; break;
            default: break;
            }
          break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          /* For CCRAWmode put the required cc mask into the second
             operand.  */
          if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
              && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
            *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
          *op0 = XVECEXP (*op0, 0, 0);
          *code = (int) new_code;
        }
    }

  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
          || (*code == NE && GET_CODE (*op0) == EQ))
        *code = EQ;
      else
        *code = NE;
      *op0 = XEXP (*op0, 0);
    }

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int) swap_condition ((enum rtx_code) *code);
    }

  /* Using the scalar variants of vector instructions for 64 bit FP
     comparisons might require swapping the operands.  */
  if (TARGET_VX
      && register_operand (*op0, DFmode)
      && register_operand (*op1, DFmode)
      && (*code == LT || *code == LE || *code == UNGT || *code == UNGE))
    {
      rtx tmp;

      switch (*code)
        {
        case LT:   *code = GT;   break;
        case LE:   *code = GE;   break;
        case UNGT: *code = UNLE; break;
        case UNGE: *code = UNLT; break;
        default: ;
        }
      tmp = *op0; *op0 = *op1; *op1 = tmp;
    }
}
1686 /* Helper function for s390_emit_compare. If possible emit a 64 bit
1687 FP compare using the single element variant of vector instructions.
1688 Replace CODE with the comparison code to be used in the CC reg
1689 compare and return the condition code register RTX in CC. */
1692 s390_expand_vec_compare_scalar (enum rtx_code *code, rtx cmp1, rtx cmp2,
1695 machine_mode cmp_mode;
1696 bool swap_p = false;
1700 case EQ: cmp_mode = CCVEQmode; break;
1701 case NE: cmp_mode = CCVEQmode; break;
1702 case GT: cmp_mode = CCVFHmode; break;
1703 case GE: cmp_mode = CCVFHEmode; break;
1704 case UNLE: cmp_mode = CCVFHmode; break;
1705 case UNLT: cmp_mode = CCVFHEmode; break;
1706 case LT: cmp_mode = CCVFHmode; *code = GT; swap_p = true; break;
1707 case LE: cmp_mode = CCVFHEmode; *code = GE; swap_p = true; break;
1708 case UNGE: cmp_mode = CCVFHmode; *code = UNLE; swap_p = true; break;
1709 case UNGT: cmp_mode = CCVFHEmode; *code = UNLT; swap_p = true; break;
1710 default: return false;
1719 *cc = gen_rtx_REG (cmp_mode, CC_REGNUM);
1720 emit_insn (gen_rtx_PARALLEL (VOIDmode,
1723 gen_rtx_COMPARE (cmp_mode, cmp1,
1725 gen_rtx_CLOBBER (VOIDmode,
1726 gen_rtx_SCRATCH (V2DImode)))));
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  if (TARGET_VX
      && register_operand (op0, DFmode)
      && register_operand (op1, DFmode)
      && s390_expand_vec_compare_scalar (&code, op0, op1, &cc))
    {
      /* Work has been done by s390_expand_vec_compare_scalar already.  */
    }
  else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      /* Do not output a redundant compare instruction if a
         compare_and_swap pattern already computed the result and the
         machine modes are compatible.  */
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
                  == GET_MODE (op0));
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
                            rtx cmp, rtx new_rtx)
{
  emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
  return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
                            const0_rtx);
}
/* Emit a jump instruction to TARGET and return it.  If COND is
   NULL_RTX, emit an unconditional jump, else a conditional jump under
   condition COND.  */

rtx_insn *
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (pc_rtx, target);
  return emit_jump_insn (insn);
}
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;
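
  /* Example (illustration only): in CCZmode an EQ test maps to CC0,
     i.e. mask 8 ("e" in the mnemonic table further below), while NE
     maps to CC1 | CC2 | CC3, i.e. mask 7 ("ne").  */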
  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx
              || (GET_MODE (XEXP (code, 0)) == CCRAWmode
                  && CONST_INT_P (XEXP (code, 1))));

  switch (GET_MODE (XEXP (code, 0)))
    {
    case CCZmode:
    case CCZ1mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCT1mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC1;
        case NE: return CC0 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCT2mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC2;
        case NE: return CC0 | CC1 | CC3;
        default: return -1;
        }
      break;

    case CCT3mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC3;
        case NE: return CC0 | CC1 | CC2;
        default: return -1;
        }
      break;

    case CCLmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0 | CC2;
        case NE: return CC1 | CC3;
        default: return -1;
        }
      break;

    case CCL1mode:
      switch (GET_CODE (code))
        {
        case LTU: return CC2 | CC3;  /* carry */
        case GEU: return CC0 | CC1;  /* no carry */
        default: return -1;
        }
      break;

    case CCL2mode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0 | CC1;  /* borrow */
        case LEU: return CC2 | CC3;  /* no borrow */
        default: return -1;
        }
      break;

    case CCL3mode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0 | CC2;
        case NE:  return CC1 | CC3;
        case LTU: return CC1;
        case GTU: return CC3;
        case LEU: return CC1 | CC2;
        case GEU: return CC2 | CC3;
        default: return -1;
        }
      break;

    case CCUmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0;
        case NE:  return CC1 | CC2 | CC3;
        case LTU: return CC1;
        case GTU: return CC2;
        case LEU: return CC0 | CC1;
        case GEU: return CC0 | CC2;
        default: return -1;
        }
      break;

    case CCURmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0;
        case NE:  return CC2 | CC1 | CC3;
        case LTU: return CC2;
        case GTU: return CC1;
        case LEU: return CC0 | CC2;
        case GEU: return CC0 | CC1;
        default: return -1;
        }
      break;

    case CCAPmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1 | CC3;
        case GT: return CC2;
        case LE: return CC0 | CC1 | CC3;
        case GE: return CC0 | CC2;
        default: return -1;
        }
      break;

    case CCANmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1;
        case GT: return CC2 | CC3;
        case LE: return CC0 | CC1;
        case GE: return CC0 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCSmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1;
        case GT:        return CC2;
        case LE:        return CC0 | CC1;
        case GE:        return CC0 | CC2;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC1 | CC2;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC1 | CC3;
        case UNGT:      return CC2 | CC3;
        case UNLE:      return CC0 | CC1 | CC3;
        case UNGE:      return CC0 | CC2 | CC3;
        case LTGT:      return CC1 | CC2;
        default: return -1;
        }
      break;

    case CCSRmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC2 | CC1 | CC3;
        case LT:        return CC2;
        case GT:        return CC1;
        case LE:        return CC0 | CC2;
        case GE:        return CC0 | CC1;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC2 | CC1;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC2 | CC3;
        case UNGT:      return CC1 | CC3;
        case UNLE:      return CC0 | CC2 | CC3;
        case UNGE:      return CC0 | CC1 | CC3;
        case LTGT:      return CC2 | CC1;
        default: return -1;
        }
      break;

      /* Vector comparison modes.  */

    case CCVEQmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC3;
        default: return -1;
        }

    case CCVEQANYmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0 | CC1;
        case NE: return CC3 | CC1;
        default: return -1;
        }

      /* Integer vector compare modes.  */

    case CCVHmode:
      switch (GET_CODE (code))
        {
        case GT: return CC0;
        case LE: return CC3;
        default: return -1;
        }

    case CCVHANYmode:
      switch (GET_CODE (code))
        {
        case GT: return CC0 | CC1;
        case LE: return CC3 | CC1;
        default: return -1;
        }

    case CCVHUmode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0;
        case LEU: return CC3;
        default: return -1;
        }

    case CCVHUANYmode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0 | CC1;
        case LEU: return CC3 | CC1;
        default: return -1;
        }

      /* FP vector compare modes.  */

    case CCVFHmode:
      switch (GET_CODE (code))
        {
        case GT:   return CC0;
        case UNLE: return CC3;
        default: return -1;
        }

    case CCVFHANYmode:
      switch (GET_CODE (code))
        {
        case GT:   return CC0 | CC1;
        case UNLE: return CC3 | CC1;
        default: return -1;
        }

    case CCVFHEmode:
      switch (GET_CODE (code))
        {
        case GE:   return CC0;
        case UNLT: return CC3;
        default: return -1;
        }

    case CCVFHEANYmode:
      switch (GET_CODE (code))
        {
        case GE:   return CC0 | CC1;
        case UNLT: return CC3 | CC1;
        default: return -1;
        }

    case CCRAWmode:
      switch (GET_CODE (code))
        {
        case EQ:
          return INTVAL (XEXP (code, 1));
        case NE:
          return (INTVAL (XEXP (code, 1))) ^ 0xf;
        default: return -1;
        }

    default:
      return -1;
    }
}
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
    {
    case EQ:
      return CC0;
    case NE:
      return CC1 | CC2;
    case LT:
    case LTU:
      return CC1;
    case GT:
    case GTU:
      return CC2;
    case LE:
    case LEU:
      return CC0 | CC1;
    case GE:
    case GEU:
      return CC0 | CC2;
    default:
      gcc_unreachable ();
    }
  return -1;
}
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && (XEXP (code, 1) == const0_rtx
          || (GET_MODE (XEXP (code, 0)) == CCRAWmode
              && CONST_INT_P (XEXP (code, 1)))))
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
/* Return the part of OP which has a value different from DEF.
   The size of the part is determined by MODE.
   Use this function only if you already know that OP really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
        return value & part_mask;
    }

  gcc_unreachable ();
}
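
/* Worked example (illustration only): s390_extract_part of
   (const_int 0x12340000) with MODE == HImode and DEF == 0 skips the
   all-zero low halfword and returns 0x1234 from the second
   iteration.  */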
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

int
s390_single_part (rtx op,
                  machine_mode mode,
                  machine_mode part_mode,
                  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
        {
          if (part != -1)
            return -1;
          else
            part = i;
        }
    }
  return part == -1 ? -1 : n_parts - 1 - part;
}
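
/* Worked example (illustration only): for (const_int 0xffff) in DImode
   with PART_MODE == HImode and DEF == 0 only the lowest halfword
   differs from DEF; it is found at i == 0, and the big-endian
   conversion above returns part number n_parts - 1 - 0 == 3.  */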
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in IN.  POS and LENGTH can be used
   to obtain the start position and the length of the bitfield.

   POS gives the position of the first bit of the bitfield counting
   from the lowest order bit starting with zero.  In order to use this
   value for S/390 instructions this has to be converted to "bits big
   endian" style.  */

bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
                           int *pos, int *length)
{
  int tmp_pos = 0;
  int tmp_length = 0;
  int i;
  unsigned HOST_WIDE_INT mask = 1ULL;
  bool contiguous = false;

  for (i = 0; i < size; mask <<= 1, i++)
    {
      if (contiguous)
        {
          if (mask & in)
            tmp_length++;
          else
            break;
        }
      else
        {
          if (mask & in)
            {
              contiguous = true;
              tmp_length++;
            }
          else
            tmp_pos++;
        }
    }

  if (!tmp_length)
    return false;

  /* Calculate a mask for all bits beyond the contiguous bits.  */
  mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));

  if ((unsigned)size < sizeof (HOST_WIDE_INT) * BITS_PER_UNIT)
    mask &= (HOST_WIDE_INT_1U << size) - 1;

  if (mask & in)
    return false;

  if (tmp_length + tmp_pos - 1 > size)
    return false;

  if (length)
    *length = tmp_length;

  if (pos)
    *pos = tmp_pos;

  return true;
}
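
/* Worked example (illustration only): for IN == 0x0ff0 and SIZE == 16
   the bits 4..11 form the only contiguous run, so the function
   succeeds with *POS == 4 and *LENGTH == 8.  */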
/* Return true if OP contains the same contiguous bitfield in *all*
   its elements.  START and END can be used to obtain the start and
   end position of the bitfield.

   START/END give the position of the first/last bit of the bitfield
   counting from the lowest order bit starting with zero.  In order to
   use these values for S/390 instructions this has to be converted to
   "bits big endian" style.  */

bool
s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
{
  unsigned HOST_WIDE_INT mask;
  int length, size;
  rtx elt;

  if (!const_vec_duplicate_p (op, &elt)
      || !CONST_INT_P (elt))
    return false;

  size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));

  /* We cannot deal with V1TI/V1TF.  This would require a vgmq.  */
  if (size > 64)
    return false;

  mask = UINTVAL (elt);
  if (s390_contiguous_bitmask_p (mask, size, start,
                                 end != NULL ? &length : NULL))
    {
      if (end != NULL)
        *end = *start + length - 1;
      return true;
    }
  /* 0xff00000f style immediates can be covered by swapping start and
     end indices in vgm.  */
  if (s390_contiguous_bitmask_p (~mask, size, start,
                                 end != NULL ? &length : NULL))
    {
      if (end != NULL)
        *end = *start - 1;
      if (start != NULL)
        *start = *start + length;
      return true;
    }
  return false;
}
/* Return true if OP consists only of byte chunks that are either 0 or
   0xff.  If MASK != NULL a byte mask is generated which is
   appropriate for the vector generate byte mask instruction.  */

bool
s390_bytemask_vector_p (rtx op, unsigned *mask)
{
  int i;
  unsigned tmp_mask = 0;
  int nunit, unit_size;

  if (!VECTOR_MODE_P (GET_MODE (op))
      || GET_CODE (op) != CONST_VECTOR
      || !CONST_INT_P (XVECEXP (op, 0, 0)))
    return false;

  nunit = GET_MODE_NUNITS (GET_MODE (op));
  unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));

  for (i = 0; i < nunit; i++)
    {
      unsigned HOST_WIDE_INT c;
      int j;

      if (!CONST_INT_P (XVECEXP (op, 0, i)))
        return false;

      c = UINTVAL (XVECEXP (op, 0, i));
      for (j = 0; j < unit_size; j++)
        {
          if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
            return false;
          tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
          c = c >> BITS_PER_UNIT;
        }
    }

  if (mask != NULL)
    *mask = tmp_mask;

  return true;
}
/* Check whether a rotate of ROTL followed by an AND of CONTIG is
   equivalent to a shift followed by the AND.  In particular, CONTIG
   should not overlap the (rotated) bit 0/bit 63 gap.  Negative values
   for ROTL indicate a rotate to the right.  */

bool
s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
{
  int pos, len;
  bool ok;

  ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
  gcc_assert (ok);

  return ((rotl >= 0 && rotl <= pos)
          || (rotl < 0 && -rotl <= bitsize - len - pos));
}
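
/* Worked example (illustration only): for BITSIZE == 32 and
   CONTIG == 0x0000ff00 (pos 8, len 8) a left rotate of up to 8 or a
   right rotate of up to 32 - 8 - 8 == 16 keeps the field clear of the
   bit 0/bit 63 gap, so e.g. ROTL == 8 or ROTL == -16 is OK.  */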
/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
{
  /* Floating point and vector registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
    return false;

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
        return false;
    }

  return true;
}
2432 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2433 and [MEM2, MEM2 + SIZE] do overlap and false
2437 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2439 rtx addr1, addr2, addr_delta;
2440 HOST_WIDE_INT delta;
2442 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2448 addr1 = XEXP (mem1, 0);
2449 addr2 = XEXP (mem2, 0);
2451 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2453 /* This overlapping check is used by peepholes merging memory block operations.
2454 Overlapping operations would otherwise be recognized by the S/390 hardware
2455 and would fall back to a slower implementation. Allowing overlapping
2456 operations would lead to slow code but not to wrong code. Therefore we are
2457 somewhat optimistic if we cannot prove that the memory blocks are distinct.
2459 That's why we return false here although this may accept operations on
2460 overlapping memory areas. */
2461 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2464 delta = INTVAL (addr_delta);
2467 || (delta > 0 && delta < size)
2468 || (delta < 0 && -delta < size))
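/* The distance test above, stated on plain integers (illustrative
   sketch; the helper is hypothetical): two SIZE-byte blocks starting
   at A and B overlap iff the distance between their start addresses
   is smaller than SIZE in absolute value.  */
#if 0
static int
blocks_overlap_example (long a, long b, long size)
{
  long delta = b - a;

  return (delta == 0
          || (delta > 0 && delta < size)
          || (delta < 0 && -delta < size));
}
#endif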
2474 /* Check whether the address of memory reference MEM2 equals exactly
2475 the address of memory reference MEM1 plus DELTA. Return true if
2476 we can prove this to be the case, false otherwise. */
2479 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2481 rtx addr1, addr2, addr_delta;
2483 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2486 addr1 = XEXP (mem1, 0);
2487 addr2 = XEXP (mem2, 0);
2489 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2490 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2496 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2499 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2502 machine_mode wmode = mode;
2503 rtx dst = operands[0];
2504 rtx src1 = operands[1];
2505 rtx src2 = operands[2];
2508 /* If we cannot handle the operation directly, use a temp register. */
2509 if (!s390_logical_operator_ok_p (operands))
2510 dst = gen_reg_rtx (mode);
2512 /* QImode and HImode patterns make sense only if we have a destination
2513 in memory. Otherwise perform the operation in SImode. */
2514 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2517 /* Widen operands if required. */
2520 if (GET_CODE (dst) == SUBREG
2521 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2523 else if (REG_P (dst))
2524 dst = gen_rtx_SUBREG (wmode, dst, 0);
2526 dst = gen_reg_rtx (wmode);
2528 if (GET_CODE (src1) == SUBREG
2529 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2531 else if (GET_MODE (src1) != VOIDmode)
2532 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2534 if (GET_CODE (src2) == SUBREG
2535 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2537 else if (GET_MODE (src2) != VOIDmode)
2538 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2541 /* Emit the instruction. */
2542 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2543 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2544 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2546 /* Fix up the destination if needed. */
2547 if (dst != operands[0])
2548 emit_move_insn (operands[0], gen_lowpart (mode, dst));
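/* Illustrative example: an XOR of two QImode registers is widened
   above and emitted as an SImode XOR on (subreg:SI ...) operands.
   Only with a destination in memory does the operation stay in
   QImode, where instructions operating directly on storage (e.g.
   the SI-type XI for immediate operands) can be used.  */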
2551 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2554 s390_logical_operator_ok_p (rtx *operands)
2556 /* If the destination operand is in memory, it needs to coincide
2557 with one of the source operands. After reload, it has to be
2558 the first source operand. */
2559 if (GET_CODE (operands[0]) == MEM)
2560 return rtx_equal_p (operands[0], operands[1])
2561 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2566 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2567 operand IMMOP to switch from SS to SI type instructions. */
2570 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2572 int def = code == AND ? -1 : 0;
2576 gcc_assert (GET_CODE (*memop) == MEM);
2577 gcc_assert (!MEM_VOLATILE_P (*memop));
2579 mask = s390_extract_part (*immop, QImode, def);
2580 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2581 gcc_assert (part >= 0);
2583 *memop = adjust_address (*memop, QImode, part);
2584 *immop = gen_int_mode (mask, QImode);
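/* A minimal sketch of the narrowing idea (hypothetical helper, not
   part of GCC): an AND on a 4-byte memory word can be reduced to a
   single-byte NI when every other byte of the immediate is 0xff,
   i.e. leaves its memory byte unchanged.  The part index counts
   from the most significant byte, as in s390_single_part.  */
#if 0
static int
single_byte_and_example (unsigned mask, int *part, unsigned char *byte)
{
  int i, candidate = -1;

  for (i = 0; i < 4; i++)
    {
      unsigned b = (mask >> (8 * (3 - i))) & 0xff;
      if (b != 0xff)                     /* differs from the AND default */
        {
          if (candidate >= 0)
            return 0;                    /* more than one byte affected */
          candidate = i;
          *byte = (unsigned char) b;
        }
    }
  if (candidate < 0)
    return 0;                            /* mask is all ones: a no-op */
  *part = candidate;
  return 1;
}
#endif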
2588 /* How to allocate a 'struct machine_function'. */
2590 static struct machine_function *
2591 s390_init_machine_status (void)
2593 return ggc_cleared_alloc<machine_function> ();
2596 /* Map for smallest class containing reg regno. */
2598 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2599 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2600 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2601 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2602 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2603 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2604 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2605 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2606 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2607 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2608 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2609 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2610 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2611 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2612 VEC_REGS, VEC_REGS /* 52 */
2615 /* Return attribute type of insn. */
2617 static enum attr_type
2618 s390_safe_attr_type (rtx_insn *insn)
2620 if (recog_memoized (insn) >= 0)
2621 return get_attr_type (insn);
2626 /* Return true if DISP is a valid short displacement. */
2629 s390_short_displacement (rtx disp)
2631 /* No displacement is OK. */
2635 /* Without the long displacement facility we don't need to
2636 distinguish between long and short displacements. */
2637 if (!TARGET_LONG_DISPLACEMENT)
2640 /* Integer displacement in range. */
2641 if (GET_CODE (disp) == CONST_INT)
2642 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2644 /* GOT offset is not OK, the GOT can be large. */
2645 if (GET_CODE (disp) == CONST
2646 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2647 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2648 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2651 /* All other symbolic constants are literal pool references,
2652 which are OK as the literal pool must be small. */
2653 if (GET_CODE (disp) == CONST)
2659 /* Decompose an RTL expression ADDR for a memory address into
2660 its components, returned in OUT.
2662 Returns false if ADDR is not a valid memory address, true
2663 otherwise. If OUT is NULL, don't return the components,
2664 but check for validity only.
2666 Note: Only addresses in canonical form are recognized.
2667 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2668 canonical form so that they will be recognized. */
2671 s390_decompose_address (rtx addr, struct s390_address *out)
2673 HOST_WIDE_INT offset = 0;
2674 rtx base = NULL_RTX;
2675 rtx indx = NULL_RTX;
2676 rtx disp = NULL_RTX;
2678 bool pointer = false;
2679 bool base_ptr = false;
2680 bool indx_ptr = false;
2681 bool literal_pool = false;
2683 /* We may need to substitute the literal pool base register into the address
2684 below. However, at this point we do not know which register is going to
2685 be used as base, so we substitute the arg pointer register. This is going
2686 to be treated as holding a pointer below -- it shouldn't be used for any
2687 other purpose. */
2688 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2690 /* Decompose address into base + index + displacement. */
2692 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2695 else if (GET_CODE (addr) == PLUS)
2697 rtx op0 = XEXP (addr, 0);
2698 rtx op1 = XEXP (addr, 1);
2699 enum rtx_code code0 = GET_CODE (op0);
2700 enum rtx_code code1 = GET_CODE (op1);
2702 if (code0 == REG || code0 == UNSPEC)
2704 if (code1 == REG || code1 == UNSPEC)
2706 indx = op0; /* index + base */
2712 base = op0; /* base + displacement */
2717 else if (code0 == PLUS)
2719 indx = XEXP (op0, 0); /* index + base + disp */
2720 base = XEXP (op0, 1);
2731 disp = addr; /* displacement */
2733 /* Extract integer part of displacement. */
2737 if (GET_CODE (disp) == CONST_INT)
2739 offset = INTVAL (disp);
2742 else if (GET_CODE (disp) == CONST
2743 && GET_CODE (XEXP (disp, 0)) == PLUS
2744 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2746 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2747 disp = XEXP (XEXP (disp, 0), 0);
2751 /* Strip off CONST here to avoid special case tests later. */
2752 if (disp && GET_CODE (disp) == CONST)
2753 disp = XEXP (disp, 0);
2755 /* We can convert literal pool addresses to
2756 displacements by basing them off the base register. */
2757 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2759 /* Either base or index must be free to hold the base register. */
2761 base = fake_pool_base, literal_pool = true;
2763 indx = fake_pool_base, literal_pool = true;
2767 /* Mark up the displacement. */
2768 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2769 UNSPEC_LTREL_OFFSET);
2772 /* Validate base register. */
2775 if (GET_CODE (base) == UNSPEC)
2776 switch (XINT (base, 1))
2780 disp = gen_rtx_UNSPEC (Pmode,
2781 gen_rtvec (1, XVECEXP (base, 0, 0)),
2782 UNSPEC_LTREL_OFFSET);
2786 base = XVECEXP (base, 0, 1);
2789 case UNSPEC_LTREL_BASE:
2790 if (XVECLEN (base, 0) == 1)
2791 base = fake_pool_base, literal_pool = true;
2793 base = XVECEXP (base, 0, 1);
2801 || (GET_MODE (base) != SImode
2802 && GET_MODE (base) != Pmode))
2805 if (REGNO (base) == STACK_POINTER_REGNUM
2806 || REGNO (base) == FRAME_POINTER_REGNUM
2807 || ((reload_completed || reload_in_progress)
2808 && frame_pointer_needed
2809 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2810 || REGNO (base) == ARG_POINTER_REGNUM
2812 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2813 pointer = base_ptr = true;
2815 if ((reload_completed || reload_in_progress)
2816 && base == cfun->machine->base_reg)
2817 pointer = base_ptr = literal_pool = true;
2820 /* Validate index register. */
2823 if (GET_CODE (indx) == UNSPEC)
2824 switch (XINT (indx, 1))
2828 disp = gen_rtx_UNSPEC (Pmode,
2829 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2830 UNSPEC_LTREL_OFFSET);
2834 indx = XVECEXP (indx, 0, 1);
2837 case UNSPEC_LTREL_BASE:
2838 if (XVECLEN (indx, 0) == 1)
2839 indx = fake_pool_base, literal_pool = true;
2841 indx = XVECEXP (indx, 0, 1);
2849 || (GET_MODE (indx) != SImode
2850 && GET_MODE (indx) != Pmode))
2853 if (REGNO (indx) == STACK_POINTER_REGNUM
2854 || REGNO (indx) == FRAME_POINTER_REGNUM
2855 || ((reload_completed || reload_in_progress)
2856 && frame_pointer_needed
2857 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2858 || REGNO (indx) == ARG_POINTER_REGNUM
2860 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2861 pointer = indx_ptr = true;
2863 if ((reload_completed || reload_in_progress)
2864 && indx == cfun->machine->base_reg)
2865 pointer = indx_ptr = literal_pool = true;
2868 /* Prefer to use pointer as base, not index. */
2869 if (base && indx && !base_ptr
2870 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2877 /* Validate displacement. */
2880 /* If virtual registers are involved, the displacement will change later
2881 anyway as the virtual registers get eliminated. This could make a
2882 valid displacement invalid, but it is more likely to make an invalid
2883 displacement valid, because we sometimes access the register save area
2884 via negative offsets to one of those registers.
2885 Thus we don't check the displacement for validity here. If after
2886 elimination the displacement turns out to be invalid after all,
2887 this is fixed up by reload in any case. */
2888 /* LRA always keeps displacements up to date, and we need to
2889 know that the displacement is right throughout LRA, not only at
2890 the final elimination. */
2892 || (base != arg_pointer_rtx
2893 && indx != arg_pointer_rtx
2894 && base != return_address_pointer_rtx
2895 && indx != return_address_pointer_rtx
2896 && base != frame_pointer_rtx
2897 && indx != frame_pointer_rtx
2898 && base != virtual_stack_vars_rtx
2899 && indx != virtual_stack_vars_rtx))
2900 if (!DISP_IN_RANGE (offset))
2905 /* All the special cases are pointers. */
2908 /* In the small-PIC case, the linker converts @GOT
2909 and @GOTNTPOFF offsets to possible displacements. */
2910 if (GET_CODE (disp) == UNSPEC
2911 && (XINT (disp, 1) == UNSPEC_GOT
2912 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2918 /* Accept pool label offsets. */
2919 else if (GET_CODE (disp) == UNSPEC
2920 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2923 /* Accept literal pool references. */
2924 else if (GET_CODE (disp) == UNSPEC
2925 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2927 /* In case CSE pulled a non-literal-pool reference out of
2928 the pool we have to reject the address. This is
2929 especially important when loading the GOT pointer on
2930 non-zarch CPUs. In this case the literal pool contains an
2931 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2932 will most likely exceed the displacement range. */
2933 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2934 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2937 orig_disp = gen_rtx_CONST (Pmode, disp);
2940 /* If we have an offset, make sure it does not
2941 exceed the size of the constant pool entry. */
2942 rtx sym = XVECEXP (disp, 0, 0);
2943 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2946 orig_disp = plus_constant (Pmode, orig_disp, offset);
2961 out->disp = orig_disp;
2962 out->pointer = pointer;
2963 out->literal_pool = literal_pool;
2969 /* Decompose an RTL expression OP for a shift count into its components,
2970 and return the base register in BASE and the offset in OFFSET.
2972 Return true if OP is a valid shift count, false if not. */
2975 s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
2977 HOST_WIDE_INT off = 0;
2979 /* We can have an integer constant, an address register,
2980 or a sum of the two. */
2981 if (GET_CODE (op) == CONST_INT)
2986 if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
2988 off = INTVAL (XEXP (op, 1));
2991 while (op && GET_CODE (op) == SUBREG)
2992 op = SUBREG_REG (op);
2994 if (op && GET_CODE (op) != REG)
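/* Illustrative examples for the decomposition above:
     (const_int 3)                   -> base = NULL, offset = 3
     (reg %r2)                       -> base = %r2,  offset = 0
     (plus (reg %r2) (const_int 3))  -> base = %r2,  offset = 3
   Anything else, e.g. a sum of two registers, is rejected.  */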
3006 /* Return true if OP is a valid address without index. */
3009 s390_legitimate_address_without_index_p (rtx op)
3011 struct s390_address addr;
3013 if (!s390_decompose_address (XEXP (op, 0), &addr))
3022 /* Return TRUE if ADDR is an operand valid for a load/store relative
3023 instruction. Be aware that the alignment of the operand needs to
3024 be checked separately.
3025 Valid addresses are single references or a sum of a reference and a
3026 constant integer. Return these parts in SYMREF and ADDEND. You can
3027 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3028 values. Literal pool references are *not* considered symbol
3029 references. */
3032 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3034 HOST_WIDE_INT tmpaddend = 0;
3036 if (GET_CODE (addr) == CONST)
3037 addr = XEXP (addr, 0);
3039 if (GET_CODE (addr) == PLUS)
3041 if (!CONST_INT_P (XEXP (addr, 1)))
3044 tmpaddend = INTVAL (XEXP (addr, 1));
3045 addr = XEXP (addr, 0);
3048 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3049 || (GET_CODE (addr) == UNSPEC
3050 && (XINT (addr, 1) == UNSPEC_GOTENT
3051 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3056 *addend = tmpaddend;
3063 /* Return true if the address in OP is valid for constraint letter C
3064 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3065 pool MEMs should be accepted. Only the Q, R, S, T constraint
3066 letters are allowed for C. */
3069 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3071 struct s390_address addr;
3072 bool decomposed = false;
3074 /* This check makes sure that no symbolic addresses (except literal
3075 pool references) are accepted by the R or T constraints. */
3076 if (s390_loadrelative_operand_p (op, NULL, NULL))
3079 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3082 if (!s390_decompose_address (op, &addr))
3084 if (addr.literal_pool)
3091 case 'Q': /* no index short displacement */
3092 if (!decomposed && !s390_decompose_address (op, &addr))
3096 if (!s390_short_displacement (addr.disp))
3100 case 'R': /* with index short displacement */
3101 if (TARGET_LONG_DISPLACEMENT)
3103 if (!decomposed && !s390_decompose_address (op, &addr))
3105 if (!s390_short_displacement (addr.disp))
3108 /* Any invalid address here will be fixed up by reload,
3109 so accept it for the most generic constraint. */
3112 case 'S': /* no index long displacement */
3113 if (!TARGET_LONG_DISPLACEMENT)
3115 if (!decomposed && !s390_decompose_address (op, &addr))
3119 if (s390_short_displacement (addr.disp))
3123 case 'T': /* with index long displacement */
3124 if (!TARGET_LONG_DISPLACEMENT)
3126 /* Any invalid address here will be fixed up by reload,
3127 so accept it for the most generic constraint. */
3128 if ((decomposed || s390_decompose_address (op, &addr))
3129 && s390_short_displacement (addr.disp))
3139 /* Evaluates constraint strings described by the regular expression
3140 ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
3141 the constraint given in STR, and 0 otherwise. */
3144 s390_mem_constraint (const char *str, rtx op)
3151 /* Check for offsettable variants of memory constraints. */
3152 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3154 if ((reload_completed || reload_in_progress)
3155 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3157 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3159 /* Check for non-literal-pool variants of memory constraints. */
3162 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3167 if (GET_CODE (op) != MEM)
3169 return s390_check_qrst_address (c, XEXP (op, 0), true);
3171 return (s390_check_qrst_address ('Q', op, true)
3172 || s390_check_qrst_address ('R', op, true));
3174 return (s390_check_qrst_address ('S', op, true)
3175 || s390_check_qrst_address ('T', op, true));
3177 /* Simply check for the basic form of a shift count. Reload will
3178 take care of making sure we have a proper base register. */
3179 if (!s390_decompose_shift_count (op, NULL, NULL))
3183 return s390_check_qrst_address (str[1], op, true);
3191 /* Evaluates constraint strings starting with letter O. Input
3192 parameter C is the second letter following the "O" in the constraint
3193 string. Returns 1 if VALUE meets the respective constraint and 0
3194 otherwise. */
3197 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3205 return trunc_int_for_mode (value, SImode) == value;
3209 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3212 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3220 /* Evaluates constraint strings starting with letter N. Parameter STR
3221 contains the letters following letter "N" in the constraint string.
3222 Returns true if VALUE matches the constraint. */
3225 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3227 machine_mode mode, part_mode;
3229 int part, part_goal;
3235 part_goal = str[0] - '0';
3279 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3282 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3285 if (part_goal != -1 && part_goal != part)
3292 /* Returns true if the input parameter VALUE is a float zero. */
3295 s390_float_const_zero_p (rtx value)
3297 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3298 && value == CONST0_RTX (GET_MODE (value)));
3301 /* Implement TARGET_REGISTER_MOVE_COST. */
3304 s390_register_move_cost (machine_mode mode,
3305 reg_class_t from, reg_class_t to)
3307 /* On s390, copy between fprs and gprs is expensive. */
3309 /* Copying becomes somewhat faster when ldgr/lgdr are available. */
3310 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3312 /* ldgr is single cycle. */
3313 if (reg_classes_intersect_p (from, GENERAL_REGS)
3314 && reg_classes_intersect_p (to, FP_REGS))
3316 /* lgdr needs 3 cycles. */
3317 if (reg_classes_intersect_p (to, GENERAL_REGS)
3318 && reg_classes_intersect_p (from, FP_REGS))
3322 /* Otherwise copying is done via memory. */
3323 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3324 && reg_classes_intersect_p (to, FP_REGS))
3325 || (reg_classes_intersect_p (from, FP_REGS)
3326 && reg_classes_intersect_p (to, GENERAL_REGS)))
3332 /* Implement TARGET_MEMORY_MOVE_COST. */
3335 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3336 reg_class_t rclass ATTRIBUTE_UNUSED,
3337 bool in ATTRIBUTE_UNUSED)
3342 /* Compute a (partial) cost for rtx X. Return true if the complete
3343 cost has been computed, and false if subexpressions should be
3344 scanned. In either case, *TOTAL contains the cost result.
3345 OUTER_CODE contains the code of the superexpression of x. */
3348 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3349 int opno ATTRIBUTE_UNUSED,
3350 int *total, bool speed ATTRIBUTE_UNUSED)
3352 int code = GET_CODE (x);
3360 case CONST_WIDE_INT:
3367 if (GET_CODE (XEXP (x, 0)) == AND
3368 && GET_CODE (XEXP (x, 1)) == ASHIFT
3369 && REG_P (XEXP (XEXP (x, 0), 0))
3370 && REG_P (XEXP (XEXP (x, 1), 0))
3371 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3372 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3373 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3374 (1UL << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3376 *total = COSTS_N_INSNS (2);
3388 *total = COSTS_N_INSNS (1);
3393 *total = COSTS_N_INSNS (1);
3401 rtx left = XEXP (x, 0);
3402 rtx right = XEXP (x, 1);
3403 if (GET_CODE (right) == CONST_INT
3404 && CONST_OK_FOR_K (INTVAL (right)))
3405 *total = s390_cost->mhi;
3406 else if (GET_CODE (left) == SIGN_EXTEND)
3407 *total = s390_cost->mh;
3409 *total = s390_cost->ms; /* msr, ms, msy */
3414 rtx left = XEXP (x, 0);
3415 rtx right = XEXP (x, 1);
3418 if (GET_CODE (right) == CONST_INT
3419 && CONST_OK_FOR_K (INTVAL (right)))
3420 *total = s390_cost->mghi;
3421 else if (GET_CODE (left) == SIGN_EXTEND)
3422 *total = s390_cost->msgf;
3424 *total = s390_cost->msg; /* msgr, msg */
3426 else /* TARGET_31BIT */
3428 if (GET_CODE (left) == SIGN_EXTEND
3429 && GET_CODE (right) == SIGN_EXTEND)
3430 /* mulsidi case: mr, m */
3431 *total = s390_cost->m;
3432 else if (GET_CODE (left) == ZERO_EXTEND
3433 && GET_CODE (right) == ZERO_EXTEND
3434 && TARGET_CPU_ZARCH)
3435 /* umulsidi case: ml, mlr */
3436 *total = s390_cost->ml;
3438 /* Complex calculation is required. */
3439 *total = COSTS_N_INSNS (40);
3445 *total = s390_cost->mult_df;
3448 *total = s390_cost->mxbr;
3459 *total = s390_cost->madbr;
3462 *total = s390_cost->maebr;
3467 /* Negation of the third argument is free: FMSUB. */
3468 if (GET_CODE (XEXP (x, 2)) == NEG)
3470 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3471 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3472 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3479 if (mode == TImode) /* 128 bit division */
3480 *total = s390_cost->dlgr;
3481 else if (mode == DImode)
3483 rtx right = XEXP (x, 1);
3484 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3485 *total = s390_cost->dlr;
3486 else /* 64 by 64 bit division */
3487 *total = s390_cost->dlgr;
3489 else if (mode == SImode) /* 32 bit division */
3490 *total = s390_cost->dlr;
3497 rtx right = XEXP (x, 1);
3498 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3500 *total = s390_cost->dsgfr;
3502 *total = s390_cost->dr;
3503 else /* 64 by 64 bit division */
3504 *total = s390_cost->dsgr;
3506 else if (mode == SImode) /* 32 bit division */
3507 *total = s390_cost->dlr;
3508 else if (mode == SFmode)
3510 *total = s390_cost->debr;
3512 else if (mode == DFmode)
3514 *total = s390_cost->ddbr;
3516 else if (mode == TFmode)
3518 *total = s390_cost->dxbr;
3524 *total = s390_cost->sqebr;
3525 else if (mode == DFmode)
3526 *total = s390_cost->sqdbr;
3528 *total = s390_cost->sqxbr;
3533 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3534 || outer_code == PLUS || outer_code == MINUS
3535 || outer_code == COMPARE)
3540 *total = COSTS_N_INSNS (1);
3541 if (GET_CODE (XEXP (x, 0)) == AND
3542 && GET_CODE (XEXP (x, 1)) == CONST_INT
3543 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3545 rtx op0 = XEXP (XEXP (x, 0), 0);
3546 rtx op1 = XEXP (XEXP (x, 0), 1);
3547 rtx op2 = XEXP (x, 1);
3549 if (memory_operand (op0, GET_MODE (op0))
3550 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3552 if (register_operand (op0, GET_MODE (op0))
3553 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3563 /* Return the cost of an address rtx ADDR. */
3566 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3567 addr_space_t as ATTRIBUTE_UNUSED,
3568 bool speed ATTRIBUTE_UNUSED)
3570 struct s390_address ad;
3571 if (!s390_decompose_address (addr, &ad))
3574 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3577 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3578 otherwise return 0. */
3581 tls_symbolic_operand (rtx op)
3583 if (GET_CODE (op) != SYMBOL_REF)
3585 return SYMBOL_REF_TLS_MODEL (op);
3588 /* Split DImode access register reference REG (on 64-bit) into its constituent
3589 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3590 gen_highpart cannot be used as they assume all registers are word-sized,
3591 while our access registers have only half that size. */
3594 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3596 gcc_assert (TARGET_64BIT);
3597 gcc_assert (ACCESS_REG_P (reg));
3598 gcc_assert (GET_MODE (reg) == DImode);
3599 gcc_assert (!(REGNO (reg) & 1));
3601 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3602 *hi = gen_rtx_REG (SImode, REGNO (reg));
3605 /* Return true if OP contains a symbol reference. */
3608 symbolic_reference_mentioned_p (rtx op)
3613 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3616 fmt = GET_RTX_FORMAT (GET_CODE (op));
3617 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3623 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3624 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3628 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3635 /* Return true if OP contains a reference to a thread-local symbol. */
3638 tls_symbolic_reference_mentioned_p (rtx op)
3643 if (GET_CODE (op) == SYMBOL_REF)
3644 return tls_symbolic_operand (op);
3646 fmt = GET_RTX_FORMAT (GET_CODE (op));
3647 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3653 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3654 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3658 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3666 /* Return true if OP is a legitimate general operand when
3667 generating PIC code. It is given that flag_pic is on
3668 and that OP satisfies CONSTANT_P. */
3671 legitimate_pic_operand_p (rtx op)
3673 /* Accept all non-symbolic constants. */
3674 if (!SYMBOLIC_CONST (op))
3677 /* Reject everything else; must be handled
3678 via emit_symbolic_move. */
3682 /* Returns true if the constant value OP is a legitimate general operand.
3683 It is given that OP satisfies CONSTANT_P. */
3686 s390_legitimate_constant_p (machine_mode mode, rtx op)
3688 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3690 if (GET_MODE_SIZE (mode) != 16)
3693 if (!satisfies_constraint_j00 (op)
3694 && !satisfies_constraint_jm1 (op)
3695 && !satisfies_constraint_jKK (op)
3696 && !satisfies_constraint_jxx (op)
3697 && !satisfies_constraint_jyy (op))
3701 /* Accept all non-symbolic constants. */
3702 if (!SYMBOLIC_CONST (op))
3705 /* Accept immediate LARL operands. */
3706 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3709 /* Thread-local symbols are never legal constants. This is
3710 so that emit_call knows that computing such addresses
3711 might require a function call. */
3712 if (TLS_SYMBOLIC_CONST (op))
3715 /* In the PIC case, symbolic constants must *not* be
3716 forced into the literal pool. We accept them here,
3717 so that they will be handled by emit_symbolic_move. */
3721 /* All remaining non-PIC symbolic constants are
3722 forced into the literal pool. */
3726 /* Determine if it's legal to put X into the constant pool. This
3727 is not possible if X contains the address of a symbol that is
3728 not constant (TLS) or not known at final link time (PIC). */
3731 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3733 switch (GET_CODE (x))
3737 case CONST_WIDE_INT:
3739 /* Accept all non-symbolic constants. */
3743 /* Labels are OK iff we are non-PIC. */
3744 return flag_pic != 0;
3747 /* 'Naked' TLS symbol references are never OK,
3748 non-TLS symbols are OK iff we are non-PIC. */
3749 if (tls_symbolic_operand (x))
3752 return flag_pic != 0;
3755 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3758 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3759 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3762 switch (XINT (x, 1))
3764 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3765 case UNSPEC_LTREL_OFFSET:
3773 case UNSPEC_GOTNTPOFF:
3774 case UNSPEC_INDNTPOFF:
3777 /* If the literal pool shares the code section, execute
3778 template placeholders may be put into the pool as well. */
3780 return TARGET_CPU_ZARCH;
3792 /* Returns true if the constant value OP is a legitimate general
3793 operand during and after reload. The difference to
3794 legitimate_constant_p is that this function will not accept
3795 a constant that would need to be forced to the literal pool
3796 before it can be used as operand.
3797 This function accepts all constants which can be loaded directly
3798 into a GPR. */
3801 legitimate_reload_constant_p (rtx op)
3803 /* Accept la(y) operands. */
3804 if (GET_CODE (op) == CONST_INT
3805 && DISP_IN_RANGE (INTVAL (op)))
3808 /* Accept l(g)hi/l(g)fi operands. */
3809 if (GET_CODE (op) == CONST_INT
3810 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3813 /* Accept lliXX operands. */
3815 && GET_CODE (op) == CONST_INT
3816 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3817 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3821 && GET_CODE (op) == CONST_INT
3822 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3823 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3826 /* Accept larl operands. */
3827 if (TARGET_CPU_ZARCH
3828 && larl_operand (op, VOIDmode))
3831 /* Accept floating-point zero operands that fit into a single GPR. */
3832 if (GET_CODE (op) == CONST_DOUBLE
3833 && s390_float_const_zero_p (op)
3834 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3837 /* Accept double-word operands that can be split. */
3838 if (GET_CODE (op) == CONST_WIDE_INT
3839 || (GET_CODE (op) == CONST_INT
3840 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3842 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3843 rtx hi = operand_subword (op, 0, 0, dword_mode);
3844 rtx lo = operand_subword (op, 1, 0, dword_mode);
3845 return legitimate_reload_constant_p (hi)
3846 && legitimate_reload_constant_p (lo);
3849 /* Everything else cannot be handled without reload. */
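/* A minimal sketch of the doubleword split above (hypothetical
   helper, not part of GCC): a doubleword constant is acceptable iff
   both word-sized halves are individually loadable, with subword 0
   being the most significant half on this big-endian target.  */
#if 0
static void
split_dword_example (unsigned long long v, unsigned *hi, unsigned *lo)
{
  *hi = (unsigned) (v >> 32);            /* subword 0 */
  *lo = (unsigned) v;                    /* subword 1 */
}
#endif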
3853 /* Returns true if the constant value OP is a legitimate fp operand
3854 during and after reload.
3855 This function accepts all constants which can be loaded directly
3856 into an FPR. */
3859 legitimate_reload_fp_constant_p (rtx op)
3861 /* Accept floating-point zero operands if the load zero instruction
3862 can be used. Prior to z196 the load fp zero instruction caused a
3863 performance penalty if the result is used as a BFP number. */
3865 && GET_CODE (op) == CONST_DOUBLE
3866 && s390_float_const_zero_p (op))
3872 /* Returns true if the constant value OP is a legitimate vector operand
3873 during and after reload.
3874 This function accepts all constants which can be loaded directly
3875 into a VR. */
3878 legitimate_reload_vector_constant_p (rtx op)
3880 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3881 && (satisfies_constraint_j00 (op)
3882 || satisfies_constraint_jm1 (op)
3883 || satisfies_constraint_jKK (op)
3884 || satisfies_constraint_jxx (op)
3885 || satisfies_constraint_jyy (op)))
3891 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3892 return the class of reg to actually use. */
3895 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3897 switch (GET_CODE (op))
3899 /* Constants we cannot reload into general registers
3900 must be forced into the literal pool. */
3904 case CONST_WIDE_INT:
3905 if (reg_class_subset_p (GENERAL_REGS, rclass)
3906 && legitimate_reload_constant_p (op))
3907 return GENERAL_REGS;
3908 else if (reg_class_subset_p (ADDR_REGS, rclass)
3909 && legitimate_reload_constant_p (op))
3911 else if (reg_class_subset_p (FP_REGS, rclass)
3912 && legitimate_reload_fp_constant_p (op))
3914 else if (reg_class_subset_p (VEC_REGS, rclass)
3915 && legitimate_reload_vector_constant_p (op))
3920 /* If a symbolic constant or a PLUS is reloaded,
3921 it is most likely being used as an address, so
3922 prefer ADDR_REGS. If 'class' is not a superset
3923 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
3925 /* Symrefs cannot be pushed into the literal pool with -fPIC
3926 so we *MUST NOT* return NO_REGS for these cases
3927 (s390_cannot_force_const_mem will return true).
3929 On the other hand we MUST return NO_REGS for symrefs with
3930 invalid addend which might have been pushed to the literal
3931 pool (no -fPIC). Usually we would expect them to be
3932 handled via secondary reload but this does not happen if
3933 they are used as literal pool slot replacement in reload
3934 inheritance (see emit_input_reload_insns). */
3935 if (TARGET_CPU_ZARCH
3936 && GET_CODE (XEXP (op, 0)) == PLUS
3937 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
3938 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
3940 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
3948 if (!legitimate_reload_constant_p (op))
3952 /* load address will be used. */
3953 if (reg_class_subset_p (ADDR_REGS, rclass))
3965 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
3966 multiple of ALIGNMENT and the SYMBOL_REF being naturally
3967 aligned as well. */
3970 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
3972 HOST_WIDE_INT addend;
3975 /* The "required alignment" might be 0 (e.g. for certain structs
3976 accessed via BLKmode). Early abort in this case, as well as when
3977 an alignment > 8 is required. */
3978 if (alignment < 2 || alignment > 8)
3981 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3984 if (addend & (alignment - 1))
3987 if (GET_CODE (symref) == SYMBOL_REF)
3989 /* We have load-relative instructions for 2-byte, 4-byte, and
3990 8-byte alignment so allow only these. */
3993 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
3994 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
3995 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
3996 default: return false;
4000 if (GET_CODE (symref) == UNSPEC
4001 && alignment <= UNITS_PER_LONG)
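/* The addend test above, stated on plain integers (illustrative
   sketch; the helper is hypothetical): for a power-of-two ALIGNMENT,
   ADDEND is a multiple of ALIGNMENT iff its bits below the alignment
   are all zero.  */
#if 0
static int
addend_aligned_example (long addend, long alignment)
{
  /* ALIGNMENT is assumed to be 2, 4 or 8 here.  */
  return (addend & (alignment - 1)) == 0;
}
#endif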
4007 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4008 operand, SCRATCH is used to reload the even part of the address and
4009 the odd one is added afterwards. */
4012 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4014 HOST_WIDE_INT addend;
4017 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4021 /* Easy case. The addend is even so larl will do fine. */
4022 emit_move_insn (reg, addr);
4025 /* We can leave the scratch register untouched if the target
4026 register is a valid base register. */
4027 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4028 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4031 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4032 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4035 emit_move_insn (scratch,
4036 gen_rtx_CONST (Pmode,
4037 gen_rtx_PLUS (Pmode, symref,
4038 GEN_INT (addend - 1))));
4040 emit_move_insn (scratch, symref);
4042 /* Increment the address using la in order to avoid clobbering cc. */
4043 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
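/* Illustrative instruction sequence for the odd-addend path above
   (hypothetical registers):

     larl  %r1, sym+(addend-1)     (even addend, valid for larl)
     la    %r2, 1(%r1)             (adds the odd bit; la leaves cc intact)
*/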
4047 /* Generate what is necessary to move between REG and MEM using
4048 SCRATCH. The direction is given by TOMEM. */
4051 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4053 /* Reload might have pulled a constant out of the literal pool.
4054 Force it back in. */
4055 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4056 || GET_CODE (mem) == CONST_WIDE_INT
4057 || GET_CODE (mem) == CONST_VECTOR
4058 || GET_CODE (mem) == CONST)
4059 mem = force_const_mem (GET_MODE (reg), mem);
4061 gcc_assert (MEM_P (mem));
4063 /* For a load from memory we can leave the scratch register
4064 untouched if the target register is a valid base register. */
4066 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4067 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4068 && GET_MODE (reg) == GET_MODE (scratch))
4071 /* Load address into scratch register. Since we can't have a
4072 secondary reload for a secondary reload we have to cover the case
4073 where larl would need a secondary reload here as well. */
4074 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4076 /* Now we can use a standard load/store to do the move. */
4078 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4080 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4083 /* Inform reload about cases where moving X with a mode MODE to a register in
4084 RCLASS requires an extra scratch or immediate register. Return the class
4085 needed for the immediate register. */
4088 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4089 machine_mode mode, secondary_reload_info *sri)
4091 enum reg_class rclass = (enum reg_class) rclass_i;
4093 /* Intermediate register needed. */
4094 if (reg_classes_intersect_p (CC_REGS, rclass))
4095 return GENERAL_REGS;
4099 /* The vst/vl vector move instructions allow only for short
4100 displacements. */
4102 && GET_CODE (XEXP (x, 0)) == PLUS
4103 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4104 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4105 && reg_class_subset_p (rclass, VEC_REGS)
4106 && (!reg_class_subset_p (rclass, FP_REGS)
4107 || (GET_MODE_SIZE (mode) > 8
4108 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4111 sri->icode = (TARGET_64BIT ?
4112 CODE_FOR_reloaddi_la_in :
4113 CODE_FOR_reloadsi_la_in);
4115 sri->icode = (TARGET_64BIT ?
4116 CODE_FOR_reloaddi_la_out :
4117 CODE_FOR_reloadsi_la_out);
4123 HOST_WIDE_INT offset;
4126 /* On z10 several optimizer steps may generate larl operands with
4127 an odd addend. */
4129 && s390_loadrelative_operand_p (x, &symref, &offset)
4131 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4132 && (offset & 1) == 1)
4133 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4134 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4136 /* Handle all the (mem (symref)) accesses we cannot use the z10
4137 instructions for. */
4139 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4141 || !reg_class_subset_p (rclass, GENERAL_REGS)
4142 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4143 || !s390_check_symref_alignment (XEXP (x, 0),
4144 GET_MODE_SIZE (mode))))
4146 #define __SECONDARY_RELOAD_CASE(M,m) \
4149 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4150 CODE_FOR_reload##m##di_tomem_z10; \
4152 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4153 CODE_FOR_reload##m##si_tomem_z10; \
4156 switch (GET_MODE (x))
4158 __SECONDARY_RELOAD_CASE (QI, qi);
4159 __SECONDARY_RELOAD_CASE (HI, hi);
4160 __SECONDARY_RELOAD_CASE (SI, si);
4161 __SECONDARY_RELOAD_CASE (DI, di);
4162 __SECONDARY_RELOAD_CASE (TI, ti);
4163 __SECONDARY_RELOAD_CASE (SF, sf);
4164 __SECONDARY_RELOAD_CASE (DF, df);
4165 __SECONDARY_RELOAD_CASE (TF, tf);
4166 __SECONDARY_RELOAD_CASE (SD, sd);
4167 __SECONDARY_RELOAD_CASE (DD, dd);
4168 __SECONDARY_RELOAD_CASE (TD, td);
4169 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4170 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4171 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4172 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4173 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4174 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4175 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4176 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4177 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4178 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4179 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4180 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4181 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4182 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4183 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4184 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4185 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4186 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4187 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4188 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4189 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4193 #undef __SECONDARY_RELOAD_CASE
4197 /* We need a scratch register when loading a PLUS expression which
4198 is not a legitimate operand of the LOAD ADDRESS instruction. */
4199 /* LRA can deal with transformation of plus op very well -- so we
4200 don't need to prompt LRA in this case. */
4201 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4202 sri->icode = (TARGET_64BIT ?
4203 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4205 /* When performing a multiword move from or to memory, we have to make sure the
4206 second chunk in memory is addressable without causing a displacement
4207 overflow. If that would be the case we calculate the address in
4208 a scratch register. */
4210 && GET_CODE (XEXP (x, 0)) == PLUS
4211 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4212 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4213 + GET_MODE_SIZE (mode) - 1))
4215 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4216 in an s_operand address since we may fall back to lm/stm. So we only
4217 have to care about overflows in the b+i+d case. */
4218 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4219 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4220 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4221 /* For FP_REGS no lm/stm is available so this check is triggered
4222 for displacement overflows in b+i+d and b+d like addresses. */
4223 || (reg_classes_intersect_p (FP_REGS, rclass)
4224 && s390_class_max_nregs (FP_REGS, mode) > 1))
4227 sri->icode = (TARGET_64BIT ?
4228 CODE_FOR_reloaddi_la_in :
4229 CODE_FOR_reloadsi_la_in);
4231 sri->icode = (TARGET_64BIT ?
4232 CODE_FOR_reloaddi_la_out :
4233 CODE_FOR_reloadsi_la_out);
4237 /* A scratch address register is needed when a symbolic constant is
4238 copied to r0 when compiling with -fPIC. In other cases the target
4239 register might be used as a temporary (see legitimize_pic_address). */
4240 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4241 sri->icode = (TARGET_64BIT ?
4242 CODE_FOR_reloaddi_PIC_addr :
4243 CODE_FOR_reloadsi_PIC_addr);
4245 /* Either scratch or no register needed. */
4249 /* Generate code to load SRC, which is a PLUS that is not a
4250 legitimate operand for the LA instruction, into TARGET.
4251 SCRATCH may be used as scratch register. */
4254 s390_expand_plus_operand (rtx target, rtx src,
4258 struct s390_address ad;
4260 /* src must be a PLUS; get its two operands. */
4261 gcc_assert (GET_CODE (src) == PLUS);
4262 gcc_assert (GET_MODE (src) == Pmode);
4264 /* Check if either of the two operands is already scheduled
4265 for replacement by reload. This can happen e.g. when
4266 float registers occur in an address. */
4267 sum1 = find_replacement (&XEXP (src, 0));
4268 sum2 = find_replacement (&XEXP (src, 1));
4269 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4271 /* If the address is already strictly valid, there's nothing to do. */
4272 if (!s390_decompose_address (src, &ad)
4273 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4274 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4276 /* Otherwise, one of the operands cannot be an address register;
4277 we reload its value into the scratch register. */
4278 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4280 emit_move_insn (scratch, sum1);
4283 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4285 emit_move_insn (scratch, sum2);
4289 /* According to the way these invalid addresses are generated
4290 in reload.c, it should never happen (at least on s390) that
4291 *neither* of the PLUS components, after find_replacements
4292 was applied, is an address register. */
4293 if (sum1 == scratch && sum2 == scratch)
4299 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4302 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4303 is only ever performed on addresses, so we can mark the
4304 sum as legitimate for LA in any case. */
4305 s390_load_address (target, src);
4309 /* Return true if ADDR is a valid memory address.
4310 STRICT specifies whether strict register checking applies. */
4313 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4315 struct s390_address ad;
4318 && larl_operand (addr, VOIDmode)
4319 && (mode == VOIDmode
4320 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4323 if (!s390_decompose_address (addr, &ad))
4328 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4331 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4337 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4338 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4342 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4343 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4349 /* Return true if OP is a valid operand for the LA instruction.
4350 In 31-bit, we need to prove that the result is used as an
4351 address, as LA performs only a 31-bit addition. */
4354 legitimate_la_operand_p (rtx op)
4356 struct s390_address addr;
4357 if (!s390_decompose_address (op, &addr))
4360 return (TARGET_64BIT || addr.pointer);
4363 /* Return true if it is valid *and* preferable to use LA to
4364 compute the sum of OP1 and OP2. */
4367 preferred_la_operand_p (rtx op1, rtx op2)
4369 struct s390_address addr;
4371 if (op2 != const0_rtx)
4372 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4374 if (!s390_decompose_address (op1, &addr))
4376 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4378 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4381 /* Avoid LA instructions with index register on z196; it is
4382 preferable to use regular add instructions when possible.
4383 Starting with zEC12 the la with index register is "uncracked"
4384 again. */
4385 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4388 if (!TARGET_64BIT && !addr.pointer)
4394 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4395 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
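/* Illustrative example (hypothetical registers): r1 = r2 + r3 can be
   computed either as "la %r1,0(%r2,%r3)" or with a regular add
   sequence.  On z196 the indexed la is cracked into several internal
   operations, so the add is preferred there; from zEC12 on, la is
   fine again.  */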
4401 /* Emit a forced load-address operation to load SRC into DST.
4402 This will use the LOAD ADDRESS instruction even in situations
4403 where legitimate_la_operand_p (SRC) returns false. */
4406 s390_load_address (rtx dst, rtx src)
4409 emit_move_insn (dst, src);
4411 emit_insn (gen_force_la_31 (dst, src));
4414 /* Return a legitimate reference for ORIG (an address) using the
4415 register REG. If REG is 0, a new pseudo is generated.
4417 There are two types of references that must be handled:
4419 1. Global data references must load the address from the GOT, via
4420 the PIC reg. An insn is emitted to do this load, and the reg is
4423 2. Static data references, constant pool addresses, and code labels
4424 compute the address as an offset from the GOT, whose base is in
4425 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4426 differentiate them from global data objects. The returned
4427 address is the PIC reg + an unspec constant.
4429 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4430 reg also appears in the address. */
4433 legitimize_pic_address (rtx orig, rtx reg)
4436 rtx addend = const0_rtx;
4439 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4441 if (GET_CODE (addr) == CONST)
4442 addr = XEXP (addr, 0);
4444 if (GET_CODE (addr) == PLUS)
4446 addend = XEXP (addr, 1);
4447 addr = XEXP (addr, 0);
4450 if ((GET_CODE (addr) == LABEL_REF
4451 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
4452 || (GET_CODE (addr) == UNSPEC &&
4453 (XINT (addr, 1) == UNSPEC_GOTENT
4454 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4455 && GET_CODE (addend) == CONST_INT)
4457 /* This can be locally addressed. */
4459 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4460 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4461 gen_rtx_CONST (Pmode, addr) : addr);
4463 if (TARGET_CPU_ZARCH
4464 && larl_operand (const_addr, VOIDmode)
4465 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
4466 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
4468 if (INTVAL (addend) & 1)
4470 /* LARL can't handle odd offsets, so emit a pair of LARL
4471 and LA instead. */
4472 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4474 if (!DISP_IN_RANGE (INTVAL (addend)))
4476 HOST_WIDE_INT even = INTVAL (addend) - 1;
4477 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4478 addr = gen_rtx_CONST (Pmode, addr);
4479 addend = const1_rtx;
4482 emit_move_insn (temp, addr);
4483 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4487 s390_load_address (reg, new_rtx);
4493 /* If the offset is even, we can just use LARL. This
4494 will happen automatically. */
4499 /* No larl available -- access local symbols relative to the GOT. */
4501 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4503 if (reload_in_progress || reload_completed)
4504 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4506 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4507 if (addend != const0_rtx)
4508 addr = gen_rtx_PLUS (Pmode, addr, addend);
4509 addr = gen_rtx_CONST (Pmode, addr);
4510 addr = force_const_mem (Pmode, addr);
4511 emit_move_insn (temp, addr);
4513 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4516 s390_load_address (reg, new_rtx);
4521 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4523 /* A non-local symbol reference without an addend.
4525 The symbol ref is wrapped into an UNSPEC to make sure the
4526 proper operand modifier (@GOT or @GOTENT) will be emitted.
4527 This will tell the linker to put the symbol into the GOT.
4529 Additionally the code dereferencing the GOT slot is emitted here.
4531 An addend to the symref needs to be added afterwards.
4532 legitimize_pic_address calls itself recursively to handle
4533 that case. So no need to do it here. */
4536 reg = gen_reg_rtx (Pmode);
4540 /* Use load relative if possible.
4541 lgrl <target>, sym@GOTENT */
4542 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4543 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4544 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4546 emit_move_insn (reg, new_rtx);
4549 else if (flag_pic == 1)
4551 /* Assume GOT offset is a valid displacement operand (< 4k
4552 or < 512k with z990). This is handled the same way in
4553 both 31- and 64-bit code (@GOT).
4554 lg <target>, sym@GOT(r12) */
4556 if (reload_in_progress || reload_completed)
4557 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4559 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4560 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4561 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4562 new_rtx = gen_const_mem (Pmode, new_rtx);
4563 emit_move_insn (reg, new_rtx);
4566 else if (TARGET_CPU_ZARCH)
4568 /* If the GOT offset might be >= 4k, we determine the position
4569 of the GOT entry via a PC-relative LARL (@GOTENT).
4570 larl temp, sym@GOTENT
4571 lg <target>, 0(temp) */
4573 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4575 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4576 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4578 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4579 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4580 emit_move_insn (temp, new_rtx);
4582 new_rtx = gen_const_mem (Pmode, temp);
4583 emit_move_insn (reg, new_rtx);
4589 /* If the GOT offset might be >= 4k, we have to load it
4590 from the literal pool (@GOT).
4592 lg temp, lit-litbase(r13)
4593 lg <target>, 0(temp)
4594 lit: .long sym@GOT */
4596 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4598 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4599 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4601 if (reload_in_progress || reload_completed)
4602 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4604 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4605 addr = gen_rtx_CONST (Pmode, addr);
4606 addr = force_const_mem (Pmode, addr);
4607 emit_move_insn (temp, addr);
4609 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4610 new_rtx = gen_const_mem (Pmode, new_rtx);
4611 emit_move_insn (reg, new_rtx);
4615 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4617 gcc_assert (XVECLEN (addr, 0) == 1);
4618 switch (XINT (addr, 1))
4620 /* These address symbols (or PLT slots) relative to the GOT
4621 (not GOT slots!). In general this will exceed the
4622 displacement range, so these values belong in the literal
4623 pool. */
4626 new_rtx = force_const_mem (Pmode, orig);
4629 /* For -fPIC the GOT size might exceed the displacement
4630 range so make sure the value is in the literal pool. */
4633 new_rtx = force_const_mem (Pmode, orig);
4636 /* For @GOTENT larl is used. This is handled like local
4637 symbol refs. */
4642 /* @PLT is OK as is on 64-bit, but must be converted to a
4643 GOT-relative @PLTOFF on 31-bit. */
4645 if (!TARGET_CPU_ZARCH)
4647 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4649 if (reload_in_progress || reload_completed)
4650 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4652 addr = XVECEXP (addr, 0, 0);
4653 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4655 if (addend != const0_rtx)
4656 addr = gen_rtx_PLUS (Pmode, addr, addend);
4657 addr = gen_rtx_CONST (Pmode, addr);
4658 addr = force_const_mem (Pmode, addr);
4659 emit_move_insn (temp, addr);
4661 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4664 s390_load_address (reg, new_rtx);
4669 /* On 64-bit, larl can be used. This case is handled like
4670 local symbol refs. */
4674 /* Everything else cannot happen. */
4679 else if (addend != const0_rtx)
4681 /* Otherwise, compute the sum. */
4683 rtx base = legitimize_pic_address (addr, reg);
4684 new_rtx = legitimize_pic_address (addend,
4685 base == reg ? NULL_RTX : reg);
4686 if (GET_CODE (new_rtx) == CONST_INT)
4687 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4690 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4692 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4693 new_rtx = XEXP (new_rtx, 1);
4695 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4698 if (GET_CODE (new_rtx) == CONST)
4699 new_rtx = XEXP (new_rtx, 0);
4700 new_rtx = force_operand (new_rtx, 0);
4706 /* Load the thread pointer into a register. */
4709 s390_get_thread_pointer (void)
4711 rtx tp = gen_reg_rtx (Pmode);
4713 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4714 mark_reg_pointer (tp, BITS_PER_WORD);
4719 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
4720 in s390_tls_symbol, which always refers to __tls_get_offset.
4721 The returned offset is written to RESULT_REG and a USE rtx is
4722 generated for TLS_CALL. */
4724 static GTY(()) rtx s390_tls_symbol;
4727 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4732 emit_insn (s390_load_got ());
4734 if (!s390_tls_symbol)
4735 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4737 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4738 gen_rtx_REG (Pmode, RETURN_REGNUM));
4740 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4741 RTL_CONST_CALL_P (insn) = 1;
4744 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4745 this (thread-local) address. REG may be used as a temporary. */
4748 legitimize_tls_address (rtx addr, rtx reg)
4750 rtx new_rtx, tls_call, temp, base, r2, insn;
4752 if (GET_CODE (addr) == SYMBOL_REF)
4753 switch (tls_symbolic_operand (addr))
4755 case TLS_MODEL_GLOBAL_DYNAMIC:
4757 r2 = gen_rtx_REG (Pmode, 2);
4758 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4759 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4760 new_rtx = force_const_mem (Pmode, new_rtx);
4761 emit_move_insn (r2, new_rtx);
4762 s390_emit_tls_call_insn (r2, tls_call);
4763 insn = get_insns ();
4766 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4767 temp = gen_reg_rtx (Pmode);
4768 emit_libcall_block (insn, temp, r2, new_rtx);
4770 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4773 s390_load_address (reg, new_rtx);
4778 case TLS_MODEL_LOCAL_DYNAMIC:
4780 r2 = gen_rtx_REG (Pmode, 2);
4781 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4782 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4783 new_rtx = force_const_mem (Pmode, new_rtx);
4784 emit_move_insn (r2, new_rtx);
4785 s390_emit_tls_call_insn (r2, tls_call);
4786 insn = get_insns ();
4789 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4790 temp = gen_reg_rtx (Pmode);
4791 emit_libcall_block (insn, temp, r2, new_rtx);
4793 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4794 base = gen_reg_rtx (Pmode);
4795 s390_load_address (base, new_rtx);
4797 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4798 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4799 new_rtx = force_const_mem (Pmode, new_rtx);
4800 temp = gen_reg_rtx (Pmode);
4801 emit_move_insn (temp, new_rtx);
4803 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4806 s390_load_address (reg, new_rtx);
      case TLS_MODEL_INITIAL_EXEC:
        if (flag_pic == 1)
          {
            /* Assume GOT offset < 4k.  This is handled the same way
               in both 31- and 64-bit code.  */

            if (reload_in_progress || reload_completed)
              df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
            new_rtx = gen_rtx_CONST (Pmode, new_rtx);
            new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
            new_rtx = gen_const_mem (Pmode, new_rtx);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);
          }
        else if (TARGET_CPU_ZARCH)
          {
            /* If the GOT offset might be >= 4k, we determine the position
               of the GOT entry via a PC-relative LARL.  */

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
            new_rtx = gen_rtx_CONST (Pmode, new_rtx);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);

            new_rtx = gen_const_mem (Pmode, temp);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);
          }
        else if (flag_pic)
          {
            /* If the GOT offset might be >= 4k, we have to load it
               from the literal pool.  */

            if (reload_in_progress || reload_completed)
              df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
            new_rtx = gen_rtx_CONST (Pmode, new_rtx);
            new_rtx = force_const_mem (Pmode, new_rtx);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);

            new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
            new_rtx = gen_const_mem (Pmode, new_rtx);

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
            temp = gen_reg_rtx (Pmode);
            emit_insn (gen_rtx_SET (temp, new_rtx));
          }
        else
          {
            /* In position-dependent code, load the absolute address of
               the GOT entry from the literal pool.  */

            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
            new_rtx = gen_rtx_CONST (Pmode, new_rtx);
            new_rtx = force_const_mem (Pmode, new_rtx);
            temp = gen_reg_rtx (Pmode);
            emit_move_insn (temp, new_rtx);

            new_rtx = temp;
            new_rtx = gen_const_mem (Pmode, new_rtx);
            new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
            temp = gen_reg_rtx (Pmode);
            emit_insn (gen_rtx_SET (temp, new_rtx));
          }

        new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
        if (reg != 0)
          {
            s390_load_address (reg, new_rtx);
            new_rtx = reg;
          }
        break;

      case TLS_MODEL_LOCAL_EXEC:
        new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
        new_rtx = gen_rtx_CONST (Pmode, new_rtx);
        new_rtx = force_const_mem (Pmode, new_rtx);
        temp = gen_reg_rtx (Pmode);
        emit_move_insn (temp, new_rtx);

        new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
        if (reg != 0)
          {
            s390_load_address (reg, new_rtx);
            new_rtx = reg;
          }
        break;

      default:
        gcc_unreachable ();
      }
  else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
    {
      switch (XINT (XEXP (addr, 0), 1))
        {
        case UNSPEC_INDNTPOFF:
          gcc_assert (TARGET_CPU_ZARCH);
          new_rtx = addr;
          break;

        default:
          gcc_unreachable ();
        }
    }

  else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
           && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      new_rtx = XEXP (XEXP (addr, 0), 0);
      if (GET_CODE (new_rtx) != SYMBOL_REF)
        new_rtx = gen_rtx_CONST (Pmode, new_rtx);

      new_rtx = legitimize_tls_address (new_rtx, reg);
      new_rtx = plus_constant (Pmode, new_rtx,
                               INTVAL (XEXP (XEXP (addr, 0), 1)));
      new_rtx = force_operand (new_rtx, 0);
    }

  else
    gcc_unreachable ();  /* for now ... */

  return new_rtx;
}
/* Emit insns making the address in operands[1] valid for a standard
   move to operands[0].  operands[1] is replaced by an address which
   should be used instead of the former RTX to emit the move
   pattern.  */

void
emit_symbolic_move (rtx *operands)
{
  rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);

  if (GET_CODE (operands[0]) == MEM)
    operands[1] = force_reg (Pmode, operands[1]);
  else if (TLS_SYMBOLIC_CONST (operands[1]))
    operands[1] = legitimize_tls_address (operands[1], temp);
  else if (flag_pic)
    operands[1] = legitimize_pic_address (operands[1], temp);
}
/* Try machine-dependent ways of modifying an illegitimate address X
   to be legitimate.  If we find one, return the new, valid address.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE is the mode of the operand pointed to by X.

   When -fpic is used, special handling is needed for symbolic references.
   See comments by legitimize_pic_address for details.  */

static rtx
s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx constant_term = const0_rtx;

  if (TLS_SYMBOLIC_CONST (x))
    {
      x = legitimize_tls_address (x, 0);

      if (s390_legitimate_address_p (mode, x, FALSE))
        return x;
    }
  else if (GET_CODE (x) == PLUS
           && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
               || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
    {
      return x;
    }
  else if (flag_pic)
    {
      if (SYMBOLIC_CONST (x)
          || (GET_CODE (x) == PLUS
              && (SYMBOLIC_CONST (XEXP (x, 0))
                  || SYMBOLIC_CONST (XEXP (x, 1)))))
        x = legitimize_pic_address (x, 0);

      if (s390_legitimate_address_p (mode, x, FALSE))
        return x;
    }

  x = eliminate_constant_term (x, &constant_term);

  /* Optimize loading of large displacements by splitting them
     into the multiple of 4K and the rest; this allows the
     former to be CSE'd if possible.

     Don't do this if the displacement is added to a register
     pointing into the stack frame, as the offsets will
     change later anyway.  */
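  /* A worked example (illustrative): a displacement of 0x12345 is
     split into lower = 0x12345 & 0xfff = 0x345 and
     upper = 0x12345 ^ 0x345 = 0x12000.  The 0x12000 part is loaded
     into a register once and can be CSE'd across accesses, while
     0x345 fits the 12-bit displacement field of the address.  */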
  if (GET_CODE (constant_term) == CONST_INT
      && !TARGET_LONG_DISPLACEMENT
      && !DISP_IN_RANGE (INTVAL (constant_term))
      && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
    {
      HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
      HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;

      rtx temp = gen_reg_rtx (Pmode);
      rtx val = force_operand (GEN_INT (upper), temp);
      if (val != temp)
        emit_move_insn (temp, val);

      x = gen_rtx_PLUS (Pmode, x, temp);
      constant_term = GEN_INT (lower);
    }
  if (GET_CODE (x) == PLUS)
    {
      if (GET_CODE (XEXP (x, 0)) == REG)
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 1), temp);
          if (val != temp)
            emit_move_insn (temp, val);

          x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
        }

      else if (GET_CODE (XEXP (x, 1)) == REG)
        {
          rtx temp = gen_reg_rtx (Pmode);
          rtx val = force_operand (XEXP (x, 0), temp);
          if (val != temp)
            emit_move_insn (temp, val);

          x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
        }
    }

  if (constant_term != const0_rtx)
    x = gen_rtx_PLUS (Pmode, x, constant_term);

  return x;
}
/* Try a machine-dependent way of reloading an illegitimate address AD
   operand.  If we find one, push the reload and return the new address.

   MODE is the mode of the enclosing MEM.  OPNUM is the operand number
   and TYPE is the reload type of the current reload.  */

rtx
legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
                           int opnum, int type)
{
  if (!optimize || TARGET_LONG_DISPLACEMENT)
    return NULL_RTX;

  if (GET_CODE (ad) == PLUS)
    {
      rtx tem = simplify_binary_operation (PLUS, Pmode,
                                           XEXP (ad, 0), XEXP (ad, 1));
      if (tem)
        ad = tem;
    }

  if (GET_CODE (ad) == PLUS
      && GET_CODE (XEXP (ad, 0)) == REG
      && GET_CODE (XEXP (ad, 1)) == CONST_INT
      && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
    {
      HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
      HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
      rtx cst, tem, new_rtx;

      cst = GEN_INT (upper);
      if (!legitimate_reload_constant_p (cst))
        cst = force_const_mem (Pmode, cst);

      tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
      new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));

      push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
                   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      return new_rtx;
    }

  return NULL_RTX;
}
/* Emit code to move LEN bytes from SRC to DST.  */

void
s390_expand_movmem (rtx dst, rtx src, rtx len)
{
  /* When tuning for z10 or higher we rely on the Glibc functions to
     do the right thing.  Inline code is generated only for constant
     lengths below 64k.  */
  if (s390_tune >= PROCESSOR_2097_Z10
      && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
    return;

  if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
    {
      if (INTVAL (len) > 0)
        emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
    }

  else if (TARGET_MVCLE)
    {
      emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
    }

  else
    {
      rtx dst_addr, src_addr, count, blocks, temp;
      rtx_code_label *loop_start_label = gen_label_rtx ();
      rtx_code_label *loop_end_label = gen_label_rtx ();
      rtx_code_label *end_label = gen_label_rtx ();
      machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
        mode = Pmode;

      dst_addr = gen_reg_rtx (Pmode);
      src_addr = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
                               EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
      emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
      dst = change_address (dst, VOIDmode, dst_addr);
      src = change_address (src, VOIDmode, src_addr);

      temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
                           OPTAB_DIRECT);
      if (temp != count)
        emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
          && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
        {
          rtx prefetch;

          /* Issue a read prefetch for the +3 cache line.  */
          prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
                                   const0_rtx, const0_rtx);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
          emit_insn (prefetch);

          /* Issue a write prefetch for the +3 cache line.  */
          prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
                                   const1_rtx, const0_rtx);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
          emit_insn (prefetch);
        }

      emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
      s390_load_address (dst_addr,
                         gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
      s390_load_address (src_addr,
                         gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      emit_insn (gen_movmem_short (dst, src,
                                   convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);
    }
}
/* Emit code to set LEN bytes at DST to VAL.
   Make use of clrmem if VAL is zero.  */

void
s390_expand_setmem (rtx dst, rtx len, rtx val)
{
  if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
    return;

  gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);

  if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
    {
      if (val == const0_rtx && INTVAL (len) <= 256)
        emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
      else
        {
          /* Initialize memory by storing the first byte.  */
          emit_move_insn (adjust_address (dst, QImode, 0), val);

          if (INTVAL (len) > 1)
            {
              /* Initiate 1 byte overlap move.
                 The first byte of DST is propagated through DSTP1.
                 Prepare a movmem for:  DST+1 = DST (length = LEN - 1).
                 DST is set to size 1 so the rest of the memory location
                 does not count as source operand.  */
              rtx dstp1 = adjust_address (dst, VOIDmode, 1);
              set_mem_size (dst, 1);

              emit_insn (gen_movmem_short (dstp1, dst,
                                           GEN_INT (INTVAL (len) - 2)));
            }
        }
    }

  else if (TARGET_MVCLE)
    {
      val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
      if (TARGET_64BIT)
        emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
                                       val));
      else
        emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
                                       val));
    }
  else
    {
      rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
      rtx_code_label *loop_start_label = gen_label_rtx ();
      rtx_code_label *loop_end_label = gen_label_rtx ();
      rtx_code_label *end_label = gen_label_rtx ();
      machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
        mode = Pmode;

      dst_addr = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
                               EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
      dst = change_address (dst, VOIDmode, dst_addr);

      if (val == const0_rtx)
        temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
                             OPTAB_DIRECT);
      else
        {
          dstp1 = adjust_address (dst, VOIDmode, 1);
          set_mem_size (dst, 1);

          /* Initialize memory by storing the first byte.  */
          emit_move_insn (adjust_address (dst, QImode, 0), val);

          /* If count is 1 we are done.  */
          emit_cmp_and_jump_insns (count, const1_rtx,
                                   EQ, NULL_RTX, mode, 1, end_label);

          temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
                               OPTAB_DIRECT);
        }
      if (temp != count)
        emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
          && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
        {
          /* Issue a write prefetch for the +4 cache line.  */
          rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
                                                     GEN_INT (1024)),
                                       const1_rtx, const0_rtx);
          emit_insn (prefetch);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
        }

      if (val == const0_rtx)
        emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
      else
        emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
      s390_load_address (dst_addr,
                         gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      if (val == const0_rtx)
        emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
      else
        emit_insn (gen_movmem_short (dstp1, dst,
                                     convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);
    }
}
/* Emit code to compare LEN bytes at OP0 with those at OP1,
   and return the result in TARGET.  */

void
s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
{
  rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
  rtx tmp;

  /* When tuning for z10 or higher we rely on the Glibc functions to
     do the right thing.  Inline code is generated only for constant
     lengths below 64k.  */
  if (s390_tune >= PROCESSOR_2097_Z10
      && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
    return;

  /* As the result of CMPINT is inverted compared to what we need,
     we have to swap the operands.  */
  tmp = op0; op0 = op1; op1 = tmp;

  if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
    {
      if (INTVAL (len) > 0)
        {
          emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
          emit_insn (gen_cmpint (target, ccreg));
        }
      else
        emit_move_insn (target, const0_rtx);
    }
  else if (TARGET_MVCLE)
    {
      emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
      emit_insn (gen_cmpint (target, ccreg));
    }
  else
    {
      rtx addr0, addr1, count, blocks, temp;
      rtx_code_label *loop_start_label = gen_label_rtx ();
      rtx_code_label *loop_end_label = gen_label_rtx ();
      rtx_code_label *end_label = gen_label_rtx ();
      machine_mode mode;

      mode = GET_MODE (len);
      if (mode == VOIDmode)
        mode = Pmode;

      addr0 = gen_reg_rtx (Pmode);
      addr1 = gen_reg_rtx (Pmode);
      count = gen_reg_rtx (mode);
      blocks = gen_reg_rtx (mode);

      convert_move (count, len, 1);
      emit_cmp_and_jump_insns (count, const0_rtx,
                               EQ, NULL_RTX, mode, 1, end_label);

      emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
      emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
      op0 = change_address (op0, VOIDmode, addr0);
      op1 = change_address (op1, VOIDmode, addr1);

      temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
                           OPTAB_DIRECT);
      if (temp != count)
        emit_move_insn (count, temp);

      temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_label (loop_start_label);

      if (TARGET_Z10
          && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
        {
          rtx prefetch;

          /* Issue a read prefetch for the +2 cache line of operand 1.  */
          prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
                                   const0_rtx, const0_rtx);
          emit_insn (prefetch);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;

          /* Issue a read prefetch for the +2 cache line of operand 2.  */
          prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
                                   const0_rtx, const0_rtx);
          emit_insn (prefetch);
          PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
        }

      emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
      temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
      temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
                                   gen_rtx_LABEL_REF (VOIDmode, end_label),
                                   pc_rtx);
      temp = gen_rtx_SET (pc_rtx, temp);
      emit_jump_insn (temp);

      s390_load_address (addr0,
                         gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
      s390_load_address (addr1,
                         gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));

      temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
                           OPTAB_DIRECT);
      if (temp != blocks)
        emit_move_insn (blocks, temp);

      emit_cmp_and_jump_insns (blocks, const0_rtx,
                               EQ, NULL_RTX, mode, 1, loop_end_label);

      emit_jump (loop_start_label);
      emit_label (loop_end_label);

      emit_insn (gen_cmpmem_short (op0, op1,
                                   convert_to_mode (Pmode, count, 1)));
      emit_label (end_label);

      emit_insn (gen_cmpint (target, ccreg));
    }
}
/* Emit a conditional jump to LABEL for condition code mask MASK using
   comparison operator COMPARISON.  Return the emitted jump insn.  */

static rtx_insn *
s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
{
  rtx temp;

  gcc_assert (comparison == EQ || comparison == NE);
  gcc_assert (mask > 0 && mask < 15);

  temp = gen_rtx_fmt_ee (comparison, VOIDmode,
                         gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
  temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
                               gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
  temp = gen_rtx_SET (pc_rtx, temp);
  return emit_jump_insn (temp);
}
/* Emit the instructions to implement strlen of STRING and store the
   result in TARGET.  The string has the known ALIGNMENT.  This
   version uses vector instructions and is therefore not appropriate
   for targets prior to z13.  */
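/* Sketch of the emitted code for an unaligned string pointer
   (illustrative only; the actual insns are generated below):

     highest_index_to_load = 15 - (addr & 15)
     str = vll (highest_index_to_load, addr)   # partial first load
     goto into_loop
   loop:
     str = 16-byte aligned vector load (addr + idx)
   into_loop:
     idx += 16
     result = vfenez (str, str)                # find leftmost zero byte
     if no zero byte was found: goto loop
     len = element 7 of result                 # index within the block

   followed by a fixup that clamps LEN to the number of bytes actually
   loaded by the initial vll.  */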
void
s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
{
  int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
  int very_likely = REG_BR_PROB_BASE - 1;
  rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
  rtx str_reg = gen_reg_rtx (V16QImode);
  rtx str_addr_base_reg = gen_reg_rtx (Pmode);
  rtx str_idx_reg = gen_reg_rtx (Pmode);
  rtx result_reg = gen_reg_rtx (V16QImode);
  rtx is_aligned_label = gen_label_rtx ();
  rtx into_loop_label = NULL_RTX;
  rtx loop_start_label = gen_label_rtx ();
  rtx temp;
  rtx len = gen_reg_rtx (QImode);
  rtx cond;

  s390_load_address (str_addr_base_reg, XEXP (string, 0));
  emit_move_insn (str_idx_reg, const0_rtx);

  if (INTVAL (alignment) < 16)
    {
      /* Check whether the address happens to be aligned properly so
         jump directly to the aligned loop.  */
      emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
                                            str_addr_base_reg, GEN_INT (15)),
                               const0_rtx, EQ, NULL_RTX,
                               Pmode, 1, is_aligned_label);

      temp = gen_reg_rtx (Pmode);
      temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
                           GEN_INT (15), temp, 1, OPTAB_DIRECT);
      gcc_assert (REG_P (temp));
      highest_index_to_load_reg =
        expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
                      highest_index_to_load_reg, 1, OPTAB_DIRECT);
      gcc_assert (REG_P (highest_index_to_load_reg));
      emit_insn (gen_vllv16qi (str_reg,
                   convert_to_mode (SImode, highest_index_to_load_reg, 1),
                   gen_rtx_MEM (BLKmode, str_addr_base_reg)));

      into_loop_label = gen_label_rtx ();
      s390_emit_jump (into_loop_label, NULL_RTX);
      emit_barrier ();
    }

  emit_label (is_aligned_label);
  LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;

  /* Reaching this point we are only performing 16 bytes aligned
     loads.  */
  emit_move_insn (highest_index_to_load_reg, GEN_INT (15));

  emit_label (loop_start_label);
  LABEL_NUSES (loop_start_label) = 1;

  /* Load 16 bytes of the string into VR.  */
  emit_move_insn (str_reg,
                  gen_rtx_MEM (V16QImode,
                               gen_rtx_PLUS (Pmode, str_idx_reg,
                                             str_addr_base_reg)));
  if (into_loop_label != NULL_RTX)
    {
      emit_label (into_loop_label);
      LABEL_NUSES (into_loop_label) = 1;
    }

  /* Increment string index by 16 bytes.  */
  expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
                str_idx_reg, 1, OPTAB_DIRECT);

  emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
                                  GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));

  add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
                    REG_BR_PROB, very_likely);
  emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));

  /* If the string pointer wasn't aligned we have loaded less than 16
     bytes and the remaining bytes got filled with zeros (by vll).
     Now we have to check whether the resulting index lies within the
     bytes actually part of the string.  */

  cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
                            highest_index_to_load_reg);
  s390_load_address (highest_index_to_load_reg,
                     gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
                                   const1_rtx));
  if (TARGET_64BIT)
    emit_insn (gen_movdicc (str_idx_reg, cond,
                            highest_index_to_load_reg, str_idx_reg));
  else
    emit_insn (gen_movsicc (str_idx_reg, cond,
                            highest_index_to_load_reg, str_idx_reg));

  add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
                    very_unlikely);

  expand_binop (Pmode, add_optab, str_idx_reg,
                GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
  /* FIXME: len is already zero extended - so avoid the llgcr emitted
     here.  */
  temp = expand_binop (Pmode, add_optab, str_idx_reg,
                       convert_to_mode (Pmode, len, 1),
                       target, 1, OPTAB_DIRECT);
  if (temp != target)
    emit_move_insn (target, temp);
}
/* Expand conditional increment or decrement using alc/slb instructions.
   Should generate code setting DST to either SRC or SRC + INCREMENT,
   depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
   Returns true if successful, false otherwise.

   That makes it possible to implement some if-constructs without jumps e.g.:
   (borrow = CC0 | CC1 and carry = CC2 | CC3)
   unsigned int a, b, c;
   if (a < b)  c++; -> CCU  b > a  -> CC2;    c += carry;
   if (a < b)  c--; -> CCL3 a - b  -> borrow; c -= borrow;
   if (a <= b) c++; -> CCL3 b - a  -> borrow; c += carry;
   if (a <= b) c--; -> CCU  a <= b -> borrow; c -= borrow;

   Checks for EQ and NE with a nonzero value need an additional xor e.g.:
   if (a == b) c++; -> CCL3 a ^= b; 0 - a  -> borrow;    c += carry;
   if (a == b) c--; -> CCU  a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
   if (a != b) c++; -> CCU  a ^= b; a > 0  -> CC2;       c += carry;
   if (a != b) c--; -> CCL3 a ^= b; 0 - a  -> borrow;    c -= borrow;  */
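/* As a concrete sketch of the first case above (illustrative, not
   verbatim compiler output; register names are placeholders):

       clr   %r_b,%r_a       # CCU compare; b > a sets CC2 (= carry)
       alcr  %r_c,%r_zero    # c = c + 0 + carry

   where %r_zero is assumed to hold zero, so the add-logical-with-carry
   folds the comparison result directly into c without a branch.  */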
bool
s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
                   rtx dst, rtx src, rtx increment)
{
  machine_mode cmp_mode;
  machine_mode cc_mode;
  rtx op_res;
  rtx insn;
  rtvec p;
  int ret;

  if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
      && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
    cmp_mode = SImode;
  else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
           && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
    cmp_mode = DImode;
  else
    return false;

  /* Try ADD LOGICAL WITH CARRY.  */
  if (increment == const1_rtx)
    {
      /* Determine CC mode to use.  */
      if (cmp_code == EQ || cmp_code == NE)
        {
          if (cmp_op1 != const0_rtx)
            {
              cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
                                             NULL_RTX, 0, OPTAB_WIDEN);
              cmp_op1 = const0_rtx;
            }

          cmp_code = cmp_code == EQ ? LEU : GTU;
        }

      if (cmp_code == LTU || cmp_code == LEU)
        {
          rtx tem = cmp_op0;
          cmp_op0 = cmp_op1;
          cmp_op1 = tem;
          cmp_code = swap_condition (cmp_code);
        }

      switch (cmp_code)
        {
        case GTU:
          cc_mode = CCUmode;
          break;

        case GEU:
          cc_mode = CCL3mode;
          break;

        default:
          return false;
        }

      /* Emit comparison instruction pattern. */
      if (!register_operand (cmp_op0, cmp_mode))
        cmp_op0 = force_reg (cmp_mode, cmp_op0);

      insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
                          gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
      /* We use insn_invalid_p here to add clobbers if required.  */
      ret = insn_invalid_p (emit_insn (insn), false);
      gcc_assert (!ret);

      /* Emit ALC instruction pattern.  */
      op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
                               gen_rtx_REG (cc_mode, CC_REGNUM),
                               const0_rtx);

      if (src != const0_rtx)
        {
          if (!register_operand (src, GET_MODE (dst)))
            src = force_reg (GET_MODE (dst), src);

          op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
          op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
        }

      p = rtvec_alloc (2);
      RTVEC_ELT (p, 0) =
        gen_rtx_SET (dst, op_res);
      RTVEC_ELT (p, 1) =
        gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));

      return true;
    }
  /* Try SUBTRACT LOGICAL WITH BORROW.  */
  if (increment == constm1_rtx)
    {
      /* Determine CC mode to use.  */
      if (cmp_code == EQ || cmp_code == NE)
        {
          if (cmp_op1 != const0_rtx)
            {
              cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
                                             NULL_RTX, 0, OPTAB_WIDEN);
              cmp_op1 = const0_rtx;
            }

          cmp_code = cmp_code == EQ ? LEU : GTU;
        }

      if (cmp_code == GTU || cmp_code == GEU)
        {
          rtx tem = cmp_op0;
          cmp_op0 = cmp_op1;
          cmp_op1 = tem;
          cmp_code = swap_condition (cmp_code);
        }

      switch (cmp_code)
        {
        case LEU:
          cc_mode = CCUmode;
          break;

        case LTU:
          cc_mode = CCL3mode;
          break;

        default:
          return false;
        }

      /* Emit comparison instruction pattern. */
      if (!register_operand (cmp_op0, cmp_mode))
        cmp_op0 = force_reg (cmp_mode, cmp_op0);

      insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
                          gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
      /* We use insn_invalid_p here to add clobbers if required.  */
      ret = insn_invalid_p (emit_insn (insn), false);
      gcc_assert (!ret);

      /* Emit SLB instruction pattern.  */
      if (!register_operand (src, GET_MODE (dst)))
        src = force_reg (GET_MODE (dst), src);

      op_res = gen_rtx_MINUS (GET_MODE (dst),
                              gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
                              gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
                                              gen_rtx_REG (cc_mode, CC_REGNUM),
                                              const0_rtx));
      p = rtvec_alloc (2);
      RTVEC_ELT (p, 0) =
        gen_rtx_SET (dst, op_res);
      RTVEC_ELT (p, 1) =
        gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));

      return true;
    }

  return false;
}
/* Expand code for the insv template.  Return true if successful.  */

bool
s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
{
  int bitsize = INTVAL (op1);
  int bitpos = INTVAL (op2);
  machine_mode mode = GET_MODE (dest);
  machine_mode smode;
  int smode_bsize, mode_bsize;
  rtx op, clobber;

  if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
    return false;

  /* Generate INSERT IMMEDIATE (IILL et al).  */
  /* (set (ze (reg)) (const_int)).  */
  if (TARGET_ZARCH
      && register_operand (dest, word_mode)
      && (bitpos % 16) == 0
      && (bitsize % 16) == 0
      && const_int_operand (src, VOIDmode))
    {
      HOST_WIDE_INT val = INTVAL (src);
      int regpos = bitpos + bitsize;

      while (regpos > bitpos)
        {
          machine_mode putmode;
          int putsize;

          if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
            putmode = SImode;
          else
            putmode = HImode;

          putsize = GET_MODE_BITSIZE (putmode);
          regpos -= putsize;
          emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
                                                GEN_INT (putsize),
                                                GEN_INT (regpos)),
                          gen_int_mode (val, putmode));
          val >>= putsize;
        }
      gcc_assert (regpos == bitpos);
      return true;
    }

  smode = smallest_mode_for_size (bitsize, MODE_INT);
  smode_bsize = GET_MODE_BITSIZE (smode);
  mode_bsize = GET_MODE_BITSIZE (mode);

  /* Generate STORE CHARACTERS UNDER MASK (STCM et al).  */
  if (bitpos == 0
      && (bitsize % BITS_PER_UNIT) == 0
      && MEM_P (dest)
      && (register_operand (src, word_mode)
          || const_int_operand (src, VOIDmode)))
    {
      /* Emit standard pattern if possible.  */
      if (smode_bsize == bitsize)
        {
          emit_move_insn (adjust_address (dest, smode, 0),
                          gen_lowpart (smode, src));
          return true;
        }

      /* (set (ze (mem)) (const_int)).  */
      else if (const_int_operand (src, VOIDmode))
        {
          int size = bitsize / BITS_PER_UNIT;
          rtx src_mem = adjust_address (force_const_mem (word_mode, src),
                                        BLKmode,
                                        UNITS_PER_WORD - size);

          dest = adjust_address (dest, BLKmode, 0);
          set_mem_size (dest, size);
          s390_expand_movmem (dest, src_mem, GEN_INT (size));
          return true;
        }

      /* (set (ze (mem)) (reg)).  */
      else if (register_operand (src, word_mode))
        {
          if (bitsize <= 32)
            emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
                                                  const0_rtx), src);
          else
            {
              /* Emit st,stcmh sequence.  */
              int stcmh_width = bitsize - 32;
              int size = stcmh_width / BITS_PER_UNIT;

              emit_move_insn (adjust_address (dest, SImode, size),
                              gen_lowpart (SImode, src));
              set_mem_size (dest, size);
              emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
                                                    GEN_INT (stcmh_width),
                                                    const0_rtx),
                              gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
            }
          return true;
        }

      return false;
    }

  /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al).  */
  if ((bitpos % BITS_PER_UNIT) == 0
      && (bitsize % BITS_PER_UNIT) == 0
      && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
      && MEM_P (src)
      && (mode == DImode || mode == SImode)
      && register_operand (dest, mode))
    {
      /* Emit a strict_low_part pattern if possible.  */
      if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
        {
          op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
          op = gen_rtx_SET (op, gen_lowpart (smode, src));
          clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
          emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
          return true;
        }

      /* ??? There are more powerful versions of ICM that are not
         completely represented in the md file.  */
    }

  /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al).  */
  if (TARGET_Z10 && (mode == DImode || mode == SImode))
    {
      machine_mode mode_s = GET_MODE (src);

      if (mode_s == VOIDmode)
        {
          /* For constant zero values the representation with AND
             appears to be folded in more situations than the (set
             (zero_extract) ...).
             We only do this when the start and end of the bitfield
             remain in the same SImode chunk.  That way nihf or nilf
             can be used.
             The AND patterns might still generate a risbg for this.  */
          if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
            return false;
          else
            src = force_reg (mode, src);
        }
      else if (mode_s != mode)
        {
          gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
          src = force_reg (mode_s, src);
          src = gen_lowpart (mode, src);
        }

      op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
      op = gen_rtx_SET (op, src);

      if (!TARGET_ZEC12)
        {
          clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
          op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
        }
      emit_insn (op);

      return true;
    }

  return false;
}
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
   register that holds VAL of mode MODE shifted by COUNT bits.  */
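/* E.g. for a QImode VAL and a COUNT of 24 this computes
   (val & 0xff) << 24, i.e. the byte ends up in the leftmost byte
   of an SImode word (a worked example).  */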
static rtx
s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
{
  val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
                             NULL_RTX, 1, OPTAB_DIRECT);
  return expand_simple_binop (SImode, ASHIFT, val, count,
                              NULL_RTX, 1, OPTAB_DIRECT);
}
/* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
   the result in TARGET.  */

void
s390_expand_vec_compare (rtx target, enum rtx_code cond,
                         rtx cmp_op1, rtx cmp_op2)
{
  machine_mode mode = GET_MODE (target);
  bool neg_p = false, swap_p = false;
  rtx tmp;

  if (GET_MODE (cmp_op1) == V2DFmode)
    {
      switch (cond)
        {
          /* NE a != b -> !(a == b) */
        case NE:   cond = EQ; neg_p = true;                break;
          /* UNGT a u> b -> !(b >= a) */
        case UNGT: cond = GE; neg_p = true; swap_p = true; break;
          /* UNGE a u>= b -> !(b > a) */
        case UNGE: cond = GT; neg_p = true; swap_p = true; break;
          /* LE: a <= b -> b >= a */
        case LE:   cond = GE;               swap_p = true; break;
          /* UNLE: a u<= b -> !(a > b) */
        case UNLE: cond = GT; neg_p = true;                break;
          /* LT: a < b -> b > a */
        case LT:   cond = GT;               swap_p = true; break;
          /* UNLT: a u< b -> !(a >= b) */
        case UNLT: cond = GE; neg_p = true;                break;
        case UNEQ:
          emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
          return;
        case LTGT:
          emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
          return;
        case ORDERED:
          emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
          return;
        case UNORDERED:
          emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
          return;
        default: break;
        }
    }
  else
    {
      switch (cond)
        {
          /* NE: a != b -> !(a == b) */
        case NE:  cond = EQ;  neg_p = true;                break;
          /* GE: a >= b -> !(b > a) */
        case GE:  cond = GT;  neg_p = true; swap_p = true; break;
          /* GEU: a >= b -> !(b > a) */
        case GEU: cond = GTU; neg_p = true; swap_p = true; break;
          /* LE: a <= b -> !(a > b) */
        case LE:  cond = GT;  neg_p = true;                break;
          /* LEU: a <= b -> !(a > b) */
        case LEU: cond = GTU; neg_p = true;                break;
          /* LT: a < b -> b > a */
        case LT:  cond = GT;                swap_p = true; break;
          /* LTU: a < b -> b > a */
        case LTU: cond = GTU;               swap_p = true; break;
        default: break;
        }
    }

  if (swap_p)
    {
      tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
    }

  emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
                                                  mode,
                                                  cmp_op1, cmp_op2)));
  if (neg_p)
    emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
}
/* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
   TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
   elements in CMP1 and CMP2 fulfill the comparison.  */

void
s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
                            rtx cmp1, rtx cmp2, bool all_p)
{
  enum rtx_code new_code = code;
  machine_mode cmp_mode, full_cmp_mode, scratch_mode;
  rtx tmp_reg = gen_reg_rtx (SImode);
  bool swap_p = false;

  if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
    {
      switch (code)
        {
        case EQ:  cmp_mode = CCVEQmode; break;
        case NE:  cmp_mode = CCVEQmode; break;
        case GT:  cmp_mode = CCVHmode; break;
        case GE:  cmp_mode = CCVHmode; new_code = LE; swap_p = true; break;
        case LT:  cmp_mode = CCVHmode; new_code = GT; swap_p = true; break;
        case LE:  cmp_mode = CCVHmode; new_code = LE; break;
        case GTU: cmp_mode = CCVHUmode; break;
        case GEU: cmp_mode = CCVHUmode; new_code = LEU; swap_p = true; break;
        case LTU: cmp_mode = CCVHUmode; new_code = GTU; swap_p = true; break;
        case LEU: cmp_mode = CCVHUmode; new_code = LEU; break;
        default: gcc_unreachable ();
        }
      scratch_mode = GET_MODE (cmp1);
    }
  else if (GET_MODE (cmp1) == V2DFmode)
    {
      switch (code)
        {
        case EQ:   cmp_mode = CCVEQmode; break;
        case NE:   cmp_mode = CCVEQmode; break;
        case GT:   cmp_mode = CCVFHmode; break;
        case GE:   cmp_mode = CCVFHEmode; break;
        case UNLE: cmp_mode = CCVFHmode; break;
        case UNLT: cmp_mode = CCVFHEmode; break;
        case LT:   cmp_mode = CCVFHmode; new_code = GT; swap_p = true; break;
        case LE:   cmp_mode = CCVFHEmode; new_code = GE; swap_p = true; break;
        default: gcc_unreachable ();
        }
      scratch_mode = V2DImode;
    }
  else
    gcc_unreachable ();

  if (!all_p)
    switch (cmp_mode)
      {
      case CCVEQmode:  full_cmp_mode = CCVEQANYmode;  break;
      case CCVHmode:   full_cmp_mode = CCVHANYmode;   break;
      case CCVHUmode:  full_cmp_mode = CCVHUANYmode;  break;
      case CCVFHmode:  full_cmp_mode = CCVFHANYmode;  break;
      case CCVFHEmode: full_cmp_mode = CCVFHEANYmode; break;
      default: gcc_unreachable ();
      }
  else
    /* The modes without ANY match the ALL modes.  */
    full_cmp_mode = cmp_mode;

  if (swap_p)
    {
      rtx tmp = cmp2;
      cmp2 = cmp1;
      cmp1 = tmp;
    }

  emit_insn (gen_rtx_PARALLEL (VOIDmode,
               gen_rtvec (2, gen_rtx_SET (
                               gen_rtx_REG (cmp_mode, CC_REGNUM),
                               gen_rtx_COMPARE (cmp_mode, cmp1, cmp2)),
                          gen_rtx_CLOBBER (VOIDmode,
                                           gen_rtx_SCRATCH (scratch_mode)))));
  emit_move_insn (target, const0_rtx);
  emit_move_insn (tmp_reg, const1_rtx);

  emit_move_insn (target,
                  gen_rtx_IF_THEN_ELSE (SImode,
                    gen_rtx_fmt_ee (new_code, VOIDmode,
                                    gen_rtx_REG (full_cmp_mode, CC_REGNUM),
                                    const0_rtx),
                    tmp_reg, target));
}
/* Generate a vector comparison expression loading either elements of
   THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
   and CMP_OP2.  */

void
s390_expand_vcond (rtx target, rtx then, rtx els,
                   enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
{
  rtx tmp;
  machine_mode result_mode;
  rtx result_target;

  machine_mode target_mode = GET_MODE (target);
  machine_mode cmp_mode = GET_MODE (cmp_op1);
  rtx op = (cond == LT) ? els : then;

  /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
     and x < 0 ? 1 : 0 into (unsigned) x >> 31.  Likewise
     for short and byte (x >> 15 and x >> 7 respectively).  */
  if ((cond == LT || cond == GE)
      && target_mode == cmp_mode
      && cmp_op2 == CONST0_RTX (cmp_mode)
      && op == CONST0_RTX (target_mode)
      && s390_vector_mode_supported_p (target_mode)
      && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
    {
      rtx negop = (cond == LT) ? then : els;

      int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;

      /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
      if (negop == CONST1_RTX (target_mode))
        {
          rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
                                         GEN_INT (shift), target,
                                         1, OPTAB_DIRECT);
          if (res != target)
            emit_move_insn (target, res);
          return;
        }

      /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
      else if (all_ones_operand (negop, target_mode))
        {
          rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
                                         GEN_INT (shift), target,
                                         0, OPTAB_DIRECT);
          if (res != target)
            emit_move_insn (target, res);
          return;
        }
    }

  /* We always use an integral type vector to hold the comparison
     result.  */
  result_mode = cmp_mode == V2DFmode ? V2DImode : cmp_mode;
  result_target = gen_reg_rtx (result_mode);

  /* We allow vector immediates as comparison operands that
     can be handled by the optimization above but not by the
     following code.  Hence, force them into registers here.  */
  if (!REG_P (cmp_op1))
    cmp_op1 = force_reg (target_mode, cmp_op1);

  if (!REG_P (cmp_op2))
    cmp_op2 = force_reg (target_mode, cmp_op2);

  s390_expand_vec_compare (result_target, cond,
                           cmp_op1, cmp_op2);

  /* If the results are supposed to be either -1 or 0 we are done
     since this is what our compare instructions generate anyway.  */
  if (all_ones_operand (then, GET_MODE (then))
      && const0_operand (els, GET_MODE (els)))
    {
      emit_move_insn (target, gen_rtx_SUBREG (target_mode,
                                              result_target, 0));
      return;
    }

  /* Otherwise we will do a vsel afterwards.  */
  /* This gets triggered e.g.
     with gcc.c-torture/compile/pr53410-1.c */
  if (!REG_P (then))
    then = force_reg (target_mode, then);

  if (!REG_P (els))
    els = force_reg (target_mode, els);

  tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
                        result_target,
                        CONST0_RTX (result_mode));

  /* We compared the result against zero above so we have to swap then
     and els here.  */
  tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);

  gcc_assert (target_mode == GET_MODE (then));
  emit_insn (gen_rtx_SET (target, tmp));
}
/* Emit the RTX necessary to initialize the vector TARGET with values
   in VALS.  */

void
s390_expand_vec_init (rtx target, rtx vals)
{
  machine_mode mode = GET_MODE (target);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  bool all_same = true, all_regs = true, all_const_int = true;
  rtx x;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);

      if (!CONST_INT_P (x))
        all_const_int = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;

      if (!REG_P (x))
        all_regs = false;
    }

  /* Use vector gen mask or vector gen byte mask if possible.  */
  if (all_same && all_const_int
      && (XVECEXP (vals, 0, 0) == const0_rtx
          || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
                                               NULL, NULL)
          || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
    {
      emit_insn (gen_rtx_SET (target,
                              gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
      return;
    }

  if (all_same)
    {
      emit_insn (gen_rtx_SET (target,
                              gen_rtx_VEC_DUPLICATE (mode,
                                                     XVECEXP (vals, 0, 0))));
      return;
    }

  if (all_regs && REG_P (target) && n_elts == 2 && inner_mode == DImode)
    {
      /* Use vector load pair.  */
      emit_insn (gen_rtx_SET (target,
                              gen_rtx_VEC_CONCAT (mode,
                                                  XVECEXP (vals, 0, 0),
                                                  XVECEXP (vals, 0, 1))));
      return;
    }

  /* We are about to set the vector elements one by one.  Zero out the
     full register first in order to help the data flow framework to
     detect it as full VR set.  */
  emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));

  /* Unfortunately the vec_init expander is not allowed to fail.  So
     we have to implement the fallback ourselves.  */
  for (i = 0; i < n_elts; i++)
    emit_insn (gen_rtx_SET (target,
                            gen_rtx_UNSPEC (mode,
                                            gen_rtvec (3, XVECEXP (vals, 0, i),
                                                       GEN_INT (i), target),
                                            UNSPEC_VEC_SET)));
}
/* Structure to hold the initial parameters for a compare_and_swap operation
   in HImode and QImode.  */

struct alignment_context
{
  rtx memsi;      /* SI aligned memory location.  */
  rtx shift;      /* Bit offset with regard to lsb.  */
  rtx modemask;   /* Mask of the HQImode shifted by SHIFT bits.  */
  rtx modemaski;  /* ~modemask */
  bool aligned;   /* True if memory is aligned, false else.  */
};

/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
   structure AC for transparent simplifying, if the memory alignment is known
   to be at least 32bit.  MEM is the memory location for the actual operation
   and MODE its mode.  */
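/* Worked example (illustrative): for an HImode MEM at an address A of
   unknown alignment, MEMSI covers the word at A & ~3.  With
   A % 4 == 2, the byte offset is 2, so SHIFT becomes
   (2 - 2) * 8 = 0 and MODEMASK is 0xffff << 0 = 0x0000ffff; the
   halfword thus occupies the rightmost two bytes of the word in the
   big-endian layout used here.  */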
static void
init_alignment_context (struct alignment_context *ac, rtx mem,
                        machine_mode mode)
{
  ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
  ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));

  if (ac->aligned)
    ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned.  */
  else
    {
      /* Alignment is unknown.  */
      rtx byteoffset, addr, align;

      /* Force the address into a register.  */
      addr = force_reg (Pmode, XEXP (mem, 0));

      /* Align it to SImode.  */
      align = expand_simple_binop (Pmode, AND, addr,
                                   GEN_INT (-GET_MODE_SIZE (SImode)),
                                   NULL_RTX, 1, OPTAB_DIRECT);
      /* Generate MEM.  */
      ac->memsi = gen_rtx_MEM (SImode, align);
      MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
      set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
      set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));

      /* Calculate shiftcount.  */
      byteoffset = expand_simple_binop (Pmode, AND, addr,
                                        GEN_INT (GET_MODE_SIZE (SImode) - 1),
                                        NULL_RTX, 1, OPTAB_DIRECT);
      /* As we already have some offset, evaluate the remaining distance.  */
      ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
                                       NULL_RTX, 1, OPTAB_DIRECT);
    }

  /* Shift is the byte count, but we need the bitcount.  */
  ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
                                   NULL_RTX, 1, OPTAB_DIRECT);

  /* Calculate masks.  */
  ac->modemask = expand_simple_binop (SImode, ASHIFT,
                                      GEN_INT (GET_MODE_MASK (mode)),
                                      ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
  ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
                                      NULL_RTX, 1);
}
/* A subroutine of s390_expand_cs_hqi.  Insert INS into VAL.  If possible,
   use a single insv insn into SEQ2.  Otherwise, put prep insns in SEQ1 and
   perform the merge in SEQ2.  */

static rtx
s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
                    machine_mode mode, rtx val, rtx ins)
{
  rtx tmp;

  if (ac->aligned)
    {
      start_sequence ();
      tmp = copy_to_mode_reg (SImode, val);
      if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
                            const0_rtx, ins))
        {
          *seq1 = NULL;
          *seq2 = get_insns ();
          end_sequence ();
          return tmp;
        }
      end_sequence ();
    }

  /* Failed to use insv.  Generate a two part shift and mask.  */
  start_sequence ();
  tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
  *seq1 = get_insns ();
  end_sequence ();

  start_sequence ();
  tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
  *seq2 = get_insns ();
  end_sequence ();

  return tmp;
}
/* Expand an atomic compare and swap operation for HImode and QImode.  MEM is
   the memory location, CMP the old value to compare MEM with and NEW_RTX the
   value to set if CMP == MEM.  */
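/* Sketch of the strategy (illustrative): the subword is widened to the
   surrounding SImode word.  CMP and NEW_RTX are spliced into two
   copies of the loaded word, and a CS (compare and swap) on the full
   word performs the exchange.  For the strong variant, if CS fails
   only because bytes outside MODE changed, the loop below retries
   with the freshly loaded word.  */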
void
s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
                    rtx cmp, rtx new_rtx, bool is_weak)
{
  struct alignment_context ac;
  rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
  rtx res = gen_reg_rtx (SImode);
  rtx_code_label *csloop = NULL, *csend = NULL;

  gcc_assert (MEM_P (mem));

  init_alignment_context (&ac, mem, mode);

  /* Load full word.  Subsequent loads are performed by CS.  */
  val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
                             NULL_RTX, 1, OPTAB_DIRECT);

  /* Prepare insertions of cmp and new_rtx into the loaded value.  When
     possible, we try to use insv to make this happen efficiently.  If
     that fails we'll generate code both inside and outside the loop.  */
  cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
  newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);

  if (seq0)
    emit_insn (seq0);
  if (seq1)
    emit_insn (seq1);

  /* Start CS loop.  */
  if (!is_weak)
    {
      /* Begin assuming success.  */
      emit_move_insn (btarget, const1_rtx);

      csloop = gen_label_rtx ();
      csend = gen_label_rtx ();
      emit_label (csloop);
    }

  /* val = "<mem>00..0<mem>"
   * cmp = "00..0<cmp>00..0"
   * new = "00..0<new>00..0"
   */

  emit_insn (seq2);
  emit_insn (seq3);

  cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
  if (is_weak)
    emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
  else
    {
      rtx tmp;

      /* Jump to end if we're done (likely?).  */
      s390_emit_jump (csend, cc);

      /* Check for changes outside mode, and loop internal if so.
         Arrange the moves so that the compare is adjacent to the
         branch so that we can generate CRJ.  */
      tmp = copy_to_reg (val);
      force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
                          1, OPTAB_DIRECT);
      cc = s390_emit_compare (NE, val, tmp);
      s390_emit_jump (csloop, cc);

      /* Failed.  */
      emit_move_insn (btarget, const0_rtx);
      emit_label (csend);
    }

  /* Return the correct part of the bitfield.  */
  convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
                                              NULL_RTX, 1, OPTAB_DIRECT), 1);
}
/* Expand an atomic operation CODE of mode MODE.  MEM is the memory location
   and VAL the value to play with.  If AFTER is true then store the value MEM
   holds after the operation, if AFTER is false then store the value MEM
   holds before the operation.  If TARGET is zero then discard that value, else
   store it to TARGET.  */
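/* For example, a fetch-and-add on an HImode value expands to a loop of
   roughly this shape (a sketch of the code emitted below):

     cmp = load word
   loop:
     new = cmp
     val = (new + orig) & modemask        # add within the subword
     new = (new & ~modemask) | val        # splice result into the word
     if (!CS (mem, cmp, new)) goto loop   # cmp is reloaded on failure  */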
void
s390_expand_atomic (machine_mode mode, enum rtx_code code,
                    rtx target, rtx mem, rtx val, bool after)
{
  struct alignment_context ac;
  rtx cmp;
  rtx new_rtx = gen_reg_rtx (SImode);
  rtx orig = gen_reg_rtx (SImode);
  rtx_code_label *csloop = gen_label_rtx ();

  gcc_assert (!target || register_operand (target, VOIDmode));
  gcc_assert (MEM_P (mem));

  init_alignment_context (&ac, mem, mode);

  /* Shift val to the correct bit positions.
     Preserve "icm", but prevent "ex icm".  */
  if (!(ac.aligned && code == SET && MEM_P (val)))
    val = s390_expand_mask_and_shift (val, mode, ac.shift);

  /* Further preparation insns.  */
  if (code == PLUS || code == MINUS)
    emit_move_insn (orig, val);
  else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
    val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
                               NULL_RTX, 1, OPTAB_DIRECT);

  /* Load full word.  Subsequent loads are performed by CS.  */
  cmp = force_reg (SImode, ac.memsi);

  /* Start CS loop.  */
  emit_label (csloop);
  emit_move_insn (new_rtx, cmp);

  /* Patch new with val at correct position.  */
  switch (code)
    {
    case PLUS:
    case MINUS:
      val = expand_simple_binop (SImode, code, new_rtx, orig,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      val = expand_simple_binop (SImode, AND, val, ac.modemask,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      /* FALLTHRU */
    case SET:
      if (ac.aligned && MEM_P (val))
        store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
                         0, 0, SImode, val, false);
      else
        {
          new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
                                         NULL_RTX, 1, OPTAB_DIRECT);
          new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
                                         NULL_RTX, 1, OPTAB_DIRECT);
        }
      break;
    case AND:
    case IOR:
    case XOR:
      new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      break;
    case MULT: /* NAND */
      new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
                                     NULL_RTX, 1, OPTAB_DIRECT);
      break;
    default:
      gcc_unreachable ();
    }

  s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
                                                      ac.memsi, cmp, new_rtx));

  /* Return the correct part of the bitfield.  */
  if (target)
    convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
                                               after ? new_rtx : cmp, ac.shift,
                                               NULL_RTX, 1, OPTAB_DIRECT), 1);
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;

static void
s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs ("\t.quad\t", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@DTPOFF", file);
}
/* Return the proper mode for REGNO being represented in the dwarf
   unwind table.  */

static machine_mode
s390_dwarf_frame_reg_mode (int regno)
{
  machine_mode save_mode = default_dwarf_frame_reg_mode (regno);

  /* Make sure not to return DImode for any GPR with -m31 -mzarch.  */
  if (GENERAL_REGNO_P (regno))
    save_mode = Pmode;

  /* The rightmost 64 bits of vector registers are call-clobbered.  */
  if (GET_MODE_SIZE (save_mode) > 8)
    save_mode = DImode;

  return save_mode;
}
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
s390_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
  if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
  if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
  if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";

  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
s390_delegitimize_address (rtx orig_x)
{
  rtx x, y;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;

  /* Extract the symbol ref from:
     (plus:SI (reg:SI 12 %r12)
              (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
                                    UNSPEC_GOTOFF/PLTOFF)))
     and
     (plus:SI (reg:SI 12 %r12)
              (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
                                             UNSPEC_GOTOFF/PLTOFF)
                                 (const_int 4 [0x4]))))  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
      && GET_CODE (XEXP (x, 1)) == CONST)
    {
      HOST_WIDE_INT offset = 0;

      /* The const operand.  */
      y = XEXP (XEXP (x, 1), 0);

      if (GET_CODE (y) == PLUS
          && GET_CODE (XEXP (y, 1)) == CONST_INT)
        {
          offset = INTVAL (XEXP (y, 1));
          y = XEXP (y, 0);
        }

      if (GET_CODE (y) == UNSPEC
          && (XINT (y, 1) == UNSPEC_GOTOFF
              || XINT (y, 1) == UNSPEC_PLTOFF))
        return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
    }

  if (GET_CODE (x) != MEM)
    return orig_x;

  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
    {
      y = XEXP (XEXP (x, 1), 0);
      if (GET_CODE (y) == UNSPEC
          && XINT (y, 1) == UNSPEC_GOT)
        y = XVECEXP (y, 0, 0);
      else
        return orig_x;
    }
  else if (GET_CODE (x) == CONST)
    {
      /* Extract the symbol ref from:
         (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
                                       UNSPEC_PLT/GOTENT)))  */

      y = XEXP (x, 0);
      if (GET_CODE (y) == UNSPEC
          && (XINT (y, 1) == UNSPEC_GOTENT
              || XINT (y, 1) == UNSPEC_PLT))
        y = XVECEXP (y, 0, 0);
      else
        return orig_x;
    }
  else
    return orig_x;

  if (GET_MODE (orig_x) != Pmode)
    {
      if (GET_MODE (orig_x) == BLKmode)
        return orig_x;
      y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
      if (y == NULL_RTX)
        return orig_x;
    }
  return y;
}
/* Output operand OP to stdio stream FILE.
   OP is an address (register + offset) which is not used to address data;
   instead the rightmost bits are interpreted as the value.  */
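/* E.g. for (plus:SI (reg:SI %r3) (const_int 2)) this prints "2(%r3)",
   as used in a shift count operand like "sll %r1,2(%r3)" (an
   illustrative example).  */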
static void
print_shift_count_operand (FILE *file, rtx op)
{
  HOST_WIDE_INT offset;
  rtx base;

  /* Extract base register and offset.  */
  if (!s390_decompose_shift_count (op, &base, &offset))
    gcc_unreachable ();

  /* Sanity check.  */
  if (base)
    {
      gcc_assert (GET_CODE (base) == REG);
      gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
      gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
    }

  /* Offsets are restricted to twelve bits.  */
  fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
  if (base)
    fprintf (file, "(%s)", reg_names[REGNO (base)]);
}
/* Assigns the number of NOP halfwords to be emitted before and after the
   function label to *HW_BEFORE and *HW_AFTER.  Neither pointer may be NULL.
   If hotpatching is disabled for the function, the values are set to
   zero.  */

static void
s390_function_num_hotpatch_hw (tree decl,
                               int *hw_before,
                               int *hw_after)
{
  tree attr;

  attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));

  /* Handle the arguments of the hotpatch attribute.  The values
     specified via attribute might override the cmdline argument
     values.  */
  if (attr)
    {
      tree args = TREE_VALUE (attr);

      *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
      *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
    }
  else
    {
      /* Use the values specified by the cmdline arguments.  */
      *hw_before = s390_hotpatch_hw_before_label;
      *hw_after = s390_hotpatch_hw_after_label;
    }
}
/* Write the current .machine and .machinemode specification to the assembler
   file.  */

#ifdef HAVE_AS_MACHINE_MACHINEMODE
static void
s390_asm_output_machine_for_arch (FILE *asm_out_file)
{
  fprintf (asm_out_file, "\t.machinemode %s\n",
           (TARGET_ZARCH) ? "zarch" : "esa");
  fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
  if (S390_USE_ARCHITECTURE_MODIFIERS)
    {
      int cpu_flags;

      cpu_flags = processor_flags_table[(int) s390_arch];
      if (TARGET_HTM && !(cpu_flags & PF_TX))
        fprintf (asm_out_file, "+htm");
      else if (!TARGET_HTM && (cpu_flags & PF_TX))
        fprintf (asm_out_file, "+nohtm");
      if (TARGET_VX && !(cpu_flags & PF_VX))
        fprintf (asm_out_file, "+vx");
      else if (!TARGET_VX && (cpu_flags & PF_VX))
        fprintf (asm_out_file, "+novx");
    }
  fprintf (asm_out_file, "\"\n");
}
/* Write an extra function header before the very start of the function.  */

static void
s390_asm_output_function_prefix (FILE *asm_out_file,
                                 const char *fnname ATTRIBUTE_UNUSED)
{
  if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
    return;
  /* Since only the function specific options are saved but not the indications
     which options are set, it's too much work here to figure out which options
     have actually changed.  Thus, generate .machine and .machinemode whenever a
     function has the target attribute or pragma.  */
  fprintf (asm_out_file, "\t.machinemode push\n");
  fprintf (asm_out_file, "\t.machine push\n");
  s390_asm_output_machine_for_arch (asm_out_file);
}
/* Write an extra function footer after the very end of the function.  */

static void
s390_asm_declare_function_size (FILE *asm_out_file,
                                const char *fnname, tree decl)
{
  if (!flag_inhibit_size_directive)
    ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
  if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
    return;
  fprintf (asm_out_file, "\t.machine pop\n");
  fprintf (asm_out_file, "\t.machinemode pop\n");
}
#endif
/* Write the extra assembler code needed to declare a function properly.  */

void
s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
                                tree decl)
{
  int hw_before, hw_after;

  s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
  if (hw_before > 0)
    {
      unsigned int function_alignment;
      int i;

      /* Add a trampoline code area before the function label and initialize it
         with two-byte nop instructions.  This area can be overwritten with code
         that jumps to a patched version of the function.  */
      asm_fprintf (asm_out_file, "\tnopr\t%%r7"
                   "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
                   hw_before);
      for (i = 1; i < hw_before; i++)
        fputs ("\tnopr\t%r7\n", asm_out_file);

      /* Note:  The function label must be aligned so that (a) the bytes of the
         following nop do not cross a cacheline boundary, and (b) a jump address
         (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
         stored directly before the label without crossing a cacheline
         boundary.  All this is necessary to make sure the trampoline code can
         be changed atomically.
         This alignment is done automatically using the FUNCTION_BOUNDARY, but
         if there are NOPs before the function label, the alignment is placed
         before them.  So it is necessary to duplicate the alignment after the
         NOPs.  */
      function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
      if (! DECL_USER_ALIGN (decl))
        function_alignment = MAX (function_alignment,
                                  (unsigned int) align_functions);
      fputs ("\t# alignment for hotpatch\n", asm_out_file);
      ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
    }

  if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
    {
      asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
      asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
      asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
      asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
      asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
      asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
                   s390_warn_framesize);
      asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
      asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
      asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
      asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
      asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
      asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
                   TARGET_PACKED_STACK);
      asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
      asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
      asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
      asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
                   s390_warn_dynamicstack_p);
    }
  ASM_OUTPUT_LABEL (asm_out_file, fname);
  if (hw_after > 0)
    asm_fprintf (asm_out_file,
                 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
                 hw_after);
}
6920 /* Output machine-dependent UNSPECs occurring in address constant X
6921 in assembler syntax to stdio stream FILE. Returns true if the
6922 constant X could be recognized, false otherwise. */
6925 s390_output_addr_const_extra (FILE *file, rtx x)
6927 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
6928 switch (XINT (x, 1))
6931 output_addr_const (file, XVECEXP (x, 0, 0));
6932 fprintf (file, "@GOTENT");
6935 output_addr_const (file, XVECEXP (x, 0, 0));
6936 fprintf (file, "@GOT");
6939 output_addr_const (file, XVECEXP (x, 0, 0));
6940 fprintf (file, "@GOTOFF");
6943 output_addr_const (file, XVECEXP (x, 0, 0));
6944 fprintf (file, "@PLT");
6947 output_addr_const (file, XVECEXP (x, 0, 0));
6948 fprintf (file, "@PLTOFF");
6951 output_addr_const (file, XVECEXP (x, 0, 0));
6952 fprintf (file, "@TLSGD");
6955 assemble_name (file, get_some_local_dynamic_name ());
6956 fprintf (file, "@TLSLDM");
6959 output_addr_const (file, XVECEXP (x, 0, 0));
6960 fprintf (file, "@DTPOFF");
6963 output_addr_const (file, XVECEXP (x, 0, 0));
6964 fprintf (file, "@NTPOFF");
6966 case UNSPEC_GOTNTPOFF:
6967 output_addr_const (file, XVECEXP (x, 0, 0));
6968 fprintf (file, "@GOTNTPOFF");
6970 case UNSPEC_INDNTPOFF:
6971 output_addr_const (file, XVECEXP (x, 0, 0));
6972 fprintf (file, "@INDNTPOFF");
6976 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
6977 switch (XINT (x, 1))
6979 case UNSPEC_POOL_OFFSET:
6980 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
6981 output_addr_const (file, x);
6987 /* Output address operand ADDR in assembler syntax to
6988 stdio stream FILE. */
6991 print_operand_address (FILE *file, rtx addr)
6993 struct s390_address ad;
6995 if (s390_loadrelative_operand_p (addr, NULL, NULL))
6999 output_operand_lossage ("symbolic memory references are "
7000 "only supported on z10 or later");
7003 output_addr_const (file, addr);
7007 if (!s390_decompose_address (addr, &ad)
7008 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7009 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7010 output_operand_lossage ("cannot decompose address");
7013 output_addr_const (file, ad.disp);
7015 fprintf (file, "0");
7017 if (ad.base && ad.indx)
7018 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7019 reg_names[REGNO (ad.base)]);
7021 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
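/* For example (illustrative), an address with displacement 100, index
   register %r2 and base register %r3 is printed as "100(%r2,%r3)"; without
   an index it becomes "100(%r3)", and a zero displacement is printed
   explicitly as "0".  */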
7024 /* Output operand X in assembler syntax to stdio stream FILE.
7025 CODE specifies the format flag. The following format flags
7028 'C': print opcode suffix for branch condition.
7029 'D': print opcode suffix for inverse branch condition.
7030 'E': print opcode suffix for branch on index instruction.
7031 'G': print the size of the operand in bytes.
7032 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7033 'M': print the second word of a TImode operand.
7034 'N': print the second word of a DImode operand.
7035 'O': print only the displacement of a memory reference or address.
7036 'R': print only the base register of a memory reference or address.
7037 'S': print S-type memory reference (base+displacement).
7038 'Y': print shift count operand.
7040 'b': print integer X as if it's an unsigned byte.
7041 'c': print integer X as if it's a signed byte.
7042 'e': "end" of contiguous bitmask X in either DImode or vector inner mode.
7043 'f': "end" of contiguous bitmask X in SImode.
7044 'h': print integer X as if it's a signed halfword.
7045 'i': print the first nonzero HImode part of X.
7046 'j': print the first HImode part of X unequal to -1.
7047 'k': print the first nonzero SImode part of X.
7048 'm': print the first SImode part of X unequal to -1.
7049 'o': print integer X as if it's an unsigned 32-bit word.
7050 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7051 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7052 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7053 'x': print integer X as if it's an unsigned halfword.
7054 'v': print register number as vector register (v1 instead of f1).
7058 print_operand (FILE *file, rtx x, int code)
7065 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7069 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7073 if (GET_CODE (x) == LE)
7074 fprintf (file, "l");
7075 else if (GET_CODE (x) == GT)
7076 fprintf (file, "h");
7078 output_operand_lossage ("invalid comparison operator "
7079 "for 'E' output modifier");
7083 if (GET_CODE (x) == SYMBOL_REF)
7085 fprintf (file, "%s", ":tls_load:");
7086 output_addr_const (file, x);
7088 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7090 fprintf (file, "%s", ":tls_gdcall:");
7091 output_addr_const (file, XVECEXP (x, 0, 0));
7093 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7095 fprintf (file, "%s", ":tls_ldcall:");
7096 const char *name = get_some_local_dynamic_name ();
7098 assemble_name (file, name);
7101 output_operand_lossage ("invalid reference for 'J' output modifier");
7105 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7110 struct s390_address ad;
7113 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7116 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7119 output_operand_lossage ("invalid address for 'O' output modifier");
7124 output_addr_const (file, ad.disp);
7126 fprintf (file, "0");
7132 struct s390_address ad;
7135 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7138 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7141 output_operand_lossage ("invalid address for 'R' output modifier");
7146 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7148 fprintf (file, "0");
7154 struct s390_address ad;
7159 output_operand_lossage ("memory reference expected for "
7160 "'S' output modifier");
7163 ret = s390_decompose_address (XEXP (x, 0), &ad);
7166 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7169 output_operand_lossage ("invalid address for 'S' output modifier");
7174 output_addr_const (file, ad.disp);
7176 fprintf (file, "0");
7179 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7184 if (GET_CODE (x) == REG)
7185 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7186 else if (GET_CODE (x) == MEM)
7187 x = change_address (x, VOIDmode,
7188 plus_constant (Pmode, XEXP (x, 0), 4));
7190 output_operand_lossage ("register or memory expression expected "
7191 "for 'N' output modifier");
7195 if (GET_CODE (x) == REG)
7196 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7197 else if (GET_CODE (x) == MEM)
7198 x = change_address (x, VOIDmode,
7199 plus_constant (Pmode, XEXP (x, 0), 8));
7201 output_operand_lossage ("register or memory expression expected "
7202 "for 'M' output modifier");
7206 print_shift_count_operand (file, x);
7210 switch (GET_CODE (x))
7213 /* Print FP regs as fx instead of vx when they are accessed
7214 through non-vector mode. */
7216 || VECTOR_NOFP_REG_P (x)
7217 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7218 || (VECTOR_REG_P (x)
7219 && (GET_MODE_SIZE (GET_MODE (x)) /
7220 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7221 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7223 fprintf (file, "%s", reg_names[REGNO (x)]);
7227 output_address (GET_MODE (x), XEXP (x, 0));
7234 output_addr_const (file, x);
7247 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7253 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7256 ival = s390_extract_part (x, HImode, 0);
7259 ival = s390_extract_part (x, HImode, -1);
7262 ival = s390_extract_part (x, SImode, 0);
7265 ival = s390_extract_part (x, SImode, -1);
7276 len = (code == 's' || code == 'e' ? 64 : 32);
7277 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
7279 if (code == 's' || code == 't')
7280 ival = 64 - pos - len;
7282 ival = 64 - 1 - pos;
7286 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7288 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7291 case CONST_WIDE_INT:
7293 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7294 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7295 else if (code == 'x')
7296 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7297 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7298 else if (code == 'h')
7299 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7300 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7304 output_operand_lossage ("invalid constant - try using "
7305 "an output modifier");
7307 output_operand_lossage ("invalid constant for output modifier '%c'",
7315 gcc_assert (const_vec_duplicate_p (x));
7316 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7317 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7322 int start, stop, inner_len;
7325 inner_len = GET_MODE_UNIT_BITSIZE (GET_MODE (x));
7326 ok = s390_contiguous_bitmask_vector_p (x, &start, &stop);
7328 if (code == 's' || code == 't')
7329 ival = inner_len - stop - 1;
7331 ival = inner_len - start - 1;
7332 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7338 bool ok = s390_bytemask_vector_p (x, &mask);
7340 fprintf (file, "%u", mask);
7345 output_operand_lossage ("invalid constant vector for output "
7346 "modifier '%c'", code);
7352 output_operand_lossage ("invalid expression - try using "
7353 "an output modifier");
7355 output_operand_lossage ("invalid expression for output "
7356 "modifier '%c'", code);
7361 /* Target hook for assembling integer objects. We need to define it
7362 here to work around a bug in some versions of GAS, which couldn't
7363 handle values smaller than INT_MIN when printed in decimal. */
7366 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7368 if (size == 8 && aligned_p
7369 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7371 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7375 return default_assemble_integer (x, size, aligned_p);
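/* E.g. an aligned 8-byte CONST_INT with value -0x8000000000000000 is
   emitted as "\t.quad\t0x8000000000000000" rather than in decimal, which
   the buggy assembler versions would reject.  */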
7378 /* Returns true if register REGNO is used for forming
7379 a memory address in expression X. */
7382 reg_used_in_mem_p (int regno, rtx x)
7384 enum rtx_code code = GET_CODE (x);
7390 if (refers_to_regno_p (regno, XEXP (x, 0)))
7393 else if (code == SET
7394 && GET_CODE (SET_DEST (x)) == PC)
7396 if (refers_to_regno_p (regno, SET_SRC (x)))
7400 fmt = GET_RTX_FORMAT (code);
7401 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7404 && reg_used_in_mem_p (regno, XEXP (x, i)))
7407 else if (fmt[i] == 'E')
7408 for (j = 0; j < XVECLEN (x, i); j++)
7409 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7415 /* Returns true if expression DEP_RTX sets an address register
7416 used by instruction INSN to address memory. */
7419 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7423 if (NONJUMP_INSN_P (dep_rtx))
7424 dep_rtx = PATTERN (dep_rtx);
7426 if (GET_CODE (dep_rtx) == SET)
7428 target = SET_DEST (dep_rtx);
7429 if (GET_CODE (target) == STRICT_LOW_PART)
7430 target = XEXP (target, 0);
7431 while (GET_CODE (target) == SUBREG)
7432 target = SUBREG_REG (target);
7434 if (GET_CODE (target) == REG)
7436 int regno = REGNO (target);
7438 if (s390_safe_attr_type (insn) == TYPE_LA)
7440 pat = PATTERN (insn);
7441 if (GET_CODE (pat) == PARALLEL)
7443 gcc_assert (XVECLEN (pat, 0) == 2);
7444 pat = XVECEXP (pat, 0, 0);
7446 gcc_assert (GET_CODE (pat) == SET);
7447 return refers_to_regno_p (regno, SET_SRC (pat));
7449 else if (get_attr_atype (insn) == ATYPE_AGEN)
7450 return reg_used_in_mem_p (regno, PATTERN (insn));
7456 /* Return 1 if dep_insn sets a register used by insn in the agen unit. */
7459 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7461 rtx dep_rtx = PATTERN (dep_insn);
7464 if (GET_CODE (dep_rtx) == SET
7465 && addr_generation_dependency_p (dep_rtx, insn))
7467 else if (GET_CODE (dep_rtx) == PARALLEL)
7469 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7471 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
7479 /* A C statement (sans semicolon) to update the integer scheduling priority
7480 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
7481 reduce the priority to execute INSN later. Do not define this macro if
7482 you do not need to adjust the scheduling priorities of insns.
7484 A STD instruction should be scheduled earlier,
7485 in order to use the bypass. */
7487 s390_adjust_priority (rtx_insn *insn, int priority)
7489 if (! INSN_P (insn))
7492 if (s390_tune <= PROCESSOR_2064_Z900)
7495 switch (s390_safe_attr_type (insn))
7499 priority = priority << 3;
7503 priority = priority << 1;
7512 /* The number of instructions that can be issued per cycle. */
7515 s390_issue_rate (void)
7519 case PROCESSOR_2084_Z990:
7520 case PROCESSOR_2094_Z9_109:
7521 case PROCESSOR_2094_Z9_EC:
7522 case PROCESSOR_2817_Z196:
7524 case PROCESSOR_2097_Z10:
7526 case PROCESSOR_9672_G5:
7527 case PROCESSOR_9672_G6:
7528 case PROCESSOR_2064_Z900:
7529 /* Starting with EC12 we use the sched_reorder hook to take care
7530 of instruction dispatch constraints. The algorithm only
7531 picks the best instruction and assumes only a single
7532 instruction gets issued per cycle. */
7533 case PROCESSOR_2827_ZEC12:
7534 case PROCESSOR_2964_Z13:
7541 s390_first_cycle_multipass_dfa_lookahead (void)
7546 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
7547 Fix up MEMs as required. */
7550 annotate_constant_pool_refs (rtx *x)
7555 gcc_assert (GET_CODE (*x) != SYMBOL_REF
7556 || !CONSTANT_POOL_ADDRESS_P (*x));
7558 /* Literal pool references can only occur inside a MEM ... */
7559 if (GET_CODE (*x) == MEM)
7561 rtx memref = XEXP (*x, 0);
7563 if (GET_CODE (memref) == SYMBOL_REF
7564 && CONSTANT_POOL_ADDRESS_P (memref))
7566 rtx base = cfun->machine->base_reg;
7567 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
7570 *x = replace_equiv_address (*x, addr);
7574 if (GET_CODE (memref) == CONST
7575 && GET_CODE (XEXP (memref, 0)) == PLUS
7576 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
7577 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
7578 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
7580 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
7581 rtx sym = XEXP (XEXP (memref, 0), 0);
7582 rtx base = cfun->machine->base_reg;
7583 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7586 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
7591 /* ... or a load-address type pattern. */
7592 if (GET_CODE (*x) == SET)
7594 rtx addrref = SET_SRC (*x);
7596 if (GET_CODE (addrref) == SYMBOL_REF
7597 && CONSTANT_POOL_ADDRESS_P (addrref))
7599 rtx base = cfun->machine->base_reg;
7600 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
7603 SET_SRC (*x) = addr;
7607 if (GET_CODE (addrref) == CONST
7608 && GET_CODE (XEXP (addrref, 0)) == PLUS
7609 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
7610 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
7611 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
7613 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
7614 rtx sym = XEXP (XEXP (addrref, 0), 0);
7615 rtx base = cfun->machine->base_reg;
7616 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
7619 SET_SRC (*x) = plus_constant (Pmode, addr, off);
7624 /* Annotate LTREL_BASE as well. */
7625 if (GET_CODE (*x) == UNSPEC
7626 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7628 rtx base = cfun->machine->base_reg;
7629 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
7634 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7635 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7639 annotate_constant_pool_refs (&XEXP (*x, i));
7641 else if (fmt[i] == 'E')
7643 for (j = 0; j < XVECLEN (*x, i); j++)
7644 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
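/* For illustration (".LC0" is just a typical pool label name), a pool
   reference such as

       (mem (symbol_ref ".LC0"))

   is rewritten by the code above into

       (mem (unspec [(symbol_ref ".LC0") (reg base)] UNSPEC_LTREF))

   making the dependency on the literal pool base register explicit in the
   RTL.  */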
7649 /* Split all branches that exceed the maximum distance.
7650 Returns true if this created a new literal pool entry. */
7653 s390_split_branches (void)
7655 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
7656 int new_literal = 0, ret;
7661 /* We need correct insn addresses. */
7663 shorten_branches (get_insns ());
7665 /* Find all branches that exceed 64KB, and split them. */
7667 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7669 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
7672 pat = PATTERN (insn);
7673 if (GET_CODE (pat) == PARALLEL)
7674 pat = XVECEXP (pat, 0, 0);
7675 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
7678 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
7680 label = &SET_SRC (pat);
7682 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
7684 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
7685 label = &XEXP (SET_SRC (pat), 1);
7686 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
7687 label = &XEXP (SET_SRC (pat), 2);
7694 if (get_attr_length (insn) <= 4)
7697 /* We are going to use the return register as scratch register,
7698 make sure it will be saved/restored by the prologue/epilogue. */
7699 cfun_frame_layout.save_return_addr_p = 1;
7704 rtx mem = force_const_mem (Pmode, *label);
7705 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
7707 INSN_ADDRESSES_NEW (set_insn, -1);
7708 annotate_constant_pool_refs (&PATTERN (set_insn));
7715 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
7716 UNSPEC_LTREL_OFFSET);
7717 target = gen_rtx_CONST (Pmode, target);
7718 target = force_const_mem (Pmode, target);
7719 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
7721 INSN_ADDRESSES_NEW (set_insn, -1);
7722 annotate_constant_pool_refs (&PATTERN (set_insn));
7724 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
7725 cfun->machine->base_reg),
7727 target = gen_rtx_PLUS (Pmode, temp_reg, target);
7730 ret = validate_change (insn, label, target, 0);
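/* The net effect (sketch): a branch whose target lies more than 64KB away
   is replaced by a load of the target address from the literal pool into
   r14 (which is why save_return_addr_p is set above), followed by a branch
   on that register, with the pool base added in on non-zarch CPUs.  */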
7738 /* Find an annotated literal pool symbol referenced in RTX X,
7739 and store it at REF. Will abort if X contains references to
7740 more than one such pool symbol; multiple references to the same
7741 symbol are allowed, however.
7743 The rtx pointed to by REF must be initialized to NULL_RTX
7744 by the caller before calling this routine. */
7747 find_constant_pool_ref (rtx x, rtx *ref)
7752 /* Ignore LTREL_BASE references. */
7753 if (GET_CODE (x) == UNSPEC
7754 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7756 /* Likewise POOL_ENTRY insns. */
7757 if (GET_CODE (x) == UNSPEC_VOLATILE
7758 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
7761 gcc_assert (GET_CODE (x) != SYMBOL_REF
7762 || !CONSTANT_POOL_ADDRESS_P (x));
7764 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
7766 rtx sym = XVECEXP (x, 0, 0);
7767 gcc_assert (GET_CODE (sym) == SYMBOL_REF
7768 && CONSTANT_POOL_ADDRESS_P (sym));
7770 if (*ref == NULL_RTX)
7773 gcc_assert (*ref == sym);
7778 fmt = GET_RTX_FORMAT (GET_CODE (x));
7779 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7783 find_constant_pool_ref (XEXP (x, i), ref);
7785 else if (fmt[i] == 'E')
7787 for (j = 0; j < XVECLEN (x, i); j++)
7788 find_constant_pool_ref (XVECEXP (x, i, j), ref);
7793 /* Replace every reference to the annotated literal pool
7794 symbol REF in X by its base plus OFFSET. */
7797 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
7802 gcc_assert (*x != ref);
7804 if (GET_CODE (*x) == UNSPEC
7805 && XINT (*x, 1) == UNSPEC_LTREF
7806 && XVECEXP (*x, 0, 0) == ref)
7808 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
7812 if (GET_CODE (*x) == PLUS
7813 && GET_CODE (XEXP (*x, 1)) == CONST_INT
7814 && GET_CODE (XEXP (*x, 0)) == UNSPEC
7815 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
7816 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
7818 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
7819 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
7823 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7824 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7828 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
7830 else if (fmt[i] == 'E')
7832 for (j = 0; j < XVECLEN (*x, i); j++)
7833 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
7838 /* Check whether X contains an UNSPEC_LTREL_BASE.
7839 Return its constant pool symbol if found, NULL_RTX otherwise. */
7842 find_ltrel_base (rtx x)
7847 if (GET_CODE (x) == UNSPEC
7848 && XINT (x, 1) == UNSPEC_LTREL_BASE)
7849 return XVECEXP (x, 0, 0);
7851 fmt = GET_RTX_FORMAT (GET_CODE (x));
7852 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7856 rtx fnd = find_ltrel_base (XEXP (x, i));
7860 else if (fmt[i] == 'E')
7862 for (j = 0; j < XVECLEN (x, i); j++)
7864 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
7874 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
7877 replace_ltrel_base (rtx *x)
7882 if (GET_CODE (*x) == UNSPEC
7883 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
7885 *x = XVECEXP (*x, 0, 1);
7889 fmt = GET_RTX_FORMAT (GET_CODE (*x));
7890 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
7894 replace_ltrel_base (&XEXP (*x, i));
7896 else if (fmt[i] == 'E')
7898 for (j = 0; j < XVECLEN (*x, i); j++)
7899 replace_ltrel_base (&XVECEXP (*x, i, j));
7905 /* We keep a list of constants which we have to add to internal
7906 constant tables in the middle of large functions. */
7908 #define NR_C_MODES 32
7909 machine_mode constant_modes[NR_C_MODES] =
7911 TFmode, TImode, TDmode,
7912 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
7913 V4SFmode, V2DFmode, V1TFmode,
7914 DFmode, DImode, DDmode,
7915 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
7916 SFmode, SImode, SDmode,
7917 V4QImode, V2HImode, V1SImode, V1SFmode,
7926 struct constant *next;
7928 rtx_code_label *label;
7931 struct constant_pool
7933 struct constant_pool *next;
7934 rtx_insn *first_insn;
7935 rtx_insn *pool_insn;
7937 rtx_insn *emit_pool_after;
7939 struct constant *constants[NR_C_MODES];
7940 struct constant *execute;
7941 rtx_code_label *label;
7945 /* Allocate new constant_pool structure. */
7947 static struct constant_pool *
7948 s390_alloc_pool (void)
7950 struct constant_pool *pool;
7953 pool = (struct constant_pool *) xmalloc (sizeof *pool);
7955 for (i = 0; i < NR_C_MODES; i++)
7956 pool->constants[i] = NULL;
7958 pool->execute = NULL;
7959 pool->label = gen_label_rtx ();
7960 pool->first_insn = NULL;
7961 pool->pool_insn = NULL;
7962 pool->insns = BITMAP_ALLOC (NULL);
7964 pool->emit_pool_after = NULL;
7969 /* Create new constant pool covering instructions starting at INSN
7970 and chain it to the end of POOL_LIST. */
7972 static struct constant_pool *
7973 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
7975 struct constant_pool *pool, **prev;
7977 pool = s390_alloc_pool ();
7978 pool->first_insn = insn;
7980 for (prev = pool_list; *prev; prev = &(*prev)->next)
7987 /* End range of instructions covered by POOL at INSN and emit
7988 placeholder insn representing the pool. */
7991 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
7993 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
7996 insn = get_last_insn ();
7998 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
7999 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8002 /* Add INSN to the list of insns covered by POOL. */
8005 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8007 bitmap_set_bit (pool->insns, INSN_UID (insn));
8010 /* Return pool out of POOL_LIST that covers INSN. */
8012 static struct constant_pool *
8013 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8015 struct constant_pool *pool;
8017 for (pool = pool_list; pool; pool = pool->next)
8018 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8024 /* Add constant VAL of mode MODE to the constant pool POOL. */
8027 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8032 for (i = 0; i < NR_C_MODES; i++)
8033 if (constant_modes[i] == mode)
8035 gcc_assert (i != NR_C_MODES);
8037 for (c = pool->constants[i]; c != NULL; c = c->next)
8038 if (rtx_equal_p (val, c->value))
8043 c = (struct constant *) xmalloc (sizeof *c);
8045 c->label = gen_label_rtx ();
8046 c->next = pool->constants[i];
8047 pool->constants[i] = c;
8048 pool->size += GET_MODE_SIZE (mode);
8052 /* Return an rtx that represents the offset of X from the start of
8056 s390_pool_offset (struct constant_pool *pool, rtx x)
8060 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8061 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8062 UNSPEC_POOL_OFFSET);
8063 return gen_rtx_CONST (GET_MODE (x), x);
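/* The UNSPEC_POOL_OFFSET wrapper is later lowered by
   s390_output_addr_const_extra into "x - label", i.e. the distance of X
   from the pool base label.  */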
8066 /* Find constant VAL of mode MODE in the constant pool POOL.
8067 Return an RTX describing the distance from the start of
8068 the pool to the location of the new constant. */
8071 s390_find_constant (struct constant_pool *pool, rtx val,
8077 for (i = 0; i < NR_C_MODES; i++)
8078 if (constant_modes[i] == mode)
8080 gcc_assert (i != NR_C_MODES);
8082 for (c = pool->constants[i]; c != NULL; c = c->next)
8083 if (rtx_equal_p (val, c->value))
8088 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8091 /* Check whether INSN is an execute. Return the label_ref to its
8092 execute target template if so, NULL_RTX otherwise. */
8095 s390_execute_label (rtx insn)
8097 if (NONJUMP_INSN_P (insn)
8098 && GET_CODE (PATTERN (insn)) == PARALLEL
8099 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8100 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8101 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8106 /* Add execute target for INSN to the constant pool POOL. */
8109 s390_add_execute (struct constant_pool *pool, rtx insn)
8113 for (c = pool->execute; c != NULL; c = c->next)
8114 if (INSN_UID (insn) == INSN_UID (c->value))
8119 c = (struct constant *) xmalloc (sizeof *c);
8121 c->label = gen_label_rtx ();
8122 c->next = pool->execute;
8128 /* Find execute target for INSN in the constant pool POOL.
8129 Return an RTX describing the distance from the start of
8130 the pool to the location of the execute target. */
8133 s390_find_execute (struct constant_pool *pool, rtx insn)
8137 for (c = pool->execute; c != NULL; c = c->next)
8138 if (INSN_UID (insn) == INSN_UID (c->value))
8143 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8146 /* For an execute INSN, extract the execute target template. */
8149 s390_execute_target (rtx insn)
8151 rtx pattern = PATTERN (insn);
8152 gcc_assert (s390_execute_label (insn));
8154 if (XVECLEN (pattern, 0) == 2)
8156 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8160 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8163 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8164 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8166 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8172 /* Indicate that INSN cannot be duplicated. This is the case for
8173 execute insns that carry a unique label. */
8176 s390_cannot_copy_insn_p (rtx_insn *insn)
8178 rtx label = s390_execute_label (insn);
8179 return label && label != const0_rtx;
8182 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8183 do not emit the pool base label. */
8186 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8189 rtx_insn *insn = pool->pool_insn;
8192 /* Switch to rodata section. */
8193 if (TARGET_CPU_ZARCH)
8195 insn = emit_insn_after (gen_pool_section_start (), insn);
8196 INSN_ADDRESSES_NEW (insn, -1);
8199 /* Ensure minimum pool alignment. */
8200 if (TARGET_CPU_ZARCH)
8201 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8203 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8204 INSN_ADDRESSES_NEW (insn, -1);
8206 /* Emit pool base label. */
8209 insn = emit_label_after (pool->label, insn);
8210 INSN_ADDRESSES_NEW (insn, -1);
8213 /* Dump constants in descending alignment requirement order,
8214 ensuring proper alignment for every constant. */
8215 for (i = 0; i < NR_C_MODES; i++)
8216 for (c = pool->constants[i]; c; c = c->next)
8218 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8219 rtx value = copy_rtx (c->value);
8220 if (GET_CODE (value) == CONST
8221 && GET_CODE (XEXP (value, 0)) == UNSPEC
8222 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8223 && XVECLEN (XEXP (value, 0), 0) == 1)
8224 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8226 insn = emit_label_after (c->label, insn);
8227 INSN_ADDRESSES_NEW (insn, -1);
8229 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8230 gen_rtvec (1, value),
8231 UNSPECV_POOL_ENTRY);
8232 insn = emit_insn_after (value, insn);
8233 INSN_ADDRESSES_NEW (insn, -1);
8236 /* Ensure minimum alignment for instructions. */
8237 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8238 INSN_ADDRESSES_NEW (insn, -1);
8240 /* Output in-pool execute template insns. */
8241 for (c = pool->execute; c; c = c->next)
8243 insn = emit_label_after (c->label, insn);
8244 INSN_ADDRESSES_NEW (insn, -1);
8246 insn = emit_insn_after (s390_execute_target (c->value), insn);
8247 INSN_ADDRESSES_NEW (insn, -1);
8250 /* Switch back to previous section. */
8251 if (TARGET_CPU_ZARCH)
8253 insn = emit_insn_after (gen_pool_section_end (), insn);
8254 INSN_ADDRESSES_NEW (insn, -1);
8257 insn = emit_barrier_after (insn);
8258 INSN_ADDRESSES_NEW (insn, -1);
8260 /* Remove placeholder insn. */
8261 remove_insn (pool->pool_insn);
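/* The emitted pool thus looks roughly like this (sketch; label name
   illustrative):

       <.rodata on zarch, inline in the text section otherwise>
       .align 8              # 4 on non-zarch
   .Lpool:                   # omitted if REMOTE_LABEL
       <constants, grouped by mode in descending alignment order>
       .align 2
       <execute target templates>

   followed by a barrier in the insn stream.  */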
8264 /* Free all memory used by POOL. */
8267 s390_free_pool (struct constant_pool *pool)
8269 struct constant *c, *next;
8272 for (i = 0; i < NR_C_MODES; i++)
8273 for (c = pool->constants[i]; c; c = next)
8279 for (c = pool->execute; c; c = next)
8285 BITMAP_FREE (pool->insns);
8290 /* Collect main literal pool. Return NULL on overflow. */
8292 static struct constant_pool *
8293 s390_mainpool_start (void)
8295 struct constant_pool *pool;
8298 pool = s390_alloc_pool ();
8300 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8302 if (NONJUMP_INSN_P (insn)
8303 && GET_CODE (PATTERN (insn)) == SET
8304 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8305 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8307 /* There might be two main_pool instructions if base_reg
8308 is call-clobbered; one for shrink-wrapped code and one
8309 for the rest. We want to keep the first. */
8310 if (pool->pool_insn)
8312 insn = PREV_INSN (insn);
8313 delete_insn (NEXT_INSN (insn));
8316 pool->pool_insn = insn;
8319 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8321 s390_add_execute (pool, insn);
8323 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8325 rtx pool_ref = NULL_RTX;
8326 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8329 rtx constant = get_pool_constant (pool_ref);
8330 machine_mode mode = get_pool_mode (pool_ref);
8331 s390_add_constant (pool, constant, mode);
8335 /* If hot/cold partitioning is enabled, we have to make sure that
8336 the literal pool is emitted in the same section where the
8337 initialization of the literal pool base pointer takes place.
8338 emit_pool_after is only used in the non-overflow case on
8339 non-Z CPUs, where we can emit the literal pool at the end of the
8340 function body within the text section. */
8342 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8343 && !pool->emit_pool_after)
8344 pool->emit_pool_after = PREV_INSN (insn);
8347 gcc_assert (pool->pool_insn || pool->size == 0);
8349 if (pool->size >= 4096)
8351 /* We're going to chunkify the pool, so remove the main
8352 pool placeholder insn. */
8353 remove_insn (pool->pool_insn);
8355 s390_free_pool (pool);
8359 /* If the function ends with the section where the literal pool
8360 should be emitted, set the marker to its end. */
8361 if (pool && !pool->emit_pool_after)
8362 pool->emit_pool_after = get_last_insn ();
8367 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8368 Modify the current function to output the pool constants as well as
8369 the pool register setup instruction. */
8372 s390_mainpool_finish (struct constant_pool *pool)
8374 rtx base_reg = cfun->machine->base_reg;
8376 /* If the pool is empty, we're done. */
8377 if (pool->size == 0)
8379 /* We don't actually need a base register after all. */
8380 cfun->machine->base_reg = NULL_RTX;
8382 if (pool->pool_insn)
8383 remove_insn (pool->pool_insn);
8384 s390_free_pool (pool);
8388 /* We need correct insn addresses. */
8389 shorten_branches (get_insns ());
8391 /* On zSeries, we use a LARL to load the pool register. The pool is
8392 located in the .rodata section, so we emit it after the function. */
8393 if (TARGET_CPU_ZARCH)
8395 rtx set = gen_main_base_64 (base_reg, pool->label);
8396 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8397 INSN_ADDRESSES_NEW (insn, -1);
8398 remove_insn (pool->pool_insn);
8400 insn = get_last_insn ();
8401 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8402 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8404 s390_dump_pool (pool, 0);
8407 /* On S/390, if the total size of the function's code plus literal pool
8408 does not exceed 4096 bytes, we use BASR to set up a function base
8409 pointer, and emit the literal pool at the end of the function. */
8410 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8411 + pool->size + 8 /* alignment slop */ < 4096)
8413 rtx set = gen_main_base_31_small (base_reg, pool->label);
8414 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8415 INSN_ADDRESSES_NEW (insn, -1);
8416 remove_insn (pool->pool_insn);
8418 insn = emit_label_after (pool->label, insn);
8419 INSN_ADDRESSES_NEW (insn, -1);
8421 /* emit_pool_after will be set by s390_mainpool_start to the
8422 last insn of the section where the literal pool should be
8424 insn = pool->emit_pool_after;
8426 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8427 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8429 s390_dump_pool (pool, 1);
8432 /* Otherwise, we emit an inline literal pool and use BASR to branch
8433 over it, setting up the pool register at the same time. */
8436 rtx_code_label *pool_end = gen_label_rtx ();
8438 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8439 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8440 JUMP_LABEL (insn) = pool_end;
8441 INSN_ADDRESSES_NEW (insn, -1);
8442 remove_insn (pool->pool_insn);
8444 insn = emit_label_after (pool->label, insn);
8445 INSN_ADDRESSES_NEW (insn, -1);
8447 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8448 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8450 insn = emit_label_after (pool_end, pool->pool_insn);
8451 INSN_ADDRESSES_NEW (insn, -1);
8453 s390_dump_pool (pool, 1);
8457 /* Replace all literal pool references. */
8459 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8462 replace_ltrel_base (&PATTERN (insn));
8464 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8466 rtx addr, pool_ref = NULL_RTX;
8467 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8470 if (s390_execute_label (insn))
8471 addr = s390_find_execute (pool, insn);
8473 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8474 get_pool_mode (pool_ref));
8476 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8477 INSN_CODE (insn) = -1;
8483 /* Free the pool. */
8484 s390_free_pool (pool);
8487 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8488 We have decided we cannot use this pool, so revert all changes
8489 to the current function that were done by s390_mainpool_start. */
8491 s390_mainpool_cancel (struct constant_pool *pool)
8493 /* We didn't actually change the instruction stream, so simply
8494 free the pool memory. */
8495 s390_free_pool (pool);
8499 /* Chunkify the literal pool. */
8501 #define S390_POOL_CHUNK_MIN 0xc00
8502 #define S390_POOL_CHUNK_MAX 0xe00
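/* The gap between the two bounds is deliberate (a sketch of the policy
   implemented below): once a chunk grows past S390_POOL_CHUNK_MIN we start
   looking for a natural BARRIER to end it at, and only if none turns up
   before S390_POOL_CHUNK_MAX do we force one by inserting a jump around
   the pool.  */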
8504 static struct constant_pool *
8505 s390_chunkify_start (void)
8507 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
8510 rtx pending_ltrel = NULL_RTX;
8513 rtx (*gen_reload_base) (rtx, rtx) =
8514 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
8517 /* We need correct insn addresses. */
8519 shorten_branches (get_insns ());
8521 /* Scan all insns and move literals to pool chunks. */
8523 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8525 bool section_switch_p = false;
8527 /* Check for pending LTREL_BASE. */
8530 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
8533 gcc_assert (ltrel_base == pending_ltrel);
8534 pending_ltrel = NULL_RTX;
8538 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8541 curr_pool = s390_start_pool (&pool_list, insn);
8543 s390_add_execute (curr_pool, insn);
8544 s390_add_pool_insn (curr_pool, insn);
8546 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8548 rtx pool_ref = NULL_RTX;
8549 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8552 rtx constant = get_pool_constant (pool_ref);
8553 machine_mode mode = get_pool_mode (pool_ref);
8556 curr_pool = s390_start_pool (&pool_list, insn);
8558 s390_add_constant (curr_pool, constant, mode);
8559 s390_add_pool_insn (curr_pool, insn);
8561 /* Don't split the pool chunk between a LTREL_OFFSET load
8562 and the corresponding LTREL_BASE. */
8563 if (GET_CODE (constant) == CONST
8564 && GET_CODE (XEXP (constant, 0)) == UNSPEC
8565 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
8567 gcc_assert (!pending_ltrel);
8568 pending_ltrel = pool_ref;
8573 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
8576 s390_add_pool_insn (curr_pool, insn);
8577 /* An LTREL_BASE must follow within the same basic block. */
8578 gcc_assert (!pending_ltrel);
8582 switch (NOTE_KIND (insn))
8584 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
8585 section_switch_p = true;
8587 case NOTE_INSN_VAR_LOCATION:
8588 case NOTE_INSN_CALL_ARG_LOCATION:
8595 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
8596 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
8599 if (TARGET_CPU_ZARCH)
8601 if (curr_pool->size < S390_POOL_CHUNK_MAX)
8604 s390_end_pool (curr_pool, NULL);
8609 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
8610 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
8613 /* We will later have to insert base register reload insns.
8614 Those will have an effect on code size, which we need to
8615 consider here. This calculation makes rather pessimistic
8616 worst-case assumptions. */
8620 if (chunk_size < S390_POOL_CHUNK_MIN
8621 && curr_pool->size < S390_POOL_CHUNK_MIN
8622 && !section_switch_p)
8625 /* Pool chunks can only be inserted after BARRIERs ... */
8626 if (BARRIER_P (insn))
8628 s390_end_pool (curr_pool, insn);
8633 /* ... so if we don't find one in time, create one. */
8634 else if (chunk_size > S390_POOL_CHUNK_MAX
8635 || curr_pool->size > S390_POOL_CHUNK_MAX
8636 || section_switch_p)
8638 rtx_insn *label, *jump, *barrier, *next, *prev;
8640 if (!section_switch_p)
8642 /* We can insert the barrier only after a 'real' insn. */
8643 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
8645 if (get_attr_length (insn) == 0)
8647 /* Don't separate LTREL_BASE from the corresponding
8648 LTREL_OFFSET load. */
8655 next = NEXT_INSN (insn);
8659 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
8660 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
8664 gcc_assert (!pending_ltrel);
8666 /* The old pool has to end before the section switch
8667 note in order to make it part of the current
8669 insn = PREV_INSN (insn);
8672 label = gen_label_rtx ();
8674 if (prev && NOTE_P (prev))
8675 prev = prev_nonnote_insn (prev);
8677 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
8678 INSN_LOCATION (prev));
8680 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
8681 barrier = emit_barrier_after (jump);
8682 insn = emit_label_after (label, barrier);
8683 JUMP_LABEL (jump) = label;
8684 LABEL_NUSES (label) = 1;
8686 INSN_ADDRESSES_NEW (jump, -1);
8687 INSN_ADDRESSES_NEW (barrier, -1);
8688 INSN_ADDRESSES_NEW (insn, -1);
8690 s390_end_pool (curr_pool, barrier);
8698 s390_end_pool (curr_pool, NULL);
8699 gcc_assert (!pending_ltrel);
8701 /* Find all labels that are branched into
8702 from an insn belonging to a different chunk. */
8704 far_labels = BITMAP_ALLOC (NULL);
8706 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8708 rtx_jump_table_data *table;
8710 /* Labels marked with LABEL_PRESERVE_P can be target
8711 of non-local jumps, so we have to mark them.
8712 The same holds for named labels.
8714 Don't do that, however, if it is the label before
8718 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
8720 rtx_insn *vec_insn = NEXT_INSN (insn);
8721 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
8722 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
8724 /* Check potential targets in a table jump (casesi_jump). */
8725 else if (tablejump_p (insn, NULL, &table))
8727 rtx vec_pat = PATTERN (table);
8728 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
8730 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
8732 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
8734 if (s390_find_pool (pool_list, label)
8735 != s390_find_pool (pool_list, insn))
8736 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8739 /* If we have a direct jump (conditional or unconditional),
8740 check all potential targets. */
8741 else if (JUMP_P (insn))
8743 rtx pat = PATTERN (insn);
8745 if (GET_CODE (pat) == PARALLEL)
8746 pat = XVECEXP (pat, 0, 0);
8748 if (GET_CODE (pat) == SET)
8750 rtx label = JUMP_LABEL (insn);
8751 if (label && !ANY_RETURN_P (label))
8753 if (s390_find_pool (pool_list, label)
8754 != s390_find_pool (pool_list, insn))
8755 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
8761 /* Insert base register reload insns before every pool. */
8763 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8765 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8767 rtx_insn *insn = curr_pool->first_insn;
8768 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
8771 /* Insert base register reload insns at every far label. */
8773 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8775 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
8777 struct constant_pool *pool = s390_find_pool (pool_list, insn);
8780 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
8782 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
8787 BITMAP_FREE (far_labels);
8790 /* Recompute insn addresses. */
8792 init_insn_lengths ();
8793 shorten_branches (get_insns ());
8798 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8799 After we have decided to use this list, finish implementing
8800 all changes to the current function as required. */
8803 s390_chunkify_finish (struct constant_pool *pool_list)
8805 struct constant_pool *curr_pool = NULL;
8809 /* Replace all literal pool references. */
8811 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8814 replace_ltrel_base (&PATTERN (insn));
8816 curr_pool = s390_find_pool (pool_list, insn);
8820 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8822 rtx addr, pool_ref = NULL_RTX;
8823 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8826 if (s390_execute_label (insn))
8827 addr = s390_find_execute (curr_pool, insn);
8829 addr = s390_find_constant (curr_pool,
8830 get_pool_constant (pool_ref),
8831 get_pool_mode (pool_ref));
8833 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8834 INSN_CODE (insn) = -1;
8839 /* Dump out all literal pools. */
8841 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8842 s390_dump_pool (curr_pool, 0);
8844 /* Free pool list. */
8848 struct constant_pool *next = pool_list->next;
8849 s390_free_pool (pool_list);
8854 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
8855 We have decided we cannot use this list, so revert all changes
8856 to the current function that were done by s390_chunkify_start. */
8859 s390_chunkify_cancel (struct constant_pool *pool_list)
8861 struct constant_pool *curr_pool = NULL;
8864 /* Remove all pool placeholder insns. */
8866 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
8868 /* Did we insert an extra barrier? Remove it. */
8869 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
8870 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
8871 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
8873 if (jump && JUMP_P (jump)
8874 && barrier && BARRIER_P (barrier)
8875 && label && LABEL_P (label)
8876 && GET_CODE (PATTERN (jump)) == SET
8877 && SET_DEST (PATTERN (jump)) == pc_rtx
8878 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
8879 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
8882 remove_insn (barrier);
8883 remove_insn (label);
8886 remove_insn (curr_pool->pool_insn);
8889 /* Remove all base register reload insns. */
8891 for (insn = get_insns (); insn; )
8893 rtx_insn *next_insn = NEXT_INSN (insn);
8895 if (NONJUMP_INSN_P (insn)
8896 && GET_CODE (PATTERN (insn)) == SET
8897 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
8898 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
8904 /* Free pool list. */
8908 struct constant_pool *next = pool_list->next;
8909 s390_free_pool (pool_list);
8914 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
8917 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
8919 switch (GET_MODE_CLASS (mode))
8922 case MODE_DECIMAL_FLOAT:
8923 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
8925 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
8929 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
8930 mark_symbol_refs_as_used (exp);
8933 case MODE_VECTOR_INT:
8934 case MODE_VECTOR_FLOAT:
8937 machine_mode inner_mode;
8938 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
8940 inner_mode = GET_MODE_INNER (GET_MODE (exp));
8941 for (i = 0; i < XVECLEN (exp, 0); i++)
8942 s390_output_pool_entry (XVECEXP (exp, 0, i),
8946 : GET_MODE_BITSIZE (inner_mode));
8956 /* Return an RTL expression representing the value of the return address
8957 for the frame COUNT steps up from the current frame. FRAME is the
8958 frame pointer of that frame. */
8961 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
8966 /* Without backchain, we fail for all but the current frame. */
8968 if (!TARGET_BACKCHAIN && count > 0)
8971 /* For the current frame, we need to make sure the initial
8972 value of RETURN_REGNUM is actually saved. */
8976 /* On non-z architectures branch splitting could overwrite r14. */
8977 if (TARGET_CPU_ZARCH)
8978 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
8981 cfun_frame_layout.save_return_addr_p = true;
8982 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
8986 if (TARGET_PACKED_STACK)
8987 offset = -2 * UNITS_PER_LONG;
8989 offset = RETURN_REGNUM * UNITS_PER_LONG;
8991 addr = plus_constant (Pmode, frame, offset);
8992 addr = memory_address (Pmode, addr);
8993 return gen_rtx_MEM (Pmode, addr);
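/* Usage sketch: __builtin_return_address (0) reaches this function with
   COUNT 0 and is satisfied from the saved RETURN_REGNUM, while
   __builtin_return_address (1) requires -mbackchain so that the save area
   of an outer frame can be located through the back chain.  */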
8996 /* Return an RTL expression representing the back chain stored in
8997 the current stack frame. */
9000 s390_back_chain_rtx (void)
9004 gcc_assert (TARGET_BACKCHAIN);
9006 if (TARGET_PACKED_STACK)
9007 chain = plus_constant (Pmode, stack_pointer_rtx,
9008 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9010 chain = stack_pointer_rtx;
9012 chain = gen_rtx_MEM (Pmode, chain);
9016 /* Find the first call-clobbered register unused in a function.
9017 This could be used as a base register in a leaf function
9018 or for holding the return address before the epilogue. */
9021 find_unused_clobbered_reg (void)
9024 for (i = 0; i < 6; i++)
9025 if (!df_regs_ever_live_p (i))
9031 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9032 clobbered hard regs in SETREG. */
9035 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9037 char *regs_ever_clobbered = (char *)data;
9038 unsigned int i, regno;
9039 machine_mode mode = GET_MODE (setreg);
9041 if (GET_CODE (setreg) == SUBREG)
9043 rtx inner = SUBREG_REG (setreg);
9044 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9046 regno = subreg_regno (setreg);
9048 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9049 regno = REGNO (setreg);
9054 i < regno + HARD_REGNO_NREGS (regno, mode);
9056 regs_ever_clobbered[i] = 1;
9059 /* Walks through all basic blocks of the current function looking
9060 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9061 of the passed char array REGS_EVER_CLOBBERED are set to one for
9062 each of those regs. */
9065 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9071 memset (regs_ever_clobbered, 0, 32);
9073 /* For non-leaf functions we have to consider all call clobbered regs to be
9077 for (i = 0; i < 32; i++)
9078 regs_ever_clobbered[i] = call_really_used_regs[i];
9081 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9082 this work is done by liveness analysis (mark_regs_live_at_end).
9083 Special care is needed for functions containing landing pads. Landing pads
9084 may use the eh registers, but the code which sets these registers is not
9085 contained in that function. Hence s390_regs_ever_clobbered is not able to
9086 deal with this automatically. */
9087 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9088 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9089 if (crtl->calls_eh_return
9090 || (cfun->machine->has_landing_pad_p
9091 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9092 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9094 /* For nonlocal gotos all call-saved registers have to be saved.
9095 This flag is also set for the unwinding code in libgcc.
9096 See expand_builtin_unwind_init. For regs_ever_live this is done by
9098 if (crtl->saves_all_registers)
9099 for (i = 0; i < 32; i++)
9100 if (!call_really_used_regs[i])
9101 regs_ever_clobbered[i] = 1;
9103 FOR_EACH_BB_FN (cur_bb, cfun)
9105 FOR_BB_INSNS (cur_bb, cur_insn)
9109 if (!INSN_P (cur_insn))
9112 pat = PATTERN (cur_insn);
9114 /* Ignore GPR restore insns. */
9115 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9117 if (GET_CODE (pat) == SET
9118 && GENERAL_REG_P (SET_DEST (pat)))
9121 if (GET_MODE (SET_SRC (pat)) == DImode
9122 && FP_REG_P (SET_SRC (pat)))
9126 if (GET_CODE (SET_SRC (pat)) == MEM)
9131 if (GET_CODE (pat) == PARALLEL
9132 && load_multiple_operation (pat, VOIDmode))
9137 s390_reg_clobbered_rtx,
9138 regs_ever_clobbered);
9143 /* Determine the frame area which actually has to be accessed
9144 in the function epilogue. The values are stored at the
9145 given pointers AREA_BOTTOM (address of the lowest used stack
9146 address) and AREA_TOP (address of the first item which does
9147 not belong to the stack frame). */
9150 s390_frame_area (int *area_bottom, int *area_top)
9157 if (cfun_frame_layout.first_restore_gpr != -1)
9159 b = (cfun_frame_layout.gprs_offset
9160 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9161 t = b + (cfun_frame_layout.last_restore_gpr
9162 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9165 if (TARGET_64BIT && cfun_save_high_fprs_p)
9167 b = MIN (b, cfun_frame_layout.f8_offset);
9168 t = MAX (t, (cfun_frame_layout.f8_offset
9169 + cfun_frame_layout.high_fprs * 8));
9174 if (cfun_fpr_save_p (FPR4_REGNUM))
9176 b = MIN (b, cfun_frame_layout.f4_offset);
9177 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9179 if (cfun_fpr_save_p (FPR6_REGNUM))
9181 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9182 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9188 /* Update gpr_save_slots in the frame layout trying to make use of
9189 FPRs as GPR save slots.
9190 This is a helper routine of s390_register_info. */
9193 s390_register_info_gprtofpr ()
9195 int save_reg_slot = FPR0_REGNUM;
9198 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9201 for (i = 15; i >= 6; i--)
9203 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9206 /* Advance to the next FP register which can be used as a
9208 while ((!call_really_used_regs[save_reg_slot]
9209 || df_regs_ever_live_p (save_reg_slot)
9210 || cfun_fpr_save_p (save_reg_slot))
9211 && FP_REGNO_P (save_reg_slot))
9213 if (!FP_REGNO_P (save_reg_slot))
9215 /* We only want to use ldgr/lgdr if we can get rid of
9216 stm/lm entirely. So undo the gpr slot allocation in
9217 case we ran out of FPR save slots. */
9218 for (j = 6; j <= 15; j++)
9219 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9220 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9223 cfun_gpr_save_slot (i) = save_reg_slot++;
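/* Illustrative example: a leaf function on z10 or later that must save
   r12 and r14 can keep them in the first otherwise unused call-clobbered
   FPRs via ldgr/lgdr, so no stm/lm pair is needed at all.  */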
9227 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9229 This is a helper routine for s390_register_info. */
9232 s390_register_info_stdarg_fpr ()
9238 /* Save the FP argument regs for stdarg: f0 and f2 for 31-bit,
9239 f0-f4 for 64-bit. */
9241 || !TARGET_HARD_FLOAT
9242 || !cfun->va_list_fpr_size
9243 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9246 min_fpr = crtl->args.info.fprs;
9247 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9248 if (max_fpr >= FP_ARG_NUM_REG)
9249 max_fpr = FP_ARG_NUM_REG - 1;
9251 /* FPR argument regs start at f0. */
9252 min_fpr += FPR0_REGNUM;
9253 max_fpr += FPR0_REGNUM;
9255 for (i = min_fpr; i <= max_fpr; i++)
9256 cfun_set_fpr_save (i);
9259 /* Reserve the GPR save slots for GPRs which need to be saved due to
9261 This is a helper routine for s390_register_info. */
9264 s390_register_info_stdarg_gpr ()
9271 || !cfun->va_list_gpr_size
9272 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9275 min_gpr = crtl->args.info.gprs;
9276 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9277 if (max_gpr >= GP_ARG_NUM_REG)
9278 max_gpr = GP_ARG_NUM_REG - 1;
9280 /* GPR argument regs start at r2. */
9281 min_gpr += GPR2_REGNUM;
9282 max_gpr += GPR2_REGNUM;
9284 /* If r6 was supposed to be saved into an FPR and now needs to go to
9285 the stack for vararg, we have to adjust the restore range to make
9286 sure that the restore is done from the stack as well. */
9287 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9288 && min_gpr <= GPR6_REGNUM
9289 && max_gpr >= GPR6_REGNUM)
9291 if (cfun_frame_layout.first_restore_gpr == -1
9292 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9293 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9294 if (cfun_frame_layout.last_restore_gpr == -1
9295 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9296 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9299 if (cfun_frame_layout.first_save_gpr == -1
9300 || cfun_frame_layout.first_save_gpr > min_gpr)
9301 cfun_frame_layout.first_save_gpr = min_gpr;
9303 if (cfun_frame_layout.last_save_gpr == -1
9304 || cfun_frame_layout.last_save_gpr < max_gpr)
9305 cfun_frame_layout.last_save_gpr = max_gpr;
9307 for (i = min_gpr; i <= max_gpr; i++)
9308 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
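/* Worked example (hypothetical): a stdarg function with two named GPR
   arguments has crtl->args.info.gprs == 2, so the loop above marks r4
   through r6 for stack save slots and widens the save/restore ranges to
   include them.  */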
9311 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9312 prologue and epilogue. */
9315 s390_register_info_set_ranges ()
9319 /* Find the first and the last save slot supposed to use the stack
9320 to set the restore range.
9321 Vararg regs might be marked for saving to the stack, but only the
9322 call-saved regs really need restoring (i.e. r6). This code
9323 assumes that the vararg regs have not yet been recorded in
9324 cfun_gpr_save_slot. */
9325 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9326 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9327 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9328 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9330 /* Now the range of GPRs which need saving. */
9331 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9332 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9333 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9334 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
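/* E.g. if only r11 and r14 have stack save slots, the ranges become
   11..14, so a single stm/lm (stmg/lmg) covers them, at the cost of also
   storing r12 and r13.  */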
9337 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9338 for registers which need to be saved in function prologue.
9339 This function can be used until the insns emitted for save/restore
9340 of the regs are visible in the RTL stream. */
9343 s390_register_info ()
9346 char clobbered_regs[32];
9348 gcc_assert (!epilogue_completed);
9350 if (reload_completed)
9351 /* After reload we rely on our own routine to determine which
9352 registers need saving. */
9353 s390_regs_ever_clobbered (clobbered_regs);
9355 /* During reload we use regs_ever_live as a base since reload
9356 makes changes there which we otherwise would not be aware
9357 of. */
9358 for (i = 0; i < 32; i++)
9359 clobbered_regs[i] = df_regs_ever_live_p (i);
9361 for (i = 0; i < 32; i++)
9362 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9364 /* Mark the call-saved FPRs which need to be saved.
9365 This needs to be done before checking the special GPRs since the
9366 stack pointer usage depends on whether high FPRs have to be saved
9367 or not. */
9368 cfun_frame_layout.fpr_bitmap = 0;
9369 cfun_frame_layout.high_fprs = 0;
9370 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9371 if (clobbered_regs[i] && !call_really_used_regs[i])
9373 cfun_set_fpr_save (i);
9374 if (i >= FPR8_REGNUM)
9375 cfun_frame_layout.high_fprs++;
9379 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
9380 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
9382 clobbered_regs[BASE_REGNUM]
9383 |= (cfun->machine->base_reg
9384 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9386 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9387 |= !!frame_pointer_needed;
9389 /* On pre z900 machines this might take until machine dependent
9390 reorg to decide.
9391 save_return_addr_p will only be set on non-zarch machines so
9392 there is no risk that r14 goes into an FPR instead of a stack
9393 slot. */
9394 clobbered_regs[RETURN_REGNUM]
9396 || TARGET_TPF_PROFILING
9397 || cfun->machine->split_branches_pending_p
9398 || cfun_frame_layout.save_return_addr_p
9399 || crtl->calls_eh_return);
9401 clobbered_regs[STACK_POINTER_REGNUM]
9403 || TARGET_TPF_PROFILING
9404 || cfun_save_high_fprs_p
9405 || get_frame_size () > 0
9406 || (reload_completed && cfun_frame_layout.frame_size > 0)
9407 || cfun->calls_alloca);
9409 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9411 for (i = 6; i < 16; i++)
9412 if (clobbered_regs[i])
9413 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9415 s390_register_info_stdarg_fpr ();
9416 s390_register_info_gprtofpr ();
9417 s390_register_info_set_ranges ();
9418 /* stdarg functions might need to save GPRs 2 to 6. This might
9419 override the GPR->FPR save decision made by
9420 s390_register_info_gprtofpr for r6 since vararg regs must go to
9421 the stack. */
9422 s390_register_info_stdarg_gpr ();
9425 /* This function is called by s390_optimize_prologue in order to get
9426 rid of unnecessary GPR save/restore instructions. The register info
9427 for the GPRs is re-computed and the ranges are re-calculated. */
9430 s390_optimize_register_info ()
9432 char clobbered_regs[32];
9435 gcc_assert (epilogue_completed);
9436 gcc_assert (!cfun->machine->split_branches_pending_p);
9438 s390_regs_ever_clobbered (clobbered_regs);
9440 for (i = 0; i < 32; i++)
9441 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9443 /* There is still special treatment needed for cases invisible to
9444 s390_regs_ever_clobbered. */
9445 clobbered_regs[RETURN_REGNUM]
9446 |= (TARGET_TPF_PROFILING
9447 /* When expanding builtin_return_addr in ESA mode we do not
9448 know whether r14 will later be needed as scratch reg when
9449 doing branch splitting. So the builtin always accesses the
9450 r14 save slot and we need to stick to the save/restore
9451 decision for r14 even if it turns out that it didn't get
9452 used. */
9453 || cfun_frame_layout.save_return_addr_p
9454 || crtl->calls_eh_return);
9456 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
9458 for (i = 6; i < 16; i++)
9459 if (!clobbered_regs[i])
9460 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
9462 s390_register_info_set_ranges ();
9463 s390_register_info_stdarg_gpr ();
9466 /* Fill cfun->machine with info about frame of current function. */
9469 s390_frame_info (void)
9471 HOST_WIDE_INT lowest_offset;
9473 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
9474 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
9476 /* The va_arg builtin uses a constant distance of 16 *
9477 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
9478 pointer. So even if we are going to save the stack pointer in an
9479 FPR we need the stack space in order to keep the offsets
9480 correct. */
9481 if (cfun->stdarg && cfun_save_arg_fprs_p)
9483 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9485 if (cfun_frame_layout.first_save_gpr_slot == -1)
9486 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
9489 cfun_frame_layout.frame_size = get_frame_size ();
9490 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
9491 fatal_error (input_location,
9492 "total size of local variables exceeds architecture limit");
9494 if (!TARGET_PACKED_STACK)
9496 /* Fixed stack layout. */
9497 cfun_frame_layout.backchain_offset = 0;
9498 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
9499 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
9500 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
9501 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
9504 else if (TARGET_BACKCHAIN)
9506 /* Kernel stack layout - packed stack, backchain, no float */
9507 gcc_assert (TARGET_SOFT_FLOAT);
9508 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
9511 /* The distance between the backchain and the return address
9512 save slot must not change. So we always need a slot for the
9513 stack pointer which resides in between. */
9514 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
9516 cfun_frame_layout.gprs_offset
9517 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
9519 /* FPRs will not be saved. Nevertheless pick sane values to
9520 keep area calculations valid. */
9521 cfun_frame_layout.f0_offset =
9522 cfun_frame_layout.f4_offset =
9523 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
9529 /* Packed stack layout without backchain. */
9531 /* With stdarg FPRs need their dedicated slots. */
9532 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
9533 : (cfun_fpr_save_p (FPR4_REGNUM) +
9534 cfun_fpr_save_p (FPR6_REGNUM)));
9535 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
9537 num_fprs = (cfun->stdarg ? 2
9538 : (cfun_fpr_save_p (FPR0_REGNUM)
9539 + cfun_fpr_save_p (FPR2_REGNUM)));
9540 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
9542 cfun_frame_layout.gprs_offset
9543 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
9545 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
9546 - cfun_frame_layout.high_fprs * 8);
9549 if (cfun_save_high_fprs_p)
9550 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
9553 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
9555 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
9556 sized area at the bottom of the stack. This is required also for
9557 leaf functions. When GCC generates a local stack reference it
9558 will always add STACK_POINTER_OFFSET to all these references. */
9560 && !TARGET_TPF_PROFILING
9561 && cfun_frame_layout.frame_size == 0
9562 && !cfun->calls_alloca)
9565 /* Calculate the number of bytes we have used in our own register
9566 save area. With the packed stack layout we can re-use the
9567 remaining bytes for normal stack elements. */
9569 if (TARGET_PACKED_STACK)
9570 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
9571 cfun_frame_layout.f4_offset),
9572 cfun_frame_layout.gprs_offset);
9576 if (TARGET_BACKCHAIN)
9577 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
9579 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
9581 /* If under 31 bit an odd number of gprs has to be saved we have to
9582 adjust the frame size to sustain 8 byte alignment of stack
9583 frames. */
9584 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
9585 STACK_BOUNDARY / BITS_PER_UNIT - 1)
9586 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
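/* The expression above is the usual align-up idiom.  Illustrative
   stand-alone form (not from the sources), assuming STACK_BOUNDARY /
   BITS_PER_UNIT == 8:  */
#if 0
/* (size + 7) & ~7 rounds up to the next multiple of 8:
   20 -> 24, 24 -> 24, 25 -> 32.  */
static unsigned long
align_up (unsigned long size, unsigned long align)
{
  return (size + align - 1) & ~(align - 1);
}
#endif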
9589 /* Generate frame layout. Fills in register and frame data for the current
9590 function in cfun->machine. This routine can be called multiple times;
9591 it will re-do the complete frame layout every time. */
9594 s390_init_frame_layout (void)
9596 HOST_WIDE_INT frame_size;
9599 /* After LRA the frame layout is supposed to be read-only and should
9600 not be re-computed. */
9601 if (reload_completed)
9604 /* On S/390 machines, we may need to perform branch splitting, which
9605 will require both base and return address register. We have no
9606 choice but to assume we're going to need them until right at the
9607 end of the machine dependent reorg phase. */
9608 if (!TARGET_CPU_ZARCH)
9609 cfun->machine->split_branches_pending_p = true;
9613 frame_size = cfun_frame_layout.frame_size;
9615 /* Try to predict whether we'll need the base register. */
9616 base_used = cfun->machine->split_branches_pending_p
9617 || crtl->uses_const_pool
9618 || (!DISP_IN_RANGE (frame_size)
9619 && !CONST_OK_FOR_K (frame_size));
9621 /* Decide which register to use as literal pool base. In small
9622 leaf functions, try to use an unused call-clobbered register
9623 as base register to avoid save/restore overhead. */
9625 cfun->machine->base_reg = NULL_RTX;
9631 /* Prefer r5 (most likely to be free). */
9632 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
9634 cfun->machine->base_reg =
9635 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
9638 s390_register_info ();
9641 while (frame_size != cfun_frame_layout.frame_size);
9644 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
9645 the TX is nonescaping. A transaction is considered escaping if
9646 there is at least one path from tbegin returning CC0 to the
9647 function exit block without a tend.
9649 The check so far has some limitations:
9650 - only single tbegin/tend BBs are supported
9651 - the first cond jump after tbegin must separate the CC0 path from ~CC0
9652 - when CC is copied to a GPR and the CC0 check is done with the GPR
9653 this is not supported
9657 s390_optimize_nonescaping_tx (void)
9659 const unsigned int CC0 = 1 << 3;
9660 basic_block tbegin_bb = NULL;
9661 basic_block tend_bb = NULL;
9666 rtx_insn *tbegin_insn = NULL;
9668 if (!cfun->machine->tbegin_p)
9671 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
9673 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
9678 FOR_BB_INSNS (bb, insn)
9680 rtx ite, cc, pat, target;
9681 unsigned HOST_WIDE_INT mask;
9683 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
9686 pat = PATTERN (insn);
9688 if (GET_CODE (pat) == PARALLEL)
9689 pat = XVECEXP (pat, 0, 0);
9691 if (GET_CODE (pat) != SET
9692 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
9695 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
9701 /* Just return if the tbegin doesn't have clobbers. */
9702 if (GET_CODE (PATTERN (insn)) != PARALLEL)
9705 if (tbegin_bb != NULL)
9708 /* Find the next conditional jump. */
9709 for (tmp = NEXT_INSN (insn);
9711 tmp = NEXT_INSN (tmp))
9713 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
9718 ite = SET_SRC (PATTERN (tmp));
9719 if (GET_CODE (ite) != IF_THEN_ELSE)
9722 cc = XEXP (XEXP (ite, 0), 0);
9723 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
9724 || GET_MODE (cc) != CCRAWmode
9725 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
9728 if (bb->succs->length () != 2)
9731 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
9732 if (GET_CODE (XEXP (ite, 0)) == NE)
9736 target = XEXP (ite, 1);
9737 else if (mask == (CC0 ^ 0xf))
9738 target = XEXP (ite, 2);
9746 ei = ei_start (bb->succs);
9747 e1 = ei_safe_edge (ei);
9749 e2 = ei_safe_edge (ei);
9751 if (e2->flags & EDGE_FALLTHRU)
9754 e1 = ei_safe_edge (ei);
9757 if (!(e1->flags & EDGE_FALLTHRU))
9760 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
9762 if (tmp == BB_END (bb))
9767 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
9769 if (tend_bb != NULL)
9776 /* Either we successfully remove the FPR clobbers here or we are not
9777 able to do anything for this TX. Both cases don't qualify for
9778 another look. */
9779 cfun->machine->tbegin_p = false;
9781 if (tbegin_bb == NULL || tend_bb == NULL)
9784 calculate_dominance_info (CDI_POST_DOMINATORS);
9785 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
9786 free_dominance_info (CDI_POST_DOMINATORS);
9791 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
9793 XVECEXP (PATTERN (tbegin_insn), 0, 0),
9794 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
9795 INSN_CODE (tbegin_insn) = -1;
9796 df_insn_rescan (tbegin_insn);
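/* Illustrative note (not part of the sources): the 4-bit condition-code
   mask tested above carries one bit per CC value, CC0 being the most
   significant bit (1 << 3).  A mask equal to CC0 selects exactly the
   CC0 path; CC0 ^ 0xf selects every path except CC0.  */
#if 0
static int
branch_taken_p (unsigned int mask, unsigned int cc)	/* cc in 0..3 */
{
  return (mask & (1u << (3 - cc))) != 0;
}
/* branch_taken_p (0x8, 0) == 1 while branch_taken_p (0x7, 0) == 0.  */
#endif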
9801 /* Return true if it is legal to put a value with MODE into REGNO. */
9804 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
9806 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
9809 switch (REGNO_REG_CLASS (regno))
9812 return ((GET_MODE_CLASS (mode) == MODE_INT
9813 && s390_class_max_nregs (VEC_REGS, mode) == 1)
9815 || s390_vector_mode_supported_p (mode));
9819 && ((GET_MODE_CLASS (mode) == MODE_INT
9820 && s390_class_max_nregs (FP_REGS, mode) == 1)
9822 || s390_vector_mode_supported_p (mode)))
9825 if (REGNO_PAIR_OK (regno, mode))
9827 if (mode == SImode || mode == DImode)
9830 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
9835 if (FRAME_REGNO_P (regno) && mode == Pmode)
9840 if (REGNO_PAIR_OK (regno, mode))
9843 || (mode != TFmode && mode != TCmode && mode != TDmode))
9848 if (GET_MODE_CLASS (mode) == MODE_CC)
9852 if (REGNO_PAIR_OK (regno, mode))
9854 if (mode == SImode || mode == Pmode)
9865 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
9868 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
9870 /* Once we've decided upon a register to use as base register, it must
9871 no longer be used for any other purpose. */
9872 if (cfun->machine->base_reg)
9873 if (REGNO (cfun->machine->base_reg) == old_reg
9874 || REGNO (cfun->machine->base_reg) == new_reg)
9877 /* Prevent regrename from using call-saved regs which haven't
9878 actually been saved. This is necessary since regrename assumes
9879 the backend save/restore decisions are based on
9880 df_regs_ever_live. Since we have our own routine we have to tell
9881 regrename manually about it. */
9882 if (GENERAL_REGNO_P (new_reg)
9883 && !call_really_used_regs[new_reg]
9884 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
9890 /* Return nonzero if register REGNO can be used as a scratch register
9891 in peephole2. */
9894 s390_hard_regno_scratch_ok (unsigned int regno)
9896 /* See s390_hard_regno_rename_ok. */
9897 if (GENERAL_REGNO_P (regno)
9898 && !call_really_used_regs[regno]
9899 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
9905 /* Maximum number of registers to represent a value of mode MODE
9906 in a register of class RCLASS. */
9909 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
9912 bool reg_pair_required_p = false;
9918 reg_size = TARGET_VX ? 16 : 8;
9920 /* TF and TD modes would fit into a VR but we put them into a
9921 register pair since we do not have 128bit FP instructions on
9922 full VRs. */
9924 && SCALAR_FLOAT_MODE_P (mode)
9925 && GET_MODE_SIZE (mode) >= 16)
9926 reg_pair_required_p = true;
9928 /* Even if complex types would fit into a single FPR/VR we force
9929 them into a register pair to deal with the parts more easily.
9930 (FIXME: What about complex ints?) */
9931 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
9932 reg_pair_required_p = true;
9938 reg_size = UNITS_PER_WORD;
9942 if (reg_pair_required_p)
9943 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
9945 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
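/* Worked example (illustrative): with TARGET_VX the FP/VR reg_size is 16,
   so a 16-byte TFmode value forced into a pair needs
   2 * ((16/2 + 15) / 16) == 2 registers, while a 16-byte vector mode
   needs (16 + 15) / 16 == 1.  For GPRs with reg_size == 8, a 16-byte
   TImode value needs (16 + 7) / 8 == 2 registers.  */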
9948 /* Return TRUE if changing mode from FROM to TO should not be allowed
9949 for register class CLASS. */
9952 s390_cannot_change_mode_class (machine_mode from_mode,
9953 machine_mode to_mode,
9954 enum reg_class rclass)
9956 machine_mode small_mode;
9957 machine_mode big_mode;
9959 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
9962 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
9964 small_mode = from_mode;
9969 small_mode = to_mode;
9970 big_mode = from_mode;
9973 /* Values residing in VRs are little-endian style. All modes are
9974 placed left-aligned in a VR. This means that we cannot allow
9975 switching between modes with differing sizes. Also if the vector
9976 facility is available we still place TFmode values in VR register
9977 pairs, since the only instructions we have operating on TFmodes
9978 only deal with register pairs. Therefore we have to allow DFmode
9979 subregs of TFmodes to enable the TFmode splitters. */
9980 if (reg_classes_intersect_p (VEC_REGS, rclass)
9981 && (GET_MODE_SIZE (small_mode) < 8
9982 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
9985 /* Likewise for access registers, since they have only half the
9986 word size on 64-bit. */
9987 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
9993 /* Return true if we use LRA instead of reload pass. */
9997 return s390_lra_flag;
10000 /* Return true if register FROM can be eliminated via register TO. */
10003 s390_can_eliminate (const int from, const int to)
10005 /* On zSeries machines, we have not marked the base register as fixed.
10006 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10007 If a function requires the base register, we say here that this
10008 elimination cannot be performed. This will cause reload to free
10009 up the base register (as if it were fixed). On the other hand,
10010 if the current function does *not* require the base register, we
10011 say here the elimination succeeds, which in turn allows reload
10012 to allocate the base register for any other purpose. */
10013 if (from == BASE_REGNUM && to == BASE_REGNUM)
10015 if (TARGET_CPU_ZARCH)
10017 s390_init_frame_layout ();
10018 return cfun->machine->base_reg == NULL_RTX;
10024 /* Everything else must point into the stack frame. */
10025 gcc_assert (to == STACK_POINTER_REGNUM
10026 || to == HARD_FRAME_POINTER_REGNUM);
10028 gcc_assert (from == FRAME_POINTER_REGNUM
10029 || from == ARG_POINTER_REGNUM
10030 || from == RETURN_ADDRESS_POINTER_REGNUM);
10032 /* Make sure we actually saved the return address. */
10033 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10034 if (!crtl->calls_eh_return
10036 && !cfun_frame_layout.save_return_addr_p)
10042 /* Return offset between register FROM and TO initially after prolog. */
10045 s390_initial_elimination_offset (int from, int to)
10047 HOST_WIDE_INT offset;
10049 /* ??? Why are we called for non-eliminable pairs? */
10050 if (!s390_can_eliminate (from, to))
10055 case FRAME_POINTER_REGNUM:
10056 offset = (get_frame_size()
10057 + STACK_POINTER_OFFSET
10058 + crtl->outgoing_args_size);
10061 case ARG_POINTER_REGNUM:
10062 s390_init_frame_layout ();
10063 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10066 case RETURN_ADDRESS_POINTER_REGNUM:
10067 s390_init_frame_layout ();
10069 if (cfun_frame_layout.first_save_gpr_slot == -1)
10071 /* If it turns out that for stdarg nothing went into the reg
10072 save area we also do not need the return address
10073 save slot. */
10074 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10077 gcc_unreachable ();
10080 /* In order to make the following work it is not necessary for
10081 r14 to have a save slot. It is sufficient if one other GPR
10082 got one. Since the GPRs are always stored without gaps we
10083 are able to calculate where the r14 save slot would
10084 reside. */
10085 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10086 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10095 gcc_unreachable ();
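/* Illustrative example (all values invented): with frame_size == 160,
   gprs_offset == 16, first_save_gpr_slot == 6 and UNITS_PER_LONG == 8,
   the r14 save slot computed above sits at
   160 + 16 + (14 - 6) * 8 == 240, because GPRs are stored without
   gaps.  */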
10101 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10102 to register BASE. Return generated insn. */
10105 save_fpr (rtx base, int offset, int regnum)
10108 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10110 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10111 set_mem_alias_set (addr, get_varargs_alias_set ());
10113 set_mem_alias_set (addr, get_frame_alias_set ());
10115 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10118 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10119 to register BASE. Return generated insn. */
10122 restore_fpr (rtx base, int offset, int regnum)
10125 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10126 set_mem_alias_set (addr, get_frame_alias_set ());
10128 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10131 /* Return true if REGNO is a global register, but not one
10132 of the special ones that need to be saved/restored anyway. */
10135 global_not_special_regno_p (int regno)
10137 return (global_regs[regno]
10138 /* These registers are special and need to be
10139 restored in any case. */
10140 && !(regno == STACK_POINTER_REGNUM
10141 || regno == RETURN_REGNUM
10142 || regno == BASE_REGNUM
10143 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10146 /* Generate insn to save registers FIRST to LAST into
10147 the register save area located at offset OFFSET
10148 relative to register BASE. */
10151 save_gprs (rtx base, int offset, int first, int last)
10153 rtx addr, insn, note;
10156 addr = plus_constant (Pmode, base, offset);
10157 addr = gen_rtx_MEM (Pmode, addr);
10159 set_mem_alias_set (addr, get_frame_alias_set ());
10161 /* Special-case single register. */
10165 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10167 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10169 if (!global_not_special_regno_p (first))
10170 RTX_FRAME_RELATED_P (insn) = 1;
10175 insn = gen_store_multiple (addr,
10176 gen_rtx_REG (Pmode, first),
10177 GEN_INT (last - first + 1));
10179 if (first <= 6 && cfun->stdarg)
10180 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10182 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10184 if (first + i <= 6)
10185 set_mem_alias_set (mem, get_varargs_alias_set ());
10188 /* We need to set the FRAME_RELATED flag on all SETs
10189 inside the store-multiple pattern.
10191 However, we must not emit DWARF records for registers 2..5
10192 if they are stored for use by variable arguments ...
10194 ??? Unfortunately, it is not enough to simply not set the
10195 FRAME_RELATED flags for those SETs, because the first SET
10196 of the PARALLEL is always treated as if it had the flag
10197 set, even if it does not. Therefore we emit a new pattern
10198 without those registers as REG_FRAME_RELATED_EXPR note. */
10200 if (first >= 6 && !global_not_special_regno_p (first))
10202 rtx pat = PATTERN (insn);
10204 for (i = 0; i < XVECLEN (pat, 0); i++)
10205 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10206 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10208 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10210 RTX_FRAME_RELATED_P (insn) = 1;
10212 else if (last >= 6)
10216 for (start = first >= 6 ? first : 6; start <= last; start++)
10217 if (!global_not_special_regno_p (start))
10223 addr = plus_constant (Pmode, base,
10224 offset + (start - first) * UNITS_PER_LONG);
10229 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10230 gen_rtx_REG (Pmode, start));
10232 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10233 gen_rtx_REG (Pmode, start));
10234 note = PATTERN (note);
10236 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10237 RTX_FRAME_RELATED_P (insn) = 1;
10242 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10243 gen_rtx_REG (Pmode, start),
10244 GEN_INT (last - start + 1));
10245 note = PATTERN (note);
10247 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10249 for (i = 0; i < XVECLEN (note, 0); i++)
10250 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10251 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10253 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10255 RTX_FRAME_RELATED_P (insn) = 1;
10261 /* Generate insn to restore registers FIRST to LAST from
10262 the register save area located at offset OFFSET
10263 relative to register BASE. */
10266 restore_gprs (rtx base, int offset, int first, int last)
10270 addr = plus_constant (Pmode, base, offset);
10271 addr = gen_rtx_MEM (Pmode, addr);
10272 set_mem_alias_set (addr, get_frame_alias_set ());
10274 /* Special-case single register. */
10278 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10280 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10282 RTX_FRAME_RELATED_P (insn) = 1;
10286 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10288 GEN_INT (last - first + 1));
10289 RTX_FRAME_RELATED_P (insn) = 1;
10293 /* Return insn sequence to load the GOT register. */
10295 static GTY(()) rtx got_symbol;
10297 s390_load_got (void)
10301 /* We cannot use pic_offset_table_rtx here since we use this
10302 function also for non-pic if __tls_get_offset is called and in
10303 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10304 might not be valid. */
10305 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10309 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10310 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
10315 if (TARGET_CPU_ZARCH)
10317 emit_move_insn (got_rtx, got_symbol);
10323 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
10324 UNSPEC_LTREL_OFFSET);
10325 offset = gen_rtx_CONST (Pmode, offset);
10326 offset = force_const_mem (Pmode, offset);
10328 emit_move_insn (got_rtx, offset);
10330 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10331 UNSPEC_LTREL_BASE);
10332 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10334 emit_move_insn (got_rtx, offset);
10337 insns = get_insns ();
10342 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10343 and the change to the stack pointer. */
10346 s390_emit_stack_tie (void)
10348 rtx mem = gen_frame_mem (BLKmode,
10349 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10351 emit_insn (gen_stack_tie (mem));
10354 /* Copy GPRs into FPR save slots. */
10357 s390_save_gprs_to_fprs (void)
10361 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10364 for (i = 6; i < 16; i++)
10366 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10369 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10370 gen_rtx_REG (DImode, i));
10371 RTX_FRAME_RELATED_P (insn) = 1;
10372 /* This prevents dwarf2cfi from interpreting the set. Doing
10373 so it might emit def_cfa_register infos setting an FPR as
10374 new CFA. */
10375 add_reg_note (insn, REG_CFA_REGISTER, PATTERN (insn));
10380 /* Restore GPRs from FPR save slots. */
10383 s390_restore_gprs_from_fprs (void)
10387 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10390 for (i = 6; i < 16; i++)
10392 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10395 emit_move_insn (gen_rtx_REG (DImode, i),
10396 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
10397 df_set_regs_ever_live (i, true);
10398 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10399 if (i == STACK_POINTER_REGNUM)
10400 add_reg_note (insn, REG_CFA_DEF_CFA,
10401 plus_constant (Pmode, stack_pointer_rtx,
10402 STACK_POINTER_OFFSET));
10403 RTX_FRAME_RELATED_P (insn) = 1;
10409 /* A pass run immediately before shrink-wrapping and prologue and epilogue
10410 generation. */
10414 const pass_data pass_data_s390_early_mach =
10416 RTL_PASS, /* type */
10417 "early_mach", /* name */
10418 OPTGROUP_NONE, /* optinfo_flags */
10419 TV_MACH_DEP, /* tv_id */
10420 0, /* properties_required */
10421 0, /* properties_provided */
10422 0, /* properties_destroyed */
10423 0, /* todo_flags_start */
10424 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
10427 class pass_s390_early_mach : public rtl_opt_pass
10430 pass_s390_early_mach (gcc::context *ctxt)
10431 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
10434 /* opt_pass methods: */
10435 virtual unsigned int execute (function *);
10437 }; // class pass_s390_early_mach
10440 pass_s390_early_mach::execute (function *fun)
10444 /* Try to get rid of the FPR clobbers. */
10445 s390_optimize_nonescaping_tx ();
10447 /* Re-compute register info. */
10448 s390_register_info ();
10450 /* If we're using a base register, ensure that it is always valid for
10451 the first non-prologue instruction. */
10452 if (fun->machine->base_reg)
10453 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
10455 /* Annotate all constant pool references to let the scheduler know
10456 they implicitly use the base register. */
10457 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10460 annotate_constant_pool_refs (&PATTERN (insn));
10461 df_insn_rescan (insn);
10466 } // anon namespace
10468 /* Expand the prologue into a bunch of separate insns. */
10471 s390_emit_prologue (void)
10479 /* Choose best register to use for temp use within prologue.
10480 See below for why TPF must use the register 1. */
10482 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
10484 && !TARGET_TPF_PROFILING)
10485 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10487 temp_reg = gen_rtx_REG (Pmode, 1);
10489 s390_save_gprs_to_fprs ();
10491 /* Save call saved gprs. */
10492 if (cfun_frame_layout.first_save_gpr != -1)
10494 insn = save_gprs (stack_pointer_rtx,
10495 cfun_frame_layout.gprs_offset +
10496 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
10497 - cfun_frame_layout.first_save_gpr_slot),
10498 cfun_frame_layout.first_save_gpr,
10499 cfun_frame_layout.last_save_gpr);
10503 /* Dummy insn to mark literal pool slot. */
10505 if (cfun->machine->base_reg)
10506 emit_insn (gen_main_pool (cfun->machine->base_reg));
10508 offset = cfun_frame_layout.f0_offset;
10510 /* Save f0 and f2. */
10511 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
10513 if (cfun_fpr_save_p (i))
10515 save_fpr (stack_pointer_rtx, offset, i);
10518 else if (!TARGET_PACKED_STACK || cfun->stdarg)
10522 /* Save f4 and f6. */
10523 offset = cfun_frame_layout.f4_offset;
10524 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10526 if (cfun_fpr_save_p (i))
10528 insn = save_fpr (stack_pointer_rtx, offset, i);
10531 /* If f4 and f6 are call clobbered they are saved due to
10532 stdargs and therefore are not frame related. */
10533 if (!call_really_used_regs[i])
10534 RTX_FRAME_RELATED_P (insn) = 1;
10536 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
10540 if (TARGET_PACKED_STACK
10541 && cfun_save_high_fprs_p
10542 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
10544 offset = (cfun_frame_layout.f8_offset
10545 + (cfun_frame_layout.high_fprs - 1) * 8);
10547 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
10548 if (cfun_fpr_save_p (i))
10550 insn = save_fpr (stack_pointer_rtx, offset, i);
10552 RTX_FRAME_RELATED_P (insn) = 1;
10555 if (offset >= cfun_frame_layout.f8_offset)
10559 if (!TARGET_PACKED_STACK)
10560 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
10562 if (flag_stack_usage_info)
10563 current_function_static_stack_size = cfun_frame_layout.frame_size;
10565 /* Decrement stack pointer. */
10567 if (cfun_frame_layout.frame_size > 0)
10569 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10570 rtx real_frame_off;
10572 if (s390_stack_size)
10574 HOST_WIDE_INT stack_guard;
10576 if (s390_stack_guard)
10577 stack_guard = s390_stack_guard;
10580 /* If no value for stack guard is provided the smallest power of 2
10581 larger than the current frame size is chosen. */
10583 while (stack_guard < cfun_frame_layout.frame_size)
10587 if (cfun_frame_layout.frame_size >= s390_stack_size)
10589 warning (0, "frame size of function %qs is %wd"
10590 " bytes exceeding user provided stack limit of "
10592 "An unconditional trap is added.",
10593 current_function_name(), cfun_frame_layout.frame_size,
10595 emit_insn (gen_trap ());
10600 /* stack_guard has to be smaller than s390_stack_size.
10601 Otherwise we would emit an AND with zero which would
10602 not match the test under mask pattern. */
10603 if (stack_guard >= s390_stack_size)
10605 warning (0, "frame size of function %qs is %wd"
10606 " bytes which is more than half the stack size. "
10607 "The dynamic check would not be reliable. "
10608 "No check emitted for this function.",
10609 current_function_name(),
10610 cfun_frame_layout.frame_size);
10614 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
10615 & ~(stack_guard - 1));
10617 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
10618 GEN_INT (stack_check_mask));
10620 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
10622 t, const0_rtx, const0_rtx));
10624 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
10626 t, const0_rtx, const0_rtx));
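/* Worked example (illustrative, values invented): for
   s390_stack_size == 4096 and stack_guard == 256 the mask is
   (4096 - 1) & ~(256 - 1) == 0xf00.  The AND result is zero, and the
   conditional trap fires, exactly when the stack pointer has dropped
   into the lowest stack_guard bytes of the allowed stack area.  */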
10631 if (s390_warn_framesize > 0
10632 && cfun_frame_layout.frame_size >= s390_warn_framesize)
10633 warning (0, "frame size of %qs is %wd bytes",
10634 current_function_name (), cfun_frame_layout.frame_size);
10636 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
10637 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
10639 /* Save incoming stack pointer into temp reg. */
10640 if (TARGET_BACKCHAIN || next_fpr)
10641 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
10643 /* Subtract frame size from stack pointer. */
10645 if (DISP_IN_RANGE (INTVAL (frame_off)))
10647 insn = gen_rtx_SET (stack_pointer_rtx,
10648 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10650 insn = emit_insn (insn);
10654 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10655 frame_off = force_const_mem (Pmode, frame_off);
10657 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
10658 annotate_constant_pool_refs (&PATTERN (insn));
10661 RTX_FRAME_RELATED_P (insn) = 1;
10662 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
10663 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10664 gen_rtx_SET (stack_pointer_rtx,
10665 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
10668 /* Set backchain. */
10670 if (TARGET_BACKCHAIN)
10672 if (cfun_frame_layout.backchain_offset)
10673 addr = gen_rtx_MEM (Pmode,
10674 plus_constant (Pmode, stack_pointer_rtx,
10675 cfun_frame_layout.backchain_offset));
10677 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
10678 set_mem_alias_set (addr, get_frame_alias_set ());
10679 insn = emit_insn (gen_move_insn (addr, temp_reg));
10682 /* If we support non-call exceptions (e.g. for Java),
10683 we need to make sure the backchain pointer is set up
10684 before any possibly trapping memory access. */
10685 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
10687 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
10688 emit_clobber (addr);
10692 /* Save fprs 8 - 15 (64 bit ABI). */
10694 if (cfun_save_high_fprs_p && next_fpr)
10696 /* If the stack might be accessed through a different register
10697 we have to make sure that the stack pointer decrement is not
10698 moved below the use of the stack slots. */
10699 s390_emit_stack_tie ();
10701 insn = emit_insn (gen_add2_insn (temp_reg,
10702 GEN_INT (cfun_frame_layout.f8_offset)));
10706 for (i = FPR8_REGNUM; i <= next_fpr; i++)
10707 if (cfun_fpr_save_p (i))
10709 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
10710 cfun_frame_layout.frame_size
10711 + cfun_frame_layout.f8_offset
10714 insn = save_fpr (temp_reg, offset, i);
10716 RTX_FRAME_RELATED_P (insn) = 1;
10717 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
10718 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
10719 gen_rtx_REG (DFmode, i)));
10723 /* Set frame pointer, if needed. */
10725 if (frame_pointer_needed)
10727 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
10728 RTX_FRAME_RELATED_P (insn) = 1;
10731 /* Set up got pointer, if needed. */
10733 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10735 rtx_insn *insns = s390_load_got ();
10737 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
10738 annotate_constant_pool_refs (&PATTERN (insn));
10743 if (TARGET_TPF_PROFILING)
10745 /* Generate a BAS instruction to serve as a function
10746 entry intercept to facilitate the use of tracing
10747 algorithms located at the branch target. */
10748 emit_insn (gen_prologue_tpf ());
10750 /* Emit a blockage here so that all code
10751 lies between the profiling mechanisms. */
10752 emit_insn (gen_blockage ());
10756 /* Expand the epilogue into a bunch of separate insns. */
10759 s390_emit_epilogue (bool sibcall)
10761 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
10762 int area_bottom, area_top, offset = 0;
10767 if (TARGET_TPF_PROFILING)
10770 /* Generate a BAS instruction to serve as a function
10771 exit intercept to facilitate the use of tracing
10772 algorithms located at the branch target. */
10774 /* Emit a blockage here so that all code
10775 lies between the profiling mechanisms. */
10776 emit_insn (gen_blockage ());
10778 emit_insn (gen_epilogue_tpf ());
10781 /* Check whether to use frame or stack pointer for restore. */
10783 frame_pointer = (frame_pointer_needed
10784 ? hard_frame_pointer_rtx : stack_pointer_rtx);
10786 s390_frame_area (&area_bottom, &area_top);
10788 /* Check whether we can access the register save area.
10789 If not, increment the frame pointer as required. */
10791 if (area_top <= area_bottom)
10793 /* Nothing to restore. */
10795 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
10796 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
10798 /* Area is in range. */
10799 offset = cfun_frame_layout.frame_size;
10803 rtx insn, frame_off, cfa;
10805 offset = area_bottom < 0 ? -area_bottom : 0;
10806 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
10808 cfa = gen_rtx_SET (frame_pointer,
10809 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10810 if (DISP_IN_RANGE (INTVAL (frame_off)))
10812 insn = gen_rtx_SET (frame_pointer,
10813 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
10814 insn = emit_insn (insn);
10818 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
10819 frame_off = force_const_mem (Pmode, frame_off);
10821 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
10822 annotate_constant_pool_refs (&PATTERN (insn));
10824 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
10825 RTX_FRAME_RELATED_P (insn) = 1;
10828 /* Restore call saved fprs. */
10832 if (cfun_save_high_fprs_p)
10834 next_offset = cfun_frame_layout.f8_offset;
10835 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10837 if (cfun_fpr_save_p (i))
10839 restore_fpr (frame_pointer,
10840 offset + next_offset, i);
10842 = alloc_reg_note (REG_CFA_RESTORE,
10843 gen_rtx_REG (DFmode, i), cfa_restores);
10852 next_offset = cfun_frame_layout.f4_offset;
10854 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
10856 if (cfun_fpr_save_p (i))
10858 restore_fpr (frame_pointer,
10859 offset + next_offset, i);
10861 = alloc_reg_note (REG_CFA_RESTORE,
10862 gen_rtx_REG (DFmode, i), cfa_restores);
10865 else if (!TARGET_PACKED_STACK)
10871 /* Return register. */
10873 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
10875 /* Restore call saved gprs. */
10877 if (cfun_frame_layout.first_restore_gpr != -1)
10882 /* Check for global registers and save them
10883 to the stack locations from where they get restored. */
10885 for (i = cfun_frame_layout.first_restore_gpr;
10886 i <= cfun_frame_layout.last_restore_gpr;
10889 if (global_not_special_regno_p (i))
10891 addr = plus_constant (Pmode, frame_pointer,
10892 offset + cfun_frame_layout.gprs_offset
10893 + (i - cfun_frame_layout.first_save_gpr_slot)
10895 addr = gen_rtx_MEM (Pmode, addr);
10896 set_mem_alias_set (addr, get_frame_alias_set ());
10897 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
10901 = alloc_reg_note (REG_CFA_RESTORE,
10902 gen_rtx_REG (Pmode, i), cfa_restores);
10907 /* Fetch return address from stack before load multiple,
10908 this helps scheduling.
10910 Only do this if we already decided that r14 needs to be
10911 saved to a stack slot. (And not just because r14 happens to
10912 be in between two GPRs which need saving.) Otherwise it
10913 would be difficult to take that decision back in
10914 s390_optimize_prologue. */
10915 if (cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK)
10917 int return_regnum = find_unused_clobbered_reg();
10918 if (!return_regnum)
10920 return_reg = gen_rtx_REG (Pmode, return_regnum);
10922 addr = plus_constant (Pmode, frame_pointer,
10923 offset + cfun_frame_layout.gprs_offset
10925 - cfun_frame_layout.first_save_gpr_slot)
10927 addr = gen_rtx_MEM (Pmode, addr);
10928 set_mem_alias_set (addr, get_frame_alias_set ());
10929 emit_move_insn (return_reg, addr);
10931 /* Once we have done this optimization we have to make sure
10932 s390_optimize_prologue does not try to remove the
10933 store of r14 since we will not be able to find the
10934 load issued here. */
10935 cfun_frame_layout.save_return_addr_p = true;
10939 insn = restore_gprs (frame_pointer,
10940 offset + cfun_frame_layout.gprs_offset
10941 + (cfun_frame_layout.first_restore_gpr
10942 - cfun_frame_layout.first_save_gpr_slot)
10944 cfun_frame_layout.first_restore_gpr,
10945 cfun_frame_layout.last_restore_gpr);
10946 insn = emit_insn (insn);
10947 REG_NOTES (insn) = cfa_restores;
10948 add_reg_note (insn, REG_CFA_DEF_CFA,
10949 plus_constant (Pmode, stack_pointer_rtx,
10950 STACK_POINTER_OFFSET));
10951 RTX_FRAME_RELATED_P (insn) = 1;
10954 s390_restore_gprs_from_fprs ();
10959 /* Return to caller. */
10961 p = rtvec_alloc (2);
10963 RTVEC_ELT (p, 0) = ret_rtx;
10964 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
10965 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
10969 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
10972 s300_set_up_by_prologue (hard_reg_set_container *regs)
10974 if (cfun->machine->base_reg
10975 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
10976 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
10979 /* Return true if the function can use simple_return to return outside
10980 of a shrink-wrapped region. At present shrink-wrapping is supported
10981 in all cases. */
10984 s390_can_use_simple_return_insn (void)
10989 /* Return true if the epilogue is guaranteed to contain only a return
10990 instruction and if a direct return can therefore be used instead.
10991 One of the main advantages of using direct return instructions
10992 is that we can then use conditional returns. */
10995 s390_can_use_return_insn (void)
10999 if (!reload_completed)
11005 if (TARGET_TPF_PROFILING)
11008 for (i = 0; i < 16; i++)
11009 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11012 /* For 31 bit this is not covered by the frame_size check below
11013 since f4, f6 are saved in the register save area without needing
11014 additional stack space. */
11016 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11019 if (cfun->machine->base_reg
11020 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11023 return cfun_frame_layout.frame_size == 0;
11026 /* The VX ABI differs for vararg functions. Therefore we need the
11027 prototype of the callee to be available when passing vector type
11028 values. */
11029 static const char *
11030 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11032 return ((TARGET_VX_ABI
11034 && VECTOR_TYPE_P (TREE_TYPE (val))
11035 && (funcdecl == NULL_TREE
11036 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11037 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11038 ? N_("Vector argument passed to unprototyped function")
11043 /* Return the size in bytes of a function argument of
11044 type TYPE and/or mode MODE. At least one of TYPE or
11045 MODE must be specified. */
11048 s390_function_arg_size (machine_mode mode, const_tree type)
11051 return int_size_in_bytes (type);
11053 /* No type info available for some library calls ... */
11054 if (mode != BLKmode)
11055 return GET_MODE_SIZE (mode);
11057 /* If we have neither type nor mode, abort */
11058 gcc_unreachable ();
11061 /* Return true if a function argument of type TYPE and mode MODE
11062 is to be passed in a vector register, if available. */
11065 s390_function_arg_vector (machine_mode mode, const_tree type)
11067 if (!TARGET_VX_ABI)
11070 if (s390_function_arg_size (mode, type) > 16)
11073 /* No type info available for some library calls ... */
11075 return VECTOR_MODE_P (mode);
11077 /* The ABI says that record types with a single member are treated
11078 just like that member would be. */
11079 while (TREE_CODE (type) == RECORD_TYPE)
11081 tree field, single = NULL_TREE;
11083 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11085 if (TREE_CODE (field) != FIELD_DECL)
11088 if (single == NULL_TREE)
11089 single = TREE_TYPE (field);
11094 if (single == NULL_TREE)
11098 /* If the field declaration adds extra bytes due to
11099 e.g. padding this is not accepted as a vector type. */
11100 if (int_size_in_bytes (single) <= 0
11101 || int_size_in_bytes (single) != int_size_in_bytes (type))
11107 return VECTOR_TYPE_P (type);
11110 /* Return true if a function argument of type TYPE and mode MODE
11111 is to be passed in a floating-point register, if available. */
11114 s390_function_arg_float (machine_mode mode, const_tree type)
11116 if (s390_function_arg_size (mode, type) > 8)
11119 /* Soft-float changes the ABI: no floating-point registers are used. */
11120 if (TARGET_SOFT_FLOAT)
11123 /* No type info available for some library calls ... */
11125 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11127 /* The ABI says that record types with a single member are treated
11128 just like that member would be. */
11129 while (TREE_CODE (type) == RECORD_TYPE)
11131 tree field, single = NULL_TREE;
11133 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11135 if (TREE_CODE (field) != FIELD_DECL)
11138 if (single == NULL_TREE)
11139 single = TREE_TYPE (field);
11144 if (single == NULL_TREE)
11150 return TREE_CODE (type) == REAL_TYPE;
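/* Example of the unwrapping rule above (illustrative): on a hard-float
   target the two parameters below are passed identically, in an FPR,
   because the struct consists of a single FIELD_DECL with no padding.  */
#if 0
struct wrapped_double { double d; };
extern void callee (double a, struct wrapped_double b);	/* both in FPRs */
#endif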
11153 /* Return true if a function argument of type TYPE and mode MODE
11154 is to be passed in an integer register, or a pair of integer
11155 registers, if available. */
11158 s390_function_arg_integer (machine_mode mode, const_tree type)
11160 int size = s390_function_arg_size (mode, type);
11164 /* No type info available for some library calls ... */
11166 return GET_MODE_CLASS (mode) == MODE_INT
11167 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11169 /* We accept small integral (and similar) types. */
11170 if (INTEGRAL_TYPE_P (type)
11171 || POINTER_TYPE_P (type)
11172 || TREE_CODE (type) == NULLPTR_TYPE
11173 || TREE_CODE (type) == OFFSET_TYPE
11174 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11177 /* We also accept structs of size 1, 2, 4, 8 that are not
11178 passed in floating-point registers. */
11179 if (AGGREGATE_TYPE_P (type)
11180 && exact_log2 (size) >= 0
11181 && !s390_function_arg_float (mode, type))
11187 /* Return 1 if a function argument of type TYPE and mode MODE
11188 is to be passed by reference. The ABI specifies that only
11189 structures of size 1, 2, 4, or 8 bytes are passed by value,
11190 all other structures (and complex numbers) are passed by
11191 reference. */
11194 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11195 machine_mode mode, const_tree type,
11196 bool named ATTRIBUTE_UNUSED)
11198 int size = s390_function_arg_size (mode, type);
11200 if (s390_function_arg_vector (mode, type))
11208 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11211 if (TREE_CODE (type) == COMPLEX_TYPE
11212 || TREE_CODE (type) == VECTOR_TYPE)
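/* Illustrative consequence of the rules above: a 3-byte struct has no
   power-of-two size and is therefore passed by reference via a hidden
   pointer, while a 4-byte struct travels by value in a GPR.  */
#if 0
struct three { char c[3]; };	/* passed by reference */
struct four { char c[4]; };	/* passed by value in a GPR */
#endif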
11219 /* Update the data in CUM to advance over an argument of mode MODE and
11220 data type TYPE. (TYPE is null for libcalls where that information
11221 may not be available.). The boolean NAMED specifies whether the
11222 argument is a named argument (as opposed to an unnamed argument
11223 matching an ellipsis). */
11226 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11227 const_tree type, bool named)
11229 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11231 if (s390_function_arg_vector (mode, type))
11233 /* We are called for unnamed vector stdarg arguments which are
11234 passed on the stack. In this case this hook does not have to
11235 do anything since stack arguments are tracked by common
11236 code. */
11241 else if (s390_function_arg_float (mode, type))
11245 else if (s390_function_arg_integer (mode, type))
11247 int size = s390_function_arg_size (mode, type);
11248 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11251 gcc_unreachable ();
11254 /* Define where to put the arguments to a function.
11255 Value is zero to push the argument on the stack,
11256 or a hard register in which to store the argument.
11258 MODE is the argument's machine mode.
11259 TYPE is the data type of the argument (as a tree).
11260 This is null for libcalls where that information may
11261 not be available.
11262 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11263 the preceding args and about the function being called.
11264 NAMED is nonzero if this argument is a named parameter
11265 (otherwise it is an extra parameter matching an ellipsis).
11267 On S/390, we use general purpose registers 2 through 6 to
11268 pass integer, pointer, and certain structure arguments, and
11269 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11270 to pass floating point arguments. All remaining arguments
11271 are pushed to the stack. */
11274 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11275 const_tree type, bool named)
11277 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11280 s390_check_type_for_vector_abi (type, true, false);
11282 if (s390_function_arg_vector (mode, type))
11284 /* Vector arguments being part of the ellipsis are passed on the
11285 stack. */
11286 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
11289 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11291 else if (s390_function_arg_float (mode, type))
11293 if (cum->fprs + 1 > FP_ARG_NUM_REG)
11296 return gen_rtx_REG (mode, cum->fprs + 16);
11298 else if (s390_function_arg_integer (mode, type))
11300 int size = s390_function_arg_size (mode, type);
11301 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11303 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
11305 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
11306 return gen_rtx_REG (mode, cum->gprs + 2);
11307 else if (n_gprs == 2)
11309 rtvec p = rtvec_alloc (2);
11312 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
11315 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
11318 return gen_rtx_PARALLEL (mode, p);
11322 /* After the real arguments, expand_call calls us once again
11323 with a void_type_node type. Whatever we return here is
11324 passed as operand 2 to the call expanders.
11326 We don't need this feature ... */
11327 else if (type == void_type_node)
11330 gcc_unreachable ();
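/* Illustrative example (not from the sources) of the assignment
   implemented above on a 64-bit hard-float target:

     void f (int a, double b, long c, double d, int e);

   a -> r2, b -> f0, c -> r3, d -> f2, e -> r4.  Two further integer
   arguments would still land in r5 and r6; anything beyond that goes
   to the stack.  */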
11333 /* Return true if return values of type TYPE should be returned
11334 in a memory buffer whose address is passed by the caller as
11335 hidden first argument. */
11338 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
11340 /* We accept small integral (and similar) types. */
11341 if (INTEGRAL_TYPE_P (type)
11342 || POINTER_TYPE_P (type)
11343 || TREE_CODE (type) == OFFSET_TYPE
11344 || TREE_CODE (type) == REAL_TYPE)
11345 return int_size_in_bytes (type) > 8;
11347 /* vector types which fit into a VR. */
11349 && VECTOR_TYPE_P (type)
11350 && int_size_in_bytes (type) <= 16)
11353 /* Aggregates and similar constructs are always returned
11354 in memory. */
11355 if (AGGREGATE_TYPE_P (type)
11356 || TREE_CODE (type) == COMPLEX_TYPE
11357 || VECTOR_TYPE_P (type))
11360 /* ??? We get called on all sorts of random stuff from
11361 aggregate_value_p. We can't abort, but it's not clear
11362 what's safe to return. Pretend it's a struct I guess. */
11366 /* Function arguments and return values are promoted to word size. */
11368 static machine_mode
11369 s390_promote_function_mode (const_tree type, machine_mode mode,
11371 const_tree fntype ATTRIBUTE_UNUSED,
11372 int for_return ATTRIBUTE_UNUSED)
11374 if (INTEGRAL_MODE_P (mode)
11375 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
11377 if (type != NULL_TREE && POINTER_TYPE_P (type))
11378 *punsignedp = POINTERS_EXTEND_UNSIGNED;
11385 /* Define where to return a (scalar) value of type RET_TYPE.
11386 If RET_TYPE is null, define where to return a (scalar)
11387 value of mode MODE from a libcall. */
11390 s390_function_and_libcall_value (machine_mode mode,
11391 const_tree ret_type,
11392 const_tree fntype_or_decl,
11393 bool outgoing ATTRIBUTE_UNUSED)
11395 /* For vector return types it is important to use the RET_TYPE
11396 argument whenever available since the middle-end might have
11397 changed the mode to a scalar mode. */
11398 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
11399 || (!ret_type && VECTOR_MODE_P (mode)));
11401 /* For normal functions perform the promotion as
11402 promote_function_mode would do. */
11405 int unsignedp = TYPE_UNSIGNED (ret_type);
11406 mode = promote_function_mode (ret_type, mode, &unsignedp,
11407 fntype_or_decl, 1);
11410 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
11411 || SCALAR_FLOAT_MODE_P (mode)
11412 || (TARGET_VX_ABI && vector_ret_type_p));
11413 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
11415 if (TARGET_VX_ABI && vector_ret_type_p)
11416 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
11417 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
11418 return gen_rtx_REG (mode, 16);
11419 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
11420 || UNITS_PER_LONG == UNITS_PER_WORD)
11421 return gen_rtx_REG (mode, 2);
11422 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
11424 /* This case is triggered when returning a 64 bit value with
11425 -m31 -mzarch. Although the value would fit into a single
11426 register it has to be forced into a 32 bit register pair in
11427 order to match the ABI. */
11428 rtvec p = rtvec_alloc (2);
11431 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
11433 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
11435 return gen_rtx_PARALLEL (mode, p);
11438 gcc_unreachable ();
11441 /* Define where to return a scalar return value of type RET_TYPE. */
11444 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
11447 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
11448 fn_decl_or_type, outgoing);
11451 /* Define where to return a scalar libcall return value of mode
11452 MODE. */
11455 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
11457 return s390_function_and_libcall_value (mode, NULL_TREE,
11462 /* Create and return the va_list datatype.
11464 On S/390, va_list is an array type equivalent to
11466 typedef struct __va_list_tag
11467 {
11468 long __gpr;
11469 long __fpr;
11470 void *__overflow_arg_area;
11471 void *__reg_save_area;
11472 } va_list[1];
11474 where __gpr and __fpr hold the number of general purpose
11475 or floating point arguments used up to now, respectively,
11476 __overflow_arg_area points to the stack location of the
11477 next argument passed on the stack, and __reg_save_area
11478 always points to the start of the register area in the
11479 call frame of the current function. The function prologue
11480 saves all registers used for argument passing into this
11481 area if the function uses variable arguments. */
11484 s390_build_builtin_va_list (void)
11486 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
11488 record = lang_hooks.types.make_type (RECORD_TYPE);
11491 build_decl (BUILTINS_LOCATION,
11492 TYPE_DECL, get_identifier ("__va_list_tag"), record);
11494 f_gpr = build_decl (BUILTINS_LOCATION,
11495 FIELD_DECL, get_identifier ("__gpr"),
11496 long_integer_type_node);
11497 f_fpr = build_decl (BUILTINS_LOCATION,
11498 FIELD_DECL, get_identifier ("__fpr"),
11499 long_integer_type_node);
11500 f_ovf = build_decl (BUILTINS_LOCATION,
11501 FIELD_DECL, get_identifier ("__overflow_arg_area"),
11503 f_sav = build_decl (BUILTINS_LOCATION,
11504 FIELD_DECL, get_identifier ("__reg_save_area"),
11507 va_list_gpr_counter_field = f_gpr;
11508 va_list_fpr_counter_field = f_fpr;
11510 DECL_FIELD_CONTEXT (f_gpr) = record;
11511 DECL_FIELD_CONTEXT (f_fpr) = record;
11512 DECL_FIELD_CONTEXT (f_ovf) = record;
11513 DECL_FIELD_CONTEXT (f_sav) = record;
11515 TYPE_STUB_DECL (record) = type_decl;
11516 TYPE_NAME (record) = type_decl;
11517 TYPE_FIELDS (record) = f_gpr;
11518 DECL_CHAIN (f_gpr) = f_fpr;
11519 DECL_CHAIN (f_fpr) = f_ovf;
11520 DECL_CHAIN (f_ovf) = f_sav;
11522 layout_type (record);
11524 /* The correct type is an array type of one element. */
11525 return build_array_type (record, build_index_type (size_zero_node));
11528 /* Implement va_start by filling the va_list structure VALIST.
11529 STDARG_P is always true, and ignored.
11530 NEXTARG points to the first anonymous stack argument.
11532 The following global variables are used to initialize
11533 the va_list structure:
11535 crtl->args.info:
11536 holds number of gprs and fprs used for named arguments.
11537 crtl->args.arg_offset_rtx:
11538 holds the offset of the first anonymous stack argument
11539 (relative to the virtual arg pointer). */
11542 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
11544 HOST_WIDE_INT n_gpr, n_fpr;
11546 tree f_gpr, f_fpr, f_ovf, f_sav;
11547 tree gpr, fpr, ovf, sav, t;
11549 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11550 f_fpr = DECL_CHAIN (f_gpr);
11551 f_ovf = DECL_CHAIN (f_fpr);
11552 f_sav = DECL_CHAIN (f_ovf);
11554 valist = build_simple_mem_ref (valist);
11555 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11556 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11557 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11558 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11560 /* Count number of gp and fp argument registers used. */
11562 n_gpr = crtl->args.info.gprs;
11563 n_fpr = crtl->args.info.fprs;
11565 if (cfun->va_list_gpr_size)
11567 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11568 build_int_cst (NULL_TREE, n_gpr));
11569 TREE_SIDE_EFFECTS (t) = 1;
11570 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11573 if (cfun->va_list_fpr_size)
11575 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11576 build_int_cst (NULL_TREE, n_fpr));
11577 TREE_SIDE_EFFECTS (t) = 1;
11578 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11581 /* Find the overflow area.
11582 FIXME: This currently is too pessimistic when the vector ABI is
11583 enabled. In that case we *always* set up the overflow area
11584 pointer. */
11585 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
11586 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
11589 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11591 off = INTVAL (crtl->args.arg_offset_rtx);
11592 off = off < 0 ? 0 : off;
11593 if (TARGET_DEBUG_ARG)
11594 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
11595 (int)n_gpr, (int)n_fpr, off);
11597 t = fold_build_pointer_plus_hwi (t, off);
11599 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11600 TREE_SIDE_EFFECTS (t) = 1;
11601 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11604 /* Find the register save area. */
11605 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
11606 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
11608 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
11609 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
11611 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11612 TREE_SIDE_EFFECTS (t) = 1;
11613 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
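/* Editorial note (illustrative sketch, not part of the original source):
   for a variadic function that has consumed two named GPR arguments and
   one named FPR argument, the expansion above behaves roughly like:

     #include <stdarg.h>

     void f (int a, int b, double d, ...)
     {
       va_list ap;
       va_start (ap, d);       // the expansion above then performs:
       //   ap[0].__gpr               = 2;   (a, b)
       //   ap[0].__fpr               = 1;   (d)
       //   ap[0].__overflow_arg_area = incoming args + arg_offset;
       //   ap[0].__reg_save_area     = register save area of f's frame;
       va_end (ap);
     }
*/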
11617 /* Implement va_arg by updating the va_list structure
11618 VALIST as required to retrieve an argument of type
11619 TYPE, and returning that argument.
11621 Generates code equivalent to:
11623 if (integral value) {
11624 if (size <= 4 && args.gpr < 5 ||
11625 size > 4 && args.gpr < 4 )
11626 ret = args.reg_save_area[args.gpr+8]
11628 ret = *args.overflow_arg_area++;
11629 } else if (vector value) {
11630 ret = *args.overflow_arg_area;
11631 args.overflow_arg_area += size / 8;
11632 } else if (float value) {
11634 ret = args.reg_save_area[args.fpr+64]
11636 ret = *args.overflow_arg_area++;
11637 } else if (aggregate value) {
11639 ret = *args.reg_save_area[args.gpr]
11641 ret = **args.overflow_arg_area++;
11645 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11646 gimple_seq *post_p ATTRIBUTE_UNUSED)
11648 tree f_gpr, f_fpr, f_ovf, f_sav;
11649 tree gpr, fpr, ovf, sav, reg, t, u;
11650 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
11651 tree lab_false, lab_over;
11652 tree addr = create_tmp_var (ptr_type_node, "addr");
11653 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
11656 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11657 f_fpr = DECL_CHAIN (f_gpr);
11658 f_ovf = DECL_CHAIN (f_fpr);
11659 f_sav = DECL_CHAIN (f_ovf);
11661 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11662 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
11663 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
11665 /* The tree for args* cannot be shared between gpr/fpr and ovf since
11666 both appear on a lhs. */
11667 valist = unshare_expr (valist);
11668 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
11670 size = int_size_in_bytes (type);
11672 s390_check_type_for_vector_abi (type, true, false);
11674 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11676 if (TARGET_DEBUG_ARG)
11678 fprintf (stderr, "va_arg: aggregate type");
11682 /* Aggregates are passed by reference. */
11687 /* kernel stack layout on 31 bit: It is assumed here that no padding
11688 will be added by s390_frame_info because for va_args an even number
11689 of gprs (r15-r2 = 14 regs) always has to be saved. */
11690 sav_ofs = 2 * UNITS_PER_LONG;
11691 sav_scale = UNITS_PER_LONG;
11692 size = UNITS_PER_LONG;
11693 max_reg = GP_ARG_NUM_REG - n_reg;
11694 left_align_p = false;
11696 else if (s390_function_arg_vector (TYPE_MODE (type), type))
11698 if (TARGET_DEBUG_ARG)
11700 fprintf (stderr, "va_arg: vector type");
11710 left_align_p = true;
11712 else if (s390_function_arg_float (TYPE_MODE (type), type))
11714 if (TARGET_DEBUG_ARG)
11716 fprintf (stderr, "va_arg: float type");
11720 /* FP args go in FP registers, if present. */
11724 sav_ofs = 16 * UNITS_PER_LONG;
11726 max_reg = FP_ARG_NUM_REG - n_reg;
11727 left_align_p = false;
11731 if (TARGET_DEBUG_ARG)
11733 fprintf (stderr, "va_arg: other type");
11737 /* Otherwise into GP registers. */
11740 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
11742 /* kernel stack layout on 31 bit: It is assumed here that no padding
11743 will be added by s390_frame_info because for va_args an even number
11744 of gprs (r15-r2 = 14 regs) always has to be saved. */
11745 sav_ofs = 2 * UNITS_PER_LONG;
11747 if (size < UNITS_PER_LONG)
11748 sav_ofs += UNITS_PER_LONG - size;
11750 sav_scale = UNITS_PER_LONG;
11751 max_reg = GP_ARG_NUM_REG - n_reg;
11752 left_align_p = false;
11755 /* Pull the value out of the saved registers ... */
11757 if (reg != NULL_TREE)
11759 /*
11760 if (reg > ((typeof (reg))max_reg))
11761 goto lab_false;
11763 addr = sav + sav_ofs + reg * sav_scale;
11765 goto lab_over;
11767 lab_false:
11768 */
11770 lab_false = create_artificial_label (UNKNOWN_LOCATION);
11771 lab_over = create_artificial_label (UNKNOWN_LOCATION);
11773 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
11774 t = build2 (GT_EXPR, boolean_type_node, reg, t);
11775 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11776 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11777 gimplify_and_add (t, pre_p);
11779 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11780 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
11781 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
11782 t = fold_build_pointer_plus (t, u);
11784 gimplify_assign (addr, t, pre_p);
11786 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11788 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
11791 /* ... Otherwise out of the overflow area. */
11793 t = ovf;
11794 if (size < UNITS_PER_LONG && !left_align_p)
11795 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
11797 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11799 gimplify_assign (addr, t, pre_p);
11801 if (size < UNITS_PER_LONG && left_align_p)
11802 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
11804 t = fold_build_pointer_plus_hwi (t, size);
11806 gimplify_assign (ovf, t, pre_p);
11808 if (reg != NULL_TREE)
11809 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
11812 /* Increment register save count. */
11816 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
11817 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
11818 gimplify_and_add (u, pre_p);
11823 t = build_pointer_type_for_mode (build_pointer_type (type),
11824 ptr_mode, true);
11825 addr = fold_convert (t, addr);
11826 addr = build_va_arg_indirect_ref (addr);
11830 t = build_pointer_type_for_mode (type, ptr_mode, true);
11831 addr = fold_convert (t, addr);
11834 return build_va_arg_indirect_ref (addr);
11837 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
11839 DEST - Register location where CC will be stored.
11840 TDB - Pointer to a 256 byte area where to store the transaction
11841 diagnostic block. NULL if TDB is not needed.
11842 RETRY - Retry count value. If non-NULL a retry loop for CC2
11843 is generated.
11844 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
11845 of the tbegin instruction pattern. */
11848 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
11850 rtx retry_plus_two = gen_reg_rtx (SImode);
11851 rtx retry_reg = gen_reg_rtx (SImode);
11852 rtx_code_label *retry_label = NULL;
11854 if (retry != NULL_RTX)
11856 emit_move_insn (retry_reg, retry);
11857 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
11858 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
11859 retry_label = gen_label_rtx ();
11860 emit_label (retry_label);
11863 if (clobber_fprs_p)
11865 if (TARGET_VX)
11866 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11867 tdb));
11868 else
11869 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11870 tdb));
11872 else
11873 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
11874 tdb));
11876 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
11877 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
11878 CC_REGNUM)),
11879 UNSPEC_CC_TO_INT));
11880 if (retry != NULL_RTX)
11882 const int CC0 = 1 << 3;
11883 const int CC1 = 1 << 2;
11884 const int CC3 = 1 << 0;
11886 rtx count = gen_reg_rtx (SImode);
11887 rtx_code_label *leave_label = gen_label_rtx ();
11889 /* Exit for success and permanent failures. */
11890 jump = s390_emit_jump (leave_label,
11891 gen_rtx_EQ (VOIDmode,
11892 gen_rtx_REG (CCRAWmode, CC_REGNUM),
11893 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
11894 LABEL_NUSES (leave_label) = 1;
11896 /* CC2 - transient failure. Perform retry with ppa. */
11897 emit_move_insn (count, retry_plus_two);
11898 emit_insn (gen_subsi3 (count, count, retry_reg));
11899 emit_insn (gen_tx_assist (count));
11900 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
11901 retry_reg,
11902 retry_reg));
11903 JUMP_LABEL (jump) = retry_label;
11904 LABEL_NUSES (retry_label) = 1;
11905 emit_label (leave_label);
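/* Editorial note (illustrative sketch, not part of the original source):
   user code driving this expander through the HTM built-ins might look
   like the following; _HTM_TBEGIN_STARTED (0) is the CC value of a
   successfully started transaction:

     #include <htmintrin.h>

     static int counter;

     void increment (void)
     {
       if (__builtin_tbegin (0) == _HTM_TBEGIN_STARTED)
         {
           counter++;             // transactional execution
           __builtin_tend ();
         }
       else
         counter++;              // fallback path; would need a lock
     }
*/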
11910 /* Return the decl for the target specific builtin with the function
11914 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
11916 if (fcode >= S390_BUILTIN_MAX)
11917 return error_mark_node;
11919 return s390_builtin_decls[fcode];
11922 /* We call mcount before the function prologue. So a profiled leaf
11923 function should stay a leaf function. */
11926 s390_keep_leaf_when_profiled ()
11931 /* Output assembly code for the trampoline template to
11932 stdio stream FILE.
11934 On S/390, we use gpr 1 internally in the trampoline code;
11935 gpr 0 is used to hold the static chain. */
11938 s390_asm_trampoline_template (FILE *file)
11941 op[0] = gen_rtx_REG (Pmode, 0);
11942 op[1] = gen_rtx_REG (Pmode, 1);
11946 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11947 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
11948 output_asm_insn ("br\t%1", op); /* 2 byte */
11949 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
11953 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
11954 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
11955 output_asm_insn ("br\t%1", op); /* 2 byte */
11956 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
11960 /* Emit RTL insns to initialize the variable parts of a trampoline.
11961 FNADDR is an RTX for the address of the function's pure code.
11962 CXT is an RTX for the static chain value for the function. */
11965 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
11967 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
11970 emit_block_move (m_tramp, assemble_trampoline_template (),
11971 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
11973 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
11974 emit_move_insn (mem, cxt);
11975 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
11976 emit_move_insn (mem, fnaddr);
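/* Editorial note (illustrative sketch, not part of the original source):
   a trampoline is materialized when the address of a GNU C nested
   function escapes, e.g.:

     int f (int x)
     {
       int g (int y) { return x + y; }   // needs f's frame (static chain)
       int (*fp) (int) = g;              // forces a stack trampoline
       return fp (1);
     }

   The two emit_move_insn calls above fill in the static chain value and
   the entry point of g behind the code template.  */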
11979 /* Output assembler code to FILE to increment profiler label # LABELNO
11980 for profiling a function entry. */
11983 s390_function_profiler (FILE *file, int labelno)
11988 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
11990 fprintf (file, "# function profiler \n");
11992 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
11993 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
11994 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
11996 op[2] = gen_rtx_REG (Pmode, 1);
11997 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
11998 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12000 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12003 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12004 op[4] = gen_rtx_CONST (Pmode, op[4]);
12009 output_asm_insn ("stg\t%0,%1", op);
12010 output_asm_insn ("larl\t%2,%3", op);
12011 output_asm_insn ("brasl\t%0,%4", op);
12012 output_asm_insn ("lg\t%0,%1", op);
12014 else if (!flag_pic)
12016 op[6] = gen_label_rtx ();
12018 output_asm_insn ("st\t%0,%1", op);
12019 output_asm_insn ("bras\t%2,%l6", op);
12020 output_asm_insn (".long\t%4", op);
12021 output_asm_insn (".long\t%3", op);
12022 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12023 output_asm_insn ("l\t%0,0(%2)", op);
12024 output_asm_insn ("l\t%2,4(%2)", op);
12025 output_asm_insn ("basr\t%0,%0", op);
12026 output_asm_insn ("l\t%0,%1", op);
12030 op[5] = gen_label_rtx ();
12031 op[6] = gen_label_rtx ();
12033 output_asm_insn ("st\t%0,%1", op);
12034 output_asm_insn ("bras\t%2,%l6", op);
12035 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12036 output_asm_insn (".long\t%4-%l5", op);
12037 output_asm_insn (".long\t%3-%l5", op);
12038 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12039 output_asm_insn ("lr\t%0,%2", op);
12040 output_asm_insn ("a\t%0,0(%2)", op);
12041 output_asm_insn ("a\t%2,4(%2)", op);
12042 output_asm_insn ("basr\t%0,%0", op);
12043 output_asm_insn ("l\t%0,%1", op);
12047 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12048 into its SYMBOL_REF_FLAGS. */
12051 s390_encode_section_info (tree decl, rtx rtl, int first)
12053 default_encode_section_info (decl, rtl, first);
12055 if (TREE_CODE (decl) == VAR_DECL)
12057 /* Store the alignment to be able to check if we can use
12058 a larl/load-relative instruction. We only handle the cases
12059 that can go wrong (i.e. no FUNC_DECLs). If a symref does
12060 not have any flag we assume it to be correctly aligned. */
12062 if (DECL_ALIGN (decl) % 64)
12063 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12065 if (DECL_ALIGN (decl) % 32)
12066 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12068 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12069 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12072 /* Literal pool references don't have a decl so they are handled
12073 differently here. We rely on the information in the MEM_ALIGN
12074 entry to decide upon the alignment. */
12076 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12077 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
12078 && MEM_ALIGN (rtl) != 0
12079 && GET_MODE_BITSIZE (GET_MODE (rtl)) != 0)
12081 if (MEM_ALIGN (rtl) % 64)
12082 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12084 if (MEM_ALIGN (rtl) % 32)
12085 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12087 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12088 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
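/* Editorial note (illustrative sketch, not part of the original source):
   for a declaration like

     char buf[3] __attribute__ ((aligned (1)));

   DECL_ALIGN is 8 bits, so all three NOTALIGN flags are set on the
   symbol, and the backend knows it must not use larl or the other
   load-relative instructions (which require at least a 2-byte aligned
   target) when materializing &buf.  */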
12092 /* Output thunk to FILE that implements a C++ virtual function call (with
12093 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12094 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12095 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12096 relative to the resulting this pointer. */
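/* Editorial note (illustrative sketch, not part of the original source):
   the emitted thunk behaves like this C rendition, where the vtable
   pointer sits at offset 0 of the object:

     void *thunk (char *this_ptr)
     {
       this_ptr += delta;
       if (vcall_offset)
         this_ptr += *(long *) (*(char **) this_ptr + vcall_offset);
       return function (this_ptr);   // tail call, the 'jg' below
     }
*/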
12099 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12100 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12106 /* Make sure unwind info is emitted for the thunk if needed. */
12107 final_start_function (emit_barrier (), file, 1);
12109 /* Operand 0 is the target function. */
12110 op[0] = XEXP (DECL_RTL (function), 0);
12111 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12114 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12115 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12116 op[0] = gen_rtx_CONST (Pmode, op[0]);
12119 /* Operand 1 is the 'this' pointer. */
12120 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12121 op[1] = gen_rtx_REG (Pmode, 3);
12123 op[1] = gen_rtx_REG (Pmode, 2);
12125 /* Operand 2 is the delta. */
12126 op[2] = GEN_INT (delta);
12128 /* Operand 3 is the vcall_offset. */
12129 op[3] = GEN_INT (vcall_offset);
12131 /* Operand 4 is the temporary register. */
12132 op[4] = gen_rtx_REG (Pmode, 1);
12134 /* Operands 5 to 8 can be used as labels. */
12140 /* Operand 9 can be used for temporary register. */
12143 /* Generate code. */
12146 /* Setup literal pool pointer if required. */
12147 if ((!DISP_IN_RANGE (delta)
12148 && !CONST_OK_FOR_K (delta)
12149 && !CONST_OK_FOR_Os (delta))
12150 || (!DISP_IN_RANGE (vcall_offset)
12151 && !CONST_OK_FOR_K (vcall_offset)
12152 && !CONST_OK_FOR_Os (vcall_offset)))
12154 op[5] = gen_label_rtx ();
12155 output_asm_insn ("larl\t%4,%5", op);
12158 /* Add DELTA to this pointer. */
12161 if (CONST_OK_FOR_J (delta))
12162 output_asm_insn ("la\t%1,%2(%1)", op);
12163 else if (DISP_IN_RANGE (delta))
12164 output_asm_insn ("lay\t%1,%2(%1)", op);
12165 else if (CONST_OK_FOR_K (delta))
12166 output_asm_insn ("aghi\t%1,%2", op);
12167 else if (CONST_OK_FOR_Os (delta))
12168 output_asm_insn ("agfi\t%1,%2", op);
12171 op[6] = gen_label_rtx ();
12172 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12176 /* Perform vcall adjustment. */
12179 if (DISP_IN_RANGE (vcall_offset))
12181 output_asm_insn ("lg\t%4,0(%1)", op);
12182 output_asm_insn ("ag\t%1,%3(%4)", op);
12184 else if (CONST_OK_FOR_K (vcall_offset))
12186 output_asm_insn ("lghi\t%4,%3", op);
12187 output_asm_insn ("ag\t%4,0(%1)", op);
12188 output_asm_insn ("ag\t%1,0(%4)", op);
12190 else if (CONST_OK_FOR_Os (vcall_offset))
12192 output_asm_insn ("lgfi\t%4,%3", op);
12193 output_asm_insn ("ag\t%4,0(%1)", op);
12194 output_asm_insn ("ag\t%1,0(%4)", op);
12198 op[7] = gen_label_rtx ();
12199 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12200 output_asm_insn ("ag\t%4,0(%1)", op);
12201 output_asm_insn ("ag\t%1,0(%4)", op);
12205 /* Jump to target. */
12206 output_asm_insn ("jg\t%0", op);
12208 /* Output literal pool if required. */
12211 output_asm_insn (".align\t4", op);
12212 targetm.asm_out.internal_label (file, "L",
12213 CODE_LABEL_NUMBER (op[5]));
12217 targetm.asm_out.internal_label (file, "L",
12218 CODE_LABEL_NUMBER (op[6]));
12219 output_asm_insn (".long\t%2", op);
12223 targetm.asm_out.internal_label (file, "L",
12224 CODE_LABEL_NUMBER (op[7]));
12225 output_asm_insn (".long\t%3", op);
12230 /* Setup base pointer if required. */
12231 if (!vcall_offset
12232 || (!DISP_IN_RANGE (delta)
12233 && !CONST_OK_FOR_K (delta)
12234 && !CONST_OK_FOR_Os (delta))
12235 || (!DISP_IN_RANGE (vcall_offset)
12236 && !CONST_OK_FOR_K (vcall_offset)
12237 && !CONST_OK_FOR_Os (vcall_offset)))
12239 op[5] = gen_label_rtx ();
12240 output_asm_insn ("basr\t%4,0", op);
12241 targetm.asm_out.internal_label (file, "L",
12242 CODE_LABEL_NUMBER (op[5]));
12245 /* Add DELTA to this pointer. */
12248 if (CONST_OK_FOR_J (delta))
12249 output_asm_insn ("la\t%1,%2(%1)", op);
12250 else if (DISP_IN_RANGE (delta))
12251 output_asm_insn ("lay\t%1,%2(%1)", op);
12252 else if (CONST_OK_FOR_K (delta))
12253 output_asm_insn ("ahi\t%1,%2", op);
12254 else if (CONST_OK_FOR_Os (delta))
12255 output_asm_insn ("afi\t%1,%2", op);
12258 op[6] = gen_label_rtx ();
12259 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12263 /* Perform vcall adjustment. */
12266 if (CONST_OK_FOR_J (vcall_offset))
12268 output_asm_insn ("l\t%4,0(%1)", op);
12269 output_asm_insn ("a\t%1,%3(%4)", op);
12271 else if (DISP_IN_RANGE (vcall_offset))
12273 output_asm_insn ("l\t%4,0(%1)", op);
12274 output_asm_insn ("ay\t%1,%3(%4)", op);
12276 else if (CONST_OK_FOR_K (vcall_offset))
12278 output_asm_insn ("lhi\t%4,%3", op);
12279 output_asm_insn ("a\t%4,0(%1)", op);
12280 output_asm_insn ("a\t%1,0(%4)", op);
12282 else if (CONST_OK_FOR_Os (vcall_offset))
12284 output_asm_insn ("iilf\t%4,%3", op);
12285 output_asm_insn ("a\t%4,0(%1)", op);
12286 output_asm_insn ("a\t%1,0(%4)", op);
12290 op[7] = gen_label_rtx ();
12291 output_asm_insn ("l\t%4,%7-%5(%4)", op);
12292 output_asm_insn ("a\t%4,0(%1)", op);
12293 output_asm_insn ("a\t%1,0(%4)", op);
12296 /* We had to clobber the base pointer register.
12297 Re-setup the base pointer (with a different base). */
12298 op[5] = gen_label_rtx ();
12299 output_asm_insn ("basr\t%4,0", op);
12300 targetm.asm_out.internal_label (file, "L",
12301 CODE_LABEL_NUMBER (op[5]));
12304 /* Jump to target. */
12305 op[8] = gen_label_rtx ();
12307 if (!flag_pic)
12308 output_asm_insn ("l\t%4,%8-%5(%4)", op);
12309 else if (!nonlocal)
12310 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12311 /* We cannot call through .plt, since .plt requires %r12 loaded. */
12312 else if (flag_pic == 1)
12314 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12315 output_asm_insn ("l\t%4,%0(%4)", op);
12317 else if (flag_pic == 2)
12319 op[9] = gen_rtx_REG (Pmode, 0);
12320 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
12321 output_asm_insn ("a\t%4,%8-%5(%4)", op);
12322 output_asm_insn ("ar\t%4,%9", op);
12323 output_asm_insn ("l\t%4,0(%4)", op);
12326 output_asm_insn ("br\t%4", op);
12328 /* Output literal pool. */
12329 output_asm_insn (".align\t4", op);
12331 if (nonlocal && flag_pic == 2)
12332 output_asm_insn (".long\t%0", op);
12335 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
12336 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
12339 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
12340 if (!flag_pic)
12341 output_asm_insn (".long\t%0", op);
12342 else
12343 output_asm_insn (".long\t%0-%5", op);
12347 targetm.asm_out.internal_label (file, "L",
12348 CODE_LABEL_NUMBER (op[6]));
12349 output_asm_insn (".long\t%2", op);
12353 targetm.asm_out.internal_label (file, "L",
12354 CODE_LABEL_NUMBER (op[7]));
12355 output_asm_insn (".long\t%3", op);
12358 final_end_function ();
12362 s390_valid_pointer_mode (machine_mode mode)
12364 return (mode == SImode || (TARGET_64BIT && mode == DImode));
12367 /* Checks whether the given CALL_EXPR would use a caller
12368 saved register. This is used to decide whether sibling call
12369 optimization could be performed on the respective function
12373 s390_call_saved_register_used (tree call_expr)
12375 CUMULATIVE_ARGS cum_v;
12376 cumulative_args_t cum;
12383 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
12384 cum = pack_cumulative_args (&cum_v);
12386 for (i = 0; i < call_expr_nargs (call_expr); i++)
12388 parameter = CALL_EXPR_ARG (call_expr, i);
12389 gcc_assert (parameter);
12391 /* For an undeclared variable passed as a parameter we will get
12392 an ERROR_MARK node here. */
12393 if (TREE_CODE (parameter) == ERROR_MARK)
12396 type = TREE_TYPE (parameter);
12399 mode = TYPE_MODE (type);
12402 /* We assume that in the target function all parameters are
12403 named. This only has an impact on the vector argument registers,
12404 none of which are call-saved. */
12405 if (pass_by_reference (&cum_v, mode, type, true))
12408 type = build_pointer_type (type);
12411 parm_rtx = s390_function_arg (cum, mode, type, true);
12413 s390_function_arg_advance (cum, mode, type, true);
12418 if (REG_P (parm_rtx))
12420 for (reg = 0;
12421 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
12422 reg++)
12423 if (!call_used_regs[reg + REGNO (parm_rtx)])
12427 if (GET_CODE (parm_rtx) == PARALLEL)
12431 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
12433 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
12435 gcc_assert (REG_P (r));
12437 for (reg = 0;
12438 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
12439 reg++)
12440 if (!call_used_regs[reg + REGNO (r)])
12449 /* Return true if the given call expression can be
12450 turned into a sibling call.
12451 DECL holds the declaration of the function to be called whereas
12452 EXP is the call expression itself. */
12455 s390_function_ok_for_sibcall (tree decl, tree exp)
12457 /* The TPF epilogue uses register 1. */
12458 if (TARGET_TPF_PROFILING)
12461 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
12462 which would have to be restored before the sibcall. */
12463 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
12466 /* Register 6 on s390 is available as an argument register but is
12467 unfortunately call-saved. This makes functions needing this register
12468 for arguments not suitable for sibcalls. */
12469 return !s390_call_saved_register_used (exp);
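/* Editorial note (illustrative sketch, not part of the original source):
   with integer arguments passed in r2-r6, a call such as

     extern int g (int, int, int, int, int);

     int f (int a, int b, int c, int d, int e)
     {
       return g (a, b, c, d, e);   // fifth argument lands in r6
     }

   needs the call-saved register r6 for the fifth argument, so f is
   rejected for sibcall optimization by the check above.  */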
12472 /* Return the fixed registers used for condition codes. */
12475 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
12478 *p2 = INVALID_REGNUM;
12483 /* This function is used by the call expanders of the machine description.
12484 It emits the call insn itself together with the necessary operations
12485 to adjust the target address and returns the emitted insn.
12486 ADDR_LOCATION is the target address rtx
12487 TLS_CALL the location of the thread-local symbol
12488 RESULT_REG the register where the result of the call should be stored
12489 RETADDR_REG the register where the return address should be stored
12490 If this parameter is NULL_RTX the call is considered
12491 to be a sibling call. */
12494 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
12497 bool plt_call = false;
12503 /* Direct function calls need special treatment. */
12504 if (GET_CODE (addr_location) == SYMBOL_REF)
12506 /* When calling a global routine in PIC mode, we must
12507 replace the symbol itself with the PLT stub. */
12508 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
12510 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
12512 addr_location = gen_rtx_UNSPEC (Pmode,
12513 gen_rtvec (1, addr_location),
12514 UNSPEC_PLT);
12515 addr_location = gen_rtx_CONST (Pmode, addr_location);
12516 plt_call = true;
12519 /* For -fpic code the PLT entries might use r12 which is
12520 call-saved. Therefore we cannot do a sibcall when
12521 calling directly using a symbol ref. When reaching
12522 this point we decided (in s390_function_ok_for_sibcall)
12523 to do a sibcall for a function pointer but one of the
12524 optimizers was able to get rid of the function pointer
12525 by propagating the symbol ref into the call. This
12526 optimization is illegal for S/390 so we turn the direct
12527 call into an indirect call again. */
12528 addr_location = force_reg (Pmode, addr_location);
12531 /* Unless we can use the bras(l) insn, force the
12532 routine address into a register. */
12533 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
12536 addr_location = legitimize_pic_address (addr_location, 0);
12538 addr_location = force_reg (Pmode, addr_location);
12542 /* If it is already an indirect call or the code above moved the
12543 SYMBOL_REF to somewhere else make sure the address can be found in
12545 if (retaddr_reg == NULL_RTX
12546 && GET_CODE (addr_location) != SYMBOL_REF
12547 && !plt_call)
12549 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
12550 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
12553 addr_location = gen_rtx_MEM (QImode, addr_location);
12554 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
12556 if (result_reg != NULL_RTX)
12557 call = gen_rtx_SET (result_reg, call);
12559 if (retaddr_reg != NULL_RTX)
12561 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
12563 if (tls_call != NULL_RTX)
12564 vec = gen_rtvec (3, call, clobber,
12565 gen_rtx_USE (VOIDmode, tls_call));
12567 vec = gen_rtvec (2, call, clobber);
12569 call = gen_rtx_PARALLEL (VOIDmode, vec);
12572 insn = emit_call_insn (call);
12574 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
12575 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
12577 /* s390_function_ok_for_sibcall should
12578 have denied sibcalls in this case. */
12579 gcc_assert (retaddr_reg != NULL_RTX);
12580 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
12585 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
12588 s390_conditional_register_usage (void)
12590 int i;
12592 if (flag_pic)
12594 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12595 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
12597 if (TARGET_CPU_ZARCH)
12599 fixed_regs[BASE_REGNUM] = 0;
12600 call_used_regs[BASE_REGNUM] = 0;
12601 fixed_regs[RETURN_REGNUM] = 0;
12602 call_used_regs[RETURN_REGNUM] = 0;
12604 if (TARGET_64BIT)
12606 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
12607 call_used_regs[i] = call_really_used_regs[i] = 0;
12609 else
12611 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
12612 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
12615 if (TARGET_SOFT_FLOAT)
12617 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
12618 call_used_regs[i] = fixed_regs[i] = 1;
12621 /* Disable v16 - v31 for non-vector target. */
12622 if (!TARGET_VX)
12624 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
12625 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
12629 /* Corresponding function to eh_return expander. */
12631 static GTY(()) rtx s390_tpf_eh_return_symbol;
12633 s390_emit_tpf_eh_return (rtx target)
12638 if (!s390_tpf_eh_return_symbol)
12639 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
12641 reg = gen_rtx_REG (Pmode, 2);
12642 orig_ra = gen_rtx_REG (Pmode, 3);
12644 emit_move_insn (reg, target);
12645 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
12646 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
12647 gen_rtx_REG (Pmode, RETURN_REGNUM));
12648 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
12649 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
12651 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
12654 /* Rework the prologue/epilogue to avoid saving/restoring
12655 registers unnecessarily. */
12658 s390_optimize_prologue (void)
12660 rtx_insn *insn, *new_insn, *next_insn;
12662 /* Do a final recompute of the frame-related data. */
12663 s390_optimize_register_info ();
12665 /* If all special registers are in fact used, there's nothing we
12666 can do, so no point in walking the insn list. */
12668 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
12669 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
12670 && (TARGET_CPU_ZARCH
12671 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
12672 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
12675 /* Search for prologue/epilogue insns and replace them. */
12677 for (insn = get_insns (); insn; insn = next_insn)
12679 int first, last, off;
12680 rtx set, base, offset;
12683 next_insn = NEXT_INSN (insn);
12685 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
12688 pat = PATTERN (insn);
12690 /* Remove ldgr/lgdr instructions used for saving and restoring
12691 GPRs if possible. */
12693 && GET_CODE (pat) == SET
12694 && GET_MODE (SET_SRC (pat)) == DImode
12695 && REG_P (SET_SRC (pat))
12696 && REG_P (SET_DEST (pat)))
12698 int src_regno = REGNO (SET_SRC (pat));
12699 int dest_regno = REGNO (SET_DEST (pat));
12703 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
12704 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
12707 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
12708 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
12710 /* GPR must be call-saved, FPR must be call-clobbered. */
12711 if (!call_really_used_regs[fpr_regno]
12712 || call_really_used_regs[gpr_regno])
12715 /* It must not happen that what we once saved in an FPR now
12716 needs a stack slot. */
12717 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
12719 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
12721 remove_insn (insn);
12726 if (GET_CODE (pat) == PARALLEL
12727 && store_multiple_operation (pat, VOIDmode))
12729 set = XVECEXP (pat, 0, 0);
12730 first = REGNO (SET_SRC (set));
12731 last = first + XVECLEN (pat, 0) - 1;
12732 offset = const0_rtx;
12733 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12734 off = INTVAL (offset);
12736 if (GET_CODE (base) != REG || off < 0)
12738 if (cfun_frame_layout.first_save_gpr != -1
12739 && (cfun_frame_layout.first_save_gpr < first
12740 || cfun_frame_layout.last_save_gpr > last))
12742 if (REGNO (base) != STACK_POINTER_REGNUM
12743 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12745 if (first > BASE_REGNUM || last < BASE_REGNUM)
12748 if (cfun_frame_layout.first_save_gpr != -1)
12750 rtx s_pat = save_gprs (base,
12751 off + (cfun_frame_layout.first_save_gpr
12752 - first) * UNITS_PER_LONG,
12753 cfun_frame_layout.first_save_gpr,
12754 cfun_frame_layout.last_save_gpr);
12755 new_insn = emit_insn_before (s_pat, insn);
12756 INSN_ADDRESSES_NEW (new_insn, -1);
12759 remove_insn (insn);
12763 if (cfun_frame_layout.first_save_gpr == -1
12764 && GET_CODE (pat) == SET
12765 && GENERAL_REG_P (SET_SRC (pat))
12766 && GET_CODE (SET_DEST (pat)) == MEM)
12769 first = REGNO (SET_SRC (set));
12770 offset = const0_rtx;
12771 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
12772 off = INTVAL (offset);
12774 if (GET_CODE (base) != REG || off < 0)
12776 if (REGNO (base) != STACK_POINTER_REGNUM
12777 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12780 remove_insn (insn);
12784 if (GET_CODE (pat) == PARALLEL
12785 && load_multiple_operation (pat, VOIDmode))
12787 set = XVECEXP (pat, 0, 0);
12788 first = REGNO (SET_DEST (set));
12789 last = first + XVECLEN (pat, 0) - 1;
12790 offset = const0_rtx;
12791 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12792 off = INTVAL (offset);
12794 if (GET_CODE (base) != REG || off < 0)
12797 if (cfun_frame_layout.first_restore_gpr != -1
12798 && (cfun_frame_layout.first_restore_gpr < first
12799 || cfun_frame_layout.last_restore_gpr > last))
12801 if (REGNO (base) != STACK_POINTER_REGNUM
12802 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12804 if (first > BASE_REGNUM || last < BASE_REGNUM)
12807 if (cfun_frame_layout.first_restore_gpr != -1)
12809 rtx rpat = restore_gprs (base,
12810 off + (cfun_frame_layout.first_restore_gpr
12811 - first) * UNITS_PER_LONG,
12812 cfun_frame_layout.first_restore_gpr,
12813 cfun_frame_layout.last_restore_gpr);
12815 /* Remove REG_CFA_RESTOREs for registers that we no
12816 longer need to save. */
12817 REG_NOTES (rpat) = REG_NOTES (insn);
12818 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
12819 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
12820 && ((int) REGNO (XEXP (*ptr, 0))
12821 < cfun_frame_layout.first_restore_gpr))
12822 *ptr = XEXP (*ptr, 1);
12824 ptr = &XEXP (*ptr, 1);
12825 new_insn = emit_insn_before (rpat, insn);
12826 RTX_FRAME_RELATED_P (new_insn) = 1;
12827 INSN_ADDRESSES_NEW (new_insn, -1);
12830 remove_insn (insn);
12834 if (cfun_frame_layout.first_restore_gpr == -1
12835 && GET_CODE (pat) == SET
12836 && GENERAL_REG_P (SET_DEST (pat))
12837 && GET_CODE (SET_SRC (pat)) == MEM)
12840 first = REGNO (SET_DEST (set));
12841 offset = const0_rtx;
12842 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
12843 off = INTVAL (offset);
12845 if (GET_CODE (base) != REG || off < 0)
12848 if (REGNO (base) != STACK_POINTER_REGNUM
12849 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
12852 remove_insn (insn);
12858 /* On z10 and later the dynamic branch prediction must see the
12859 backward jump within a certain window. If not it falls back to
12860 the static prediction. This function rearranges the loop backward
12861 branch in a way which makes the static prediction always correct.
12862 The function returns true if it added an instruction. */
12864 s390_fix_long_loop_prediction (rtx_insn *insn)
12866 rtx set = single_set (insn);
12867 rtx code_label, label_ref, new_label;
12868 rtx_insn *uncond_jump;
12869 rtx_insn *cur_insn;
12870 rtx tmp;
12871 int distance;
12873 /* This will exclude branch on count and branch on index patterns
12874 since these are correctly statically predicted. */
12875 if (! set
12876 || SET_DEST (set) != pc_rtx
12877 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
12880 /* Skip conditional returns. */
12881 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
12882 && XEXP (SET_SRC (set), 2) == pc_rtx)
12885 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
12886 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
12888 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
12890 code_label = XEXP (label_ref, 0);
12892 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
12893 || INSN_ADDRESSES (INSN_UID (insn)) == -1
12894 || (INSN_ADDRESSES (INSN_UID (insn))
12895 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
12898 for (distance = 0, cur_insn = PREV_INSN (insn);
12899 distance < PREDICT_DISTANCE - 6;
12900 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
12901 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
12904 new_label = gen_label_rtx ();
12905 uncond_jump = emit_jump_insn_after (
12906 gen_rtx_SET (pc_rtx,
12907 gen_rtx_LABEL_REF (VOIDmode, code_label)),
12909 emit_label_after (new_label, uncond_jump);
12911 tmp = XEXP (SET_SRC (set), 1);
12912 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
12913 XEXP (SET_SRC (set), 2) = tmp;
12914 INSN_CODE (insn) = -1;
12916 XEXP (label_ref, 0) = new_label;
12917 JUMP_LABEL (insn) = new_label;
12918 JUMP_LABEL (uncond_jump) = code_label;
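/* Editorial note (illustrative sketch, not part of the original source):
   conceptually the rewrite above turns

     label:  ...
             jne   label          # backward branch, too far away

   into

     label:  ...
             je    new_label      # inverted condition, short forward hop
             j     label          # unconditional backward branch
     new_label:

   so the unconditional backward jump is always predicted correctly.  */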
12923 /* Returns 1 if INSN reads the value of REG for purposes not related
12924 to addressing of memory, and 0 otherwise. */
12926 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
12928 return reg_referenced_p (reg, PATTERN (insn))
12929 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
12932 /* Starting from INSN find_cond_jump looks downwards in the insn
12933 stream for a single jump insn which is the last user of the
12934 condition code set in INSN. */
12936 find_cond_jump (rtx_insn *insn)
12938 for (; insn; insn = NEXT_INSN (insn))
12942 if (LABEL_P (insn))
12945 if (!JUMP_P (insn))
12947 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
12952 /* This will be triggered by a return. */
12953 if (GET_CODE (PATTERN (insn)) != SET)
12956 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
12957 ite = SET_SRC (PATTERN (insn));
12959 if (GET_CODE (ite) != IF_THEN_ELSE)
12962 cc = XEXP (XEXP (ite, 0), 0);
12963 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
12966 if (find_reg_note (insn, REG_DEAD, cc))
12974 /* Swap the condition in COND and the operands in OP0 and OP1 so that
12975 the semantics does not change. If NULL_RTX is passed as COND the
12976 function tries to find the conditional jump starting with INSN. */
12978 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
12982 if (cond == NULL_RTX)
12984 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
12985 rtx set = jump ? single_set (jump) : NULL_RTX;
12987 if (set == NULL_RTX)
12990 cond = XEXP (SET_SRC (set), 0);
12995 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
12998 /* On z10, instructions of the compare-and-branch family have the
12999 property to access the register occurring as second operand with
13000 its bits complemented. If such a compare is grouped with a second
13001 instruction that accesses the same register non-complemented, and
13002 if that register's value is delivered via a bypass, then the
13003 pipeline recycles, thereby causing significant performance decline.
13004 This function locates such situations and exchanges the two
13005 operands of the compare. The function returns true whenever it
13006 added an insn. */
13008 s390_z10_optimize_cmp (rtx_insn *insn)
13010 rtx_insn *prev_insn, *next_insn;
13011 bool insn_added_p = false;
13012 rtx cond, *op0, *op1;
13014 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13016 /* Handle compare and branch and branch on count
13018 rtx pattern = single_set (insn);
13020 if (pattern == NULL_RTX
13021 || SET_DEST (pattern) != pc_rtx
13022 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13025 cond = XEXP (SET_SRC (pattern), 0);
13026 op0 = &XEXP (cond, 0);
13027 op1 = &XEXP (cond, 1);
13029 else if (GET_CODE (PATTERN (insn)) == SET)
13033 /* Handle normal compare instructions. */
13034 src = SET_SRC (PATTERN (insn));
13035 dest = SET_DEST (PATTERN (insn));
13037 if (!REG_P (dest)
13038 || !CC_REGNO_P (REGNO (dest))
13039 || GET_CODE (src) != COMPARE)
13042 /* s390_swap_cmp will try to find the conditional
13043 jump when passing NULL_RTX as condition. */
13045 op0 = &XEXP (src, 0);
13046 op1 = &XEXP (src, 1);
13051 if (!REG_P (*op0) || !REG_P (*op1))
13054 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13057 /* Swap the COMPARE arguments and its mask if there is a
13058 conflicting access in the previous insn. */
13059 prev_insn = prev_active_insn (insn);
13060 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13061 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13062 s390_swap_cmp (cond, op0, op1, insn);
13064 /* Check if there is a conflict with the next insn. If there
13065 was no conflict with the previous insn, then swap the
13066 COMPARE arguments and its mask. If we already swapped
13067 the operands, or if swapping them would cause a conflict
13068 with the previous insn, issue a NOP after the COMPARE in
13069 order to separate the two instructions. */
13070 next_insn = next_active_insn (insn);
13071 if (next_insn != NULL_RTX && INSN_P (next_insn)
13072 && s390_non_addr_reg_read_p (*op1, next_insn))
13074 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13075 && s390_non_addr_reg_read_p (*op0, prev_insn))
13077 if (REGNO (*op1) == 0)
13078 emit_insn_after (gen_nop1 (), insn);
13080 emit_insn_after (gen_nop (), insn);
13081 insn_added_p = true;
13084 s390_swap_cmp (cond, op0, op1, insn);
13086 return insn_added_p;
13089 /* Perform machine-dependent processing. */
13094 bool pool_overflow = false;
13095 int hw_before, hw_after;
13097 /* Make sure all splits have been performed; splits after
13098 machine_dependent_reorg might confuse insn length counts. */
13099 split_all_insns_noflow ();
13101 /* Install the main literal pool and the associated base
13102 register load insns.
13104 In addition, there are two problematic situations we need
13107 - the literal pool might be > 4096 bytes in size, so that
13108 some of its elements cannot be directly accessed
13110 - a branch target might be > 64K away from the branch, so that
13111 it is not possible to use a PC-relative instruction.
13113 To fix those, we split the single literal pool into multiple
13114 pool chunks, reloading the pool base register at various
13115 points throughout the function to ensure it always points to
13116 the pool chunk the following code expects, and / or replace
13117 PC-relative branches by absolute branches.
13119 However, the two problems are interdependent: splitting the
13120 literal pool can move a branch further away from its target,
13121 causing the 64K limit to overflow, and on the other hand,
13122 replacing a PC-relative branch by an absolute branch means
13123 we need to put the branch target address into the literal
13124 pool, possibly causing it to overflow.
13126 So, we loop trying to fix up both problems until we manage
13127 to satisfy both conditions at the same time. Note that the
13128 loop is guaranteed to terminate as every pass of the loop
13129 strictly decreases the total number of PC-relative branches
13130 in the function. (This is not completely true as there
13131 might be branch-over-pool insns introduced by chunkify_start.
13132 Those never need to be split however.) */
13136 struct constant_pool *pool = NULL;
13138 /* Collect the literal pool. */
13139 if (!pool_overflow)
13141 pool = s390_mainpool_start ();
13143 pool_overflow = true;
13146 /* If literal pool overflowed, start to chunkify it. */
13148 pool = s390_chunkify_start ();
13150 /* Split out-of-range branches. If this has created new
13151 literal pool entries, cancel current chunk list and
13152 recompute it. zSeries machines have large branch
13153 instructions, so we never need to split a branch. */
13154 if (!TARGET_CPU_ZARCH && s390_split_branches ())
13157 s390_chunkify_cancel (pool);
13159 s390_mainpool_cancel (pool);
13164 /* If we made it up to here, both conditions are satisfied.
13165 Finish up literal pool related changes. */
13167 s390_chunkify_finish (pool);
13169 s390_mainpool_finish (pool);
13171 /* We're done splitting branches. */
13172 cfun->machine->split_branches_pending_p = false;
13176 /* Generate out-of-pool execute target insns. */
13177 if (TARGET_CPU_ZARCH)
13179 rtx_insn *insn, *target;
13182 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13184 label = s390_execute_label (insn);
13188 gcc_assert (label != const0_rtx);
13190 target = emit_label (XEXP (label, 0));
13191 INSN_ADDRESSES_NEW (target, -1);
13193 target = emit_insn (s390_execute_target (insn));
13194 INSN_ADDRESSES_NEW (target, -1);
13198 /* Try to optimize prologue and epilogue further. */
13199 s390_optimize_prologue ();
13201 /* Walk over the insns and do some >=z10 specific changes. */
13202 if (s390_tune >= PROCESSOR_2097_Z10)
13205 bool insn_added_p = false;
13207 /* The insn lengths and addresses have to be up to date for the
13208 following manipulations. */
13209 shorten_branches (get_insns ());
13211 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13213 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13217 insn_added_p |= s390_fix_long_loop_prediction (insn);
13219 if ((GET_CODE (PATTERN (insn)) == PARALLEL
13220 || GET_CODE (PATTERN (insn)) == SET)
13221 && s390_tune == PROCESSOR_2097_Z10)
13222 insn_added_p |= s390_z10_optimize_cmp (insn);
13225 /* Adjust branches if we added new instructions. */
13227 shorten_branches (get_insns ());
13230 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
13235 /* Insert NOPs for hotpatching. */
13236 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
13237 /* Emit NOPs
13238 1. inside the area covered by debug information to allow setting
13239 breakpoints at the NOPs,
13240 2. before any insn which results in an asm instruction,
13241 3. before in-function labels to avoid jumping to the NOPs, for
13242 example as part of a loop,
13243 4. before any barrier in case the function is completely empty
13244 (__builtin_unreachable ()) and has neither internal labels nor
13245 active insns. */
13247 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
13249 /* Output a series of NOPs before the first active insn. */
13250 while (insn && hw_after > 0)
13252 if (hw_after >= 3 && TARGET_CPU_ZARCH)
13254 emit_insn_before (gen_nop_6_byte (), insn);
13257 else if (hw_after >= 2)
13259 emit_insn_before (gen_nop_4_byte (), insn);
13264 emit_insn_before (gen_nop_2_byte (), insn);
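/* Editorial note (illustrative sketch, not part of the original source):
   a function declared as

     void f (void) __attribute__ ((hotpatch (1, 2)));

   requests 1 halfword of NOPs before the function label and 2 halfwords
   after it; the loop above would satisfy the latter with one 4-byte NOP
   (hw_after == 2).  */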
13271 /* Return true if INSN is a fp load insn writing register REGNO. */
13273 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
13276 enum attr_type flag = s390_safe_attr_type (insn);
13278 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
13281 set = single_set (insn);
13283 if (set == NULL_RTX)
13286 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
13289 if (REGNO (SET_DEST (set)) != regno)
13295 /* This value describes the distance to be avoided between an
13296 arithmetic fp instruction and an fp load writing the same register.
13297 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
13298 fine but the exact value has to be avoided. Otherwise the FP
13299 pipeline will throw an exception causing a major penalty. */
13300 #define Z10_EARLYLOAD_DISTANCE 7
13302 /* Rearrange the ready list in order to avoid the situation described
13303 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
13304 moved to the very end of the ready list. */
13306 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
13308 unsigned int regno;
13309 int nready = *nready_p;
13314 enum attr_type flag;
13317 /* Skip DISTANCE - 1 active insns. */
13318 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
13319 distance > 0 && insn != NULL_RTX;
13320 distance--, insn = prev_active_insn (insn))
13321 if (CALL_P (insn) || JUMP_P (insn))
13324 if (insn == NULL_RTX)
13327 set = single_set (insn);
13329 if (set == NULL_RTX || !REG_P (SET_DEST (set))
13330 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
13333 flag = s390_safe_attr_type (insn);
13335 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
13338 regno = REGNO (SET_DEST (set));
13341 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
13348 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
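/* Editorial note (illustrative sketch, not part of the original source):
   with ready = {I0, I1, I2, I3}, where I3 is issued first, and I1 being
   the problematic load, the rotation above yields {I1, I0, I2, I3}:
   the load moves to slot 0 and is therefore issued last.  */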
13353 /* The s390_sched_state variable tracks the state of the current or
13354 the last instruction group.
13356 0,1,2 number of instructions scheduled in the current group
13357 3 the last group is complete - normal insns
13358 4 the last group was a cracked/expanded insn */
13360 static int s390_sched_state;
13362 #define S390_OOO_SCHED_STATE_NORMAL 3
13363 #define S390_OOO_SCHED_STATE_CRACKED 4
13365 #define S390_OOO_SCHED_ATTR_MASK_CRACKED 0x1
13366 #define S390_OOO_SCHED_ATTR_MASK_EXPANDED 0x2
13367 #define S390_OOO_SCHED_ATTR_MASK_ENDGROUP 0x4
13368 #define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8
13370 static unsigned int
13371 s390_get_sched_attrmask (rtx_insn *insn)
13373 unsigned int mask = 0;
13375 if (get_attr_ooo_cracked (insn))
13376 mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
13377 if (get_attr_ooo_expanded (insn))
13378 mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
13379 if (get_attr_ooo_endgroup (insn))
13380 mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
13381 if (get_attr_ooo_groupalone (insn))
13382 mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
13386 /* Return the scheduling score for INSN. The higher the score the
13387 better. The score is calculated from the OOO scheduling attributes
13388 of INSN and the scheduling state s390_sched_state. */
13390 s390_sched_score (rtx_insn *insn)
13392 unsigned int mask = s390_get_sched_attrmask (insn);
13395 switch (s390_sched_state)
13397 case 0:
13398 /* Try to put insns into the first slot which would otherwise
13399 break a group. */
13400 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13401 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13403 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13405 case 1:
13406 /* Prefer not cracked insns while trying to put together a
13407 group. */
13408 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13409 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13410 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13412 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
13415 case 2:
13416 /* Prefer not cracked insns while trying to put together a
13417 group. */
13418 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13419 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
13420 && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
13422 /* Prefer endgroup insns in the last slot. */
13423 if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
13426 case S390_OOO_SCHED_STATE_NORMAL:
13427 /* Prefer not cracked insns if the last was not cracked. */
13428 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
13429 && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
13431 if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13434 case S390_OOO_SCHED_STATE_CRACKED:
13435 /* Try to keep cracked insns together to prevent them from
13436 interrupting groups. */
13437 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13438 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13445 /* This function is called via hook TARGET_SCHED_REORDER before
13446 issuing one insn from list READY which contains *NREADYP entries.
13447 For target z10 it reorders load instructions to avoid early load
13448 conflicts in the floating point pipeline */
13450 s390_sched_reorder (FILE *file, int verbose,
13451 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
13453 if (s390_tune == PROCESSOR_2097_Z10
13454 && reload_completed
13456 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
13458 if (s390_tune >= PROCESSOR_2827_ZEC12
13459 && reload_completed
13463 int last_index = *nreadyp - 1;
13464 int max_index = -1;
13465 int max_score = -1;
13468 /* Just move the insn with the highest score to the top (the
13469 end) of the list. A full sort is not needed since a conflict
13470 in the hazard recognition cannot happen. So the top insn in
13471 the ready list will always be taken. */
13472 for (i = last_index; i >= 0; i--)
13476 if (recog_memoized (ready[i]) < 0)
13479 score = s390_sched_score (ready[i]);
13480 if (score > max_score)
13487 if (max_index != -1)
13489 if (max_index != last_index)
13491 tmp = ready[max_index];
13492 ready[max_index] = ready[last_index];
13493 ready[last_index] = tmp;
13497 "move insn %d to the top of list\n",
13498 INSN_UID (ready[last_index]));
13500 else if (verbose > 5)
13502 "best insn %d already on top\n",
13503 INSN_UID (ready[last_index]));
13508 fprintf (file, "ready list ooo attributes - sched state: %d\n",
13511 for (i = last_index; i >= 0; i--)
13513 if (recog_memoized (ready[i]) < 0)
13515 fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
13516 s390_sched_score (ready[i]));
13517 #define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
13518 PRINT_OOO_ATTR (ooo_cracked);
13519 PRINT_OOO_ATTR (ooo_expanded);
13520 PRINT_OOO_ATTR (ooo_endgroup);
13521 PRINT_OOO_ATTR (ooo_groupalone);
13522 #undef PRINT_OOO_ATTR
13523 fprintf (file, "\n");
13528 return s390_issue_rate ();
13532 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
13533 the scheduler has issued INSN. It stores the last issued insn into
13534 last_scheduled_insn in order to make it available for
13535 s390_sched_reorder. */
13537 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
13539 last_scheduled_insn = insn;
13541 if (s390_tune >= PROCESSOR_2827_ZEC12
13542 && reload_completed
13543 && recog_memoized (insn) >= 0)
13545 unsigned int mask = s390_get_sched_attrmask (insn);
13547 if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
13548 || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
13549 s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
13550 else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
13551 || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
13552 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13555 /* Only normal insns are left (mask == 0). */
13556 switch (s390_sched_state)
13558 case 0:
13559 case 1:
13560 case 2:
13561 case S390_OOO_SCHED_STATE_NORMAL:
13562 if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
13563 s390_sched_state = 1;
13564 else
13565 s390_sched_state++;
13568 case S390_OOO_SCHED_STATE_CRACKED:
13569 s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
13575 fprintf (file, "insn %d: ", INSN_UID (insn));
13576 #define PRINT_OOO_ATTR(ATTR) \
13577 fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
13578 PRINT_OOO_ATTR (ooo_cracked);
13579 PRINT_OOO_ATTR (ooo_expanded);
13580 PRINT_OOO_ATTR (ooo_endgroup);
13581 PRINT_OOO_ATTR (ooo_groupalone);
13582 #undef PRINT_OOO_ATTR
13583 fprintf (file, "\n");
13584 fprintf (file, "sched state: %d\n", s390_sched_state);
13588 if (GET_CODE (PATTERN (insn)) != USE
13589 && GET_CODE (PATTERN (insn)) != CLOBBER)
13596 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
13597 int verbose ATTRIBUTE_UNUSED,
13598 int max_ready ATTRIBUTE_UNUSED)
13600 last_scheduled_insn = NULL;
13601 s390_sched_state = 0;
13604 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
13605 the number of times struct loop *loop should be unrolled when tuning for
13606 cpus with a built-in stride prefetcher.
13607 The loop is analyzed for memory accesses by calling check_dpu for
13608 each rtx of the loop. Depending on the loop_depth and the amount of
13609 memory accesses a new number <=nunroll is returned to improve the
13610 behaviour of the hardware prefetch unit. */
13612 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
13617 unsigned mem_count = 0;
13619 if (s390_tune < PROCESSOR_2097_Z10)
13622 /* Count the number of memory references within the loop body. */
13623 bbs = get_loop_body (loop);
13624 subrtx_iterator::array_type array;
13625 for (i = 0; i < loop->num_nodes; i++)
13626 FOR_BB_INSNS (bbs[i], insn)
13627 if (INSN_P (insn) && INSN_CODE (insn) != -1)
13628 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
13633 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
13634 if (mem_count == 0)
13637 switch (loop_depth (loop))
13639 case 1:
13640 return MIN (nunroll, 28 / mem_count);
13641 case 2:
13642 return MIN (nunroll, 22 / mem_count);
13643 default:
13644 return MIN (nunroll, 16 / mem_count);
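/* Editorial note (illustrative worked example, not part of the original
   source): a depth-1 loop containing 4 memory accesses would be limited
   to MIN (nunroll, 28 / 4), i.e. at most 7 unrolled copies.  */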
13648 /* Restore the current options. This is a hook function and also called
13652 s390_function_specific_restore (struct gcc_options *opts,
13653 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
13655 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
13659 s390_option_override_internal (bool main_args_p,
13660 struct gcc_options *opts,
13661 const struct gcc_options *opts_set)
13663 const char *prefix;
13664 const char *suffix;
13666 /* Set up prefix/suffix so the error messages refer to either the command
13667 line argument, or the attribute(target). */
13668 if (main_args_p)
13670 prefix = "-m";
13671 suffix = "";
13673 else
13675 prefix = "option(\"";
13676 suffix = "\")";
13680 /* Architecture mode defaults according to ABI. */
13681   if (!(opts_set->x_target_flags & MASK_ZARCH))
13682     {
13683       if (TARGET_64BIT)
13684         opts->x_target_flags |= MASK_ZARCH;
13685       else
13686         opts->x_target_flags &= ~MASK_ZARCH;
13687     }
13689 /* Set the march default in case it hasn't been specified on cmdline. */
13690 if (!opts_set->x_s390_arch)
13691 opts->x_s390_arch = PROCESSOR_2064_Z900;
13692 else if (opts->x_s390_arch == PROCESSOR_9672_G5
13693 || opts->x_s390_arch == PROCESSOR_9672_G6)
13694 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
13695 "in future releases; use at least %sarch=z900%s",
13696 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
13697 suffix, prefix, suffix);
13699 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
13701 /* Determine processor to tune for. */
13702 if (!opts_set->x_s390_tune)
13703 opts->x_s390_tune = opts->x_s390_arch;
13704 else if (opts->x_s390_tune == PROCESSOR_9672_G5
13705 || opts->x_s390_tune == PROCESSOR_9672_G6)
13706 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
13707 "in future releases; use at least %stune=z900%s",
13708 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
13709 suffix, prefix, suffix);
13711 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
13713 /* Sanity checks. */
13714 if (opts->x_s390_arch == PROCESSOR_NATIVE
13715 || opts->x_s390_tune == PROCESSOR_NATIVE)
13716 gcc_unreachable ();
13717 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
13718 error ("z/Architecture mode not supported on %s",
13719 processor_table[(int)opts->x_s390_arch].name);
13720 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
13721 error ("64-bit ABI not supported in ESA/390 mode");
13723 /* Enable hardware transactions if available and not explicitly
13724 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
13725 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
13726     {
13727       if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
13728         opts->x_target_flags |= MASK_OPT_HTM;
13729       else
13730         opts->x_target_flags &= ~MASK_OPT_HTM;
13731     }
13733 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
13734     {
13735       if (TARGET_OPT_VX_P (opts->x_target_flags))
13736         {
13737           if (!TARGET_CPU_VX_P (opts))
13738             error ("hardware vector support not available on %s",
13739                    processor_table[(int)opts->x_s390_arch].name);
13740           if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
13741             error ("hardware vector support not available with -msoft-float");
13742         }
13743     }
13744   else
13745     {
13746       if (TARGET_CPU_VX_P (opts))
13747         /* Enable vector support if available and not explicitly disabled
13748            by user.  E.g. with -m31 -march=z13 -mzarch */
13749         opts->x_target_flags |= MASK_OPT_VX;
13750       else
13751         opts->x_target_flags &= ~MASK_OPT_VX;
13752     }
13754 /* Use hardware DFP if available and not explicitly disabled by
13755 user. E.g. with -m31 -march=z10 -mzarch */
13756 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
13757     {
13758       if (TARGET_DFP_P (opts))
13759         opts->x_target_flags |= MASK_HARD_DFP;
13760       else
13761         opts->x_target_flags &= ~MASK_HARD_DFP;
13762     }
13764 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
13765     {
13766       if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
13767         {
13768           if (!TARGET_CPU_DFP_P (opts))
13769             error ("hardware decimal floating point instructions"
13770                    " not available on %s",
13771                    processor_table[(int)opts->x_s390_arch].name);
13772           if (!TARGET_ZARCH_P (opts->x_target_flags))
13773             error ("hardware decimal floating point instructions"
13774                    " not available in ESA/390 mode");
13775         }
13776       else
13777         opts->x_target_flags &= ~MASK_HARD_DFP;
13778     }
13780 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
13781 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
13782     {
13783       if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
13784           && TARGET_HARD_DFP_P (opts->x_target_flags))
13785         error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
13787       opts->x_target_flags &= ~MASK_HARD_DFP;
13788     }
13790 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
13791 && TARGET_PACKED_STACK_P (opts->x_target_flags)
13792 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
13793 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
13796   if (opts->x_s390_stack_size)
13797     {
13798       if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
13799 error ("stack size must be greater than the stack guard value");
13800 else if (opts->x_s390_stack_size > 1 << 16)
13801 error ("stack size must not be greater than 64k");
13803 else if (opts->x_s390_stack_guard)
13804 error ("-mstack-guard implies use of -mstack-size");
13806 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
13807 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
13808     opts->x_target_flags |= MASK_LONG_DOUBLE_128;
13809 #endif
13811   if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
13812     {
13813 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
13814 opts->x_param_values,
13815 opts_set->x_param_values);
13816 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
13817 opts->x_param_values,
13818 opts_set->x_param_values);
13819 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
13820 opts->x_param_values,
13821 opts_set->x_param_values);
13822 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
13823 opts->x_param_values,
13824                              opts_set->x_param_values);
13825     }
13827 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
13828 opts->x_param_values,
13829 opts_set->x_param_values);
13830   /* Values for loop prefetching.  */
13831 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
13832 opts->x_param_values,
13833 opts_set->x_param_values);
13834 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
13835 opts->x_param_values,
13836 opts_set->x_param_values);
13837   /* s390 has more than 2 cache levels and their sizes are much larger.
13838      Since we are always running virtualized, assume that we only get a
13839      small part of the caches above L1.  */
13840 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
13841 opts->x_param_values,
13842 opts_set->x_param_values);
13843 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
13844 opts->x_param_values,
13845 opts_set->x_param_values);
13846 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
13847 opts->x_param_values,
13848 opts_set->x_param_values);
13850 /* Use the alternative scheduling-pressure algorithm by default. */
13851 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
13852 opts->x_param_values,
13853 opts_set->x_param_values);
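  /* Note: maybe_set_param_value only installs the defaults above when the
     respective --param has not been given explicitly, so a user-provided
     value such as --param max-unrolled-insns=400 (an invented example)
     still takes precedence over the value chosen here.  */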
13855 /* Call target specific restore function to do post-init work. At the moment,
13856 this just sets opts->x_s390_cost_pointer. */
13857   s390_function_specific_restore (opts, NULL);
13858 }
13860 static void
13861 s390_option_override (void)
13862 {
13863   unsigned int i;
13864 cl_deferred_option *opt;
13865 vec<cl_deferred_option> *v =
13866 (vec<cl_deferred_option> *) s390_deferred_options;
13868   if (v)
13869     FOR_EACH_VEC_ELT (*v, i, opt)
13870       {
13871         switch (opt->opt_index)
13872           {
13873           case OPT_mhotpatch_:
13874             {
13875               int val1;
13876               int val2;
13877               char s[256];
13878               char *t;
13880               strncpy (s, opt->arg, 256);
13881               s[255] = 0;
13882               t = strchr (s, ',');
13883               if (t != NULL)
13884                 {
13885                   *t = 0;
13886                   t++;
13887                   val1 = integral_argument (s);
13888                   val2 = integral_argument (t);
13889                 }
13890               else
13891                 {
13892                   val1 = -1;
13893                   val2 = -1;
13894                 }
13895 if (val1 == -1 || val2 == -1)
13896                 {
13897                   /* Argument is not a plain number.  */
13898                   error ("arguments to %qs should be non-negative integers",
13899                          "-mhotpatch=n,m");
13900                   break;
13901                 }
13902 else if (val1 > s390_hotpatch_hw_max
13903 || val2 > s390_hotpatch_hw_max)
13905 error ("argument to %qs is too large (max. %d)",
13906 "-mhotpatch=n,m", s390_hotpatch_hw_max);
13909 s390_hotpatch_hw_before_label = val1;
13910               s390_hotpatch_hw_after_label = val2;
13911               break;
13912             }
13913           default:
13914             gcc_unreachable ();
13915           }
13916       }
13918 /* Set up function hooks. */
13919 init_machine_status = s390_init_machine_status;
13921 s390_option_override_internal (true, &global_options, &global_options_set);
13923   /* Save the initial options in case the user does function specific
13924      options.  */
13925 target_option_default_node = build_target_option_node (&global_options);
13926 target_option_current_node = target_option_default_node;
13928 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
13929 requires the arch flags to be evaluated already. Since prefetching
13930 is beneficial on s390, we enable it if available. */
13931 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
13932 flag_prefetch_loop_arrays = 1;
13934   if (TARGET_TPF)
13935     {
13936       /* Don't emit DWARF3/4 unless specifically selected.  The TPF
13937          debuggers do not yet support DWARF 3/4.  */
13938       if (!global_options_set.x_dwarf_strict)
13939         dwarf_strict = 1;
13940       if (!global_options_set.x_dwarf_version)
13941         dwarf_version = 2;
13942     }
13944 /* Register a target-specific optimization-and-lowering pass
13945 to run immediately before prologue and epilogue generation.
13947 Registering the pass must be done at start up. It's
13948 convenient to do it here. */
13949 opt_pass *new_pass = new pass_s390_early_mach (g);
13950   struct register_pass_info insert_pass_s390_early_mach =
13951     {
13952       new_pass,                    /* pass */
13953       "pro_and_epilogue",          /* reference_pass_name */
13954       1,                           /* ref_pass_instance_number */
13955       PASS_POS_INSERT_BEFORE       /* po_op */
13956     };
13957   register_pass (&insert_pass_s390_early_mach);
13958 }
13960 #if S390_USE_TARGET_ATTRIBUTE
13961 /* Inner function to process the attribute((target(...))), take an argument
13962    and set the current options from the argument.  If we have a list,
13963    recursively go over the list.  */
13965 static bool
13966 s390_valid_target_attribute_inner_p (tree args,
13967                                      struct gcc_options *opts,
13968                                      struct gcc_options *new_opts_set,
13969                                      bool force_pragma)
13970 {
13971   char *next_optstr;
13972   bool ret = true;
13974 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
13975 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
13976   static const struct
13977   {
13978     const char *string;
13979     size_t len;
13980     enum opt_code opt;
13981     bool has_arg;
13982     int only_as_pragma;
13983   } attrs[] = {
13984     /* enum options */
13985 S390_ATTRIB ("arch=", OPT_march_, 1),
13986 S390_ATTRIB ("tune=", OPT_mtune_, 1),
13987 /* uinteger options */
13988 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
13989 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
13990 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
13991 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
13993 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
13994 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
13995 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
13996 S390_ATTRIB ("htm", OPT_mhtm, 0),
13997 S390_ATTRIB ("vx", OPT_mvx, 0),
13998 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
13999 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
14000 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
14001 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
14002 S390_PRAGMA ("zvector", OPT_mzvector, 0),
14003 /* boolean options */
14004 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
14009 /* If this is a list, recurse to get the options. */
14010 if (TREE_CODE (args) == TREE_LIST)
14013 int num_pragma_values;
14016 /* Note: attribs.c:decl_attributes prepends the values from
14017 current_target_pragma to the list of target attributes. To determine
14018 whether we're looking at a value of the attribute or the pragma we
14019 assume that the first [list_length (current_target_pragma)] values in
14020 the list are the values from the pragma. */
14021 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
14022 ? list_length (current_target_pragma) : 0;
14023       for (i = 0; args; args = TREE_CHAIN (args), i++)
14024         {
14025           bool is_pragma;
14027           is_pragma = (force_pragma || i < num_pragma_values);
14028 if (TREE_VALUE (args)
14029 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
14030                                                    opts, new_opts_set,
14031                                                    is_pragma))
14032             ret = false;
14033         }
14034       return ret;
14035     }
14039   else if (TREE_CODE (args) != STRING_CST)
14040     {
14041       error ("attribute %<target%> argument not a string");
14042       return false;
14043     }
14045 /* Handle multiple arguments separated by commas. */
14046 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
14048   while (next_optstr && *next_optstr != '\0')
14049     {
14050       char *p = next_optstr;
14051       char *orig_p = p;
14052       char *comma = strchr (next_optstr, ',');
14053       size_t len, opt_len;
14054       int opt = N_OPTS;
14055       bool opt_set_p;
14056       char ch;
14057       unsigned i;
14058       int mask = 0;
14059       enum cl_var_type var_type;
14062       if (comma)
14063         {
14065           len = comma - next_optstr;
14066           next_optstr = comma + 1;
14067         }
14068       else
14069         {
14070           len = strlen (p);
14071           next_optstr = NULL;
14072         }
14074 /* Recognize no-xxx. */
14075       if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
14076         {
14077           opt_set_p = false;
14078           p += 3;
14079           len -= 3;
14080         }
14081       else
14082         opt_set_p = true;
14084       /* Find the option.  */
14085       ch = *p;
14087       for (i = 0; i < ARRAY_SIZE (attrs); i++)
14089 opt_len = attrs[i].len;
14090 if (ch == attrs[i].string[0]
14091 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
14092               && memcmp (p, attrs[i].string, opt_len) == 0)
14093             {
14094               opt = attrs[i].opt;
14095               if (!opt_set_p && cl_options[opt].cl_reject_negative)
14096                 return false;
14097               mask = cl_options[opt].var_value;
14098               var_type = cl_options[opt].var_type;
14099               break;
14100             }
14101         }
14104 /* Process the option. */
14107 error ("attribute(target(\"%s\")) is unknown", orig_p);
14110       else if (attrs[i].only_as_pragma && !force_pragma)
14111         {
14112           /* Value is not allowed for the target attribute.  */
14113           error ("value %qs is not supported by attribute %<target%>",
14114                  attrs[i].string);
14115           ret = false;
14116         }
14118       else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
14119         {
14120           if (var_type == CLVC_BIT_CLEAR)
14121             opt_set_p = !opt_set_p;
14123           if (opt_set_p)
14124             opts->x_target_flags |= mask;
14125           else
14126             opts->x_target_flags &= ~mask;
14127           new_opts_set->x_target_flags |= mask;
14128         }
14130       else if (cl_options[opt].var_type == CLVC_BOOLEAN)
14131         {
14132           int value;
14134           if (cl_options[opt].cl_uinteger)
14135             {
14136               /* Unsigned integer argument.  Code based on the function
14137                  decode_cmdline_option () in opts-common.c.  */
14138               value = integral_argument (p + opt_len);
14139             }
14140           else
14141             value = (opt_set_p) ? 1 : 0;
14143           if (value != -1)
14144             {
14145               struct cl_decoded_option decoded;
14147 /* Value range check; only implemented for numeric and boolean
14148 options at the moment. */
14149 generate_option (opt, NULL, value, CL_TARGET, &decoded);
14150 s390_handle_option (opts, new_opts_set, &decoded, input_location);
14151 set_option (opts, new_opts_set, opt, value,
14152                           p + opt_len, DK_UNSPECIFIED, input_location,
14153                           UNKNOWN_LOCATION);
14154             }
14155           else
14156             {
14157               error ("attribute(target(\"%s\")) is unknown", orig_p);
14158               ret = false;
14159             }
14160         }
14162       else if (cl_options[opt].var_type == CLVC_ENUM)
14163         {
14164           bool arg_ok;
14165           int value;
14167           arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
14168           if (arg_ok)
14169             set_option (opts, new_opts_set, opt, value,
14170                         p + opt_len, DK_UNSPECIFIED, input_location,
14171                         UNKNOWN_LOCATION);
14172           else
14173             {
14174               error ("attribute(target(\"%s\")) is unknown", orig_p);
14175               ret = false;
14176             }
14177         }
14179       else
14180         gcc_unreachable ();
14181     }
14183   return ret;
14184 }
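/* Illustrative usage (hypothetical user code, not part of this file): the
   strings accepted by the parser above follow the attrs[] table, e.g.

     void __attribute__ ((target ("arch=z13,no-vx"))) foo (void);
     #pragma GCC target ("zvector")

   where a "no-" prefix negates a flag option, '=' introduces an argument,
   and "zvector" is accepted via the pragma only.  */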
14185 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
14187 tree
14188 s390_valid_target_attribute_tree (tree args,
14189                                   struct gcc_options *opts,
14190                                   const struct gcc_options *opts_set,
14191                                   bool force_pragma)
14192 {
14193 tree t = NULL_TREE;
14194 struct gcc_options new_opts_set;
14196 memset (&new_opts_set, 0, sizeof (new_opts_set));
14198 /* Process each of the options on the chain. */
14199   if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
14200                                              force_pragma))
14201 return error_mark_node;
14203 /* If some option was set (even if it has not changed), rerun
14204 s390_option_override_internal, and then save the options away. */
14205 if (new_opts_set.x_target_flags
14206 || new_opts_set.x_s390_arch
14207 || new_opts_set.x_s390_tune
14208 || new_opts_set.x_s390_stack_guard
14209 || new_opts_set.x_s390_stack_size
14210 || new_opts_set.x_s390_branch_cost
14211 || new_opts_set.x_s390_warn_framesize
14212 || new_opts_set.x_s390_warn_dynamicstack_p)
14213     {
14214       const unsigned char *src = (const unsigned char *)opts_set;
14215       unsigned char *dest = (unsigned char *)&new_opts_set;
14216       unsigned int i;
14218       /* Merge the original option flags into the new ones.  */
14219       for (i = 0; i < sizeof(*opts_set); i++)
14220         dest[i] |= src[i];
14222 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
14223 s390_option_override_internal (false, opts, &new_opts_set);
14224       /* Save the current options unless we are validating options for
14225          #pragma.  */
14226       t = build_target_option_node (opts);
14227     }
14229   return t;
14230 }
14231 /* Hook to validate attribute((target("string"))). */
14233 static bool
14234 s390_valid_target_attribute_p (tree fndecl,
14235                                tree ARG_UNUSED (name),
14236                                tree args,
14237                                int ARG_UNUSED (flags))
14238 {
14239 struct gcc_options func_options;
14240   tree new_target, new_optimize;
14241   bool ret = true;
14243 /* attribute((target("default"))) does nothing, beyond
14244 affecting multi-versioning. */
14245 if (TREE_VALUE (args)
14246 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
14247 && TREE_CHAIN (args) == NULL_TREE
14248       && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
14249     return true;
14251 tree old_optimize = build_optimization_node (&global_options);
14253 /* Get the optimization options of the current function. */
14254 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
14256 if (!func_optimize)
14257 func_optimize = old_optimize;
14259 /* Init func_options. */
14260 memset (&func_options, 0, sizeof (func_options));
14261 init_options_struct (&func_options, NULL);
14262 lang_hooks.init_options_struct (&func_options);
14264 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
14266   /* Initialize func_options to the default before its target options can
14267      be set.  */
14268 cl_target_option_restore (&func_options,
14269 TREE_TARGET_OPTION (target_option_default_node));
14271 new_target = s390_valid_target_attribute_tree (args, &func_options,
14272 &global_options_set,
14273                                                  (args ==
14274                                                   current_target_pragma));
14275 new_optimize = build_optimization_node (&func_options);
14276   if (new_target == error_mark_node)
14277     ret = false;
14278   else if (fndecl && new_target)
14279     {
14280       DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
14281       if (old_optimize != new_optimize)
14282         DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
14283     }
14284   return ret;
14285 }
14287 /* Restore targets globals from NEW_TREE and invalidate the
14288    s390_previous_fndecl cache.  */
14290 void
14291 s390_activate_target_options (tree new_tree)
14292 {
14293 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
14294 if (TREE_TARGET_GLOBALS (new_tree))
14295 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
14296 else if (new_tree == target_option_default_node)
14297 restore_target_globals (&default_target_globals);
14298   else
14299     TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
14300   s390_previous_fndecl = NULL_TREE;
14301 }
14303 /* Establish appropriate back-end context for processing the function
14304 FNDECL. The argument might be NULL to indicate processing at top
14305 level, outside of any function scope. */
14306 static void
14307 s390_set_current_function (tree fndecl)
14308 {
14309 /* Only change the context if the function changes. This hook is called
14310 several times in the course of compiling a function, and we don't want to
14311 slow things down too much or call target_reinit when it isn't safe. */
14312   if (fndecl == s390_previous_fndecl)
14313     return;
14315   tree old_tree;
14316 if (s390_previous_fndecl == NULL_TREE)
14317 old_tree = target_option_current_node;
14318 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
14319 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
14321 old_tree = target_option_default_node;
14323   if (fndecl == NULL_TREE)
14324     {
14325       if (old_tree != target_option_current_node)
14326         s390_activate_target_options (target_option_current_node);
14327       return;
14328     }
14330 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
14331 if (new_tree == NULL_TREE)
14332 new_tree = target_option_default_node;
14334 if (old_tree != new_tree)
14335 s390_activate_target_options (new_tree);
14336   s390_previous_fndecl = fndecl;
14337 }
14338 #endif /* S390_USE_TARGET_ATTRIBUTE */
14340 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
14342 static bool
14343 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
14344 unsigned int align ATTRIBUTE_UNUSED,
14345 enum by_pieces_operation op ATTRIBUTE_UNUSED,
14346 bool speed_p ATTRIBUTE_UNUSED)
14347 {
14348   return (size == 1 || size == 2
14349           || size == 4 || (TARGET_ZARCH && size == 8));
14350 }
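/* In effect, the generic by-pieces machinery (e.g. move_by_pieces-style
   expansion of small block operations) is used only for chunks that map
   onto a single load or store: 1, 2 or 4 bytes, plus 8 bytes when 64-bit
   z/Architecture instructions are available.  */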
14352 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
14354 static void
14355 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
14356 {
14357 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
14358 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
14359 tree call_efpc = build_call_expr (efpc, 0);
14360 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
14362 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
14363 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
14364 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
14365 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
14366 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
14367 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
14369 /* Generates the equivalent of feholdexcept (&fenv_var)
14371 fenv_var = __builtin_s390_efpc ();
14372 __builtin_s390_sfpc (fenv_var & mask) */
14373 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
14374   tree new_fpc =
14375     build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
14376 build_int_cst (unsigned_type_node,
14377 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
14378 FPC_EXCEPTION_MASK)));
14379 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
14380 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
14382 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
14384 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
14385 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
14386 build_int_cst (unsigned_type_node,
14387 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
14388 *clear = build_call_expr (sfpc, 1, new_fpc);
14390 /* Generates the equivalent of feupdateenv (fenv_var)
14392 old_fpc = __builtin_s390_efpc ();
14393 __builtin_s390_sfpc (fenv_var);
14394 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
14396 old_fpc = create_tmp_var_raw (unsigned_type_node);
14397 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
14398 old_fpc, call_efpc);
14400 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
14402 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
14403                                           build_int_cst (unsigned_type_node,
14404                                                          FPC_FLAGS_MASK));
14405 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
14406                                    build_int_cst (unsigned_type_node,
14407                                                   FPC_FLAGS_SHIFT));
14408 tree atomic_feraiseexcept
14409 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
14410 raise_old_except = build_call_expr (atomic_feraiseexcept,
14411 1, raise_old_except);
14413 *update = build2 (COMPOUND_EXPR, void_type_node,
14414 build2 (COMPOUND_EXPR, void_type_node,
14415                             store_old_fpc, set_new_fpc),
14416                     raise_old_except);
14418 #undef FPC_EXCEPTION_MASK
14419 #undef FPC_FLAGS_MASK
14420 #undef FPC_DXC_MASK
14421 #undef FPC_EXCEPTION_MASK_SHIFT
14422 #undef FPC_FLAGS_SHIFT
14423 #undef FPC_DXC_SHIFT
14424 }
14426 /* Return the vector mode to be used for inner mode MODE when doing
14427    vectorization.  */
14428 static machine_mode
14429 s390_preferred_simd_mode (machine_mode mode)
14430 {
14431   if (TARGET_VX)
14432     switch (mode)
14433       {
14434       case DFmode:
14435         return V2DFmode;
14436       case DImode:
14437         return V2DImode;
14438       case SImode:
14439         return V4SImode;
14440       case HImode:
14441         return V8HImode;
14442       case QImode:
14443         return V16QImode;
14444       default:;
14445       }
14446   return word_mode;
14447 }
14449 /* Our hardware does not require vectors to be strictly aligned. */
14450 static bool
14451 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
14452 const_tree type ATTRIBUTE_UNUSED,
14453 int misalignment ATTRIBUTE_UNUSED,
14454 bool is_packed ATTRIBUTE_UNUSED)
14455 {
14456   if (TARGET_VX)
14457     return true;
14459   return default_builtin_support_vector_misalignment (mode, type, misalignment,
14460                                                       is_packed);
14461 }
14463 /* The vector ABI requires vector types to be aligned on an 8 byte
14464    boundary (our stack alignment).  However, we allow this to be
14465    overridden by the user, although doing so definitely breaks the ABI.  */
14466 static HOST_WIDE_INT
14467 s390_vector_alignment (const_tree type)
14469 if (!TARGET_VX_ABI)
14470 return default_vector_alignment (type);
14472 if (TYPE_USER_ALIGN (type))
14473 return TYPE_ALIGN (type);
14475   return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
14476 }
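/* For example: a 16-byte vector type (TYPE_SIZE of 128 bits) is limited
   to 64-bit alignment by the MIN above, while a type the user declared
   with, say, __attribute__ ((aligned (16))) keeps its requested alignment
   through the TYPE_USER_ALIGN check.  */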
14478 #ifdef HAVE_AS_MACHINE_MACHINEMODE
14479 /* Implement TARGET_ASM_FILE_START. */
14480 static void
14481 s390_asm_file_start (void)
14482 {
14483   s390_asm_output_machine_for_arch (asm_out_file);
14484 }
14485 #endif
14487 /* Implement TARGET_ASM_FILE_END. */
14488 static void
14489 s390_asm_file_end (void)
14490 {
14491 #ifdef HAVE_AS_GNU_ATTRIBUTE
14492 varpool_node *vnode;
14493 cgraph_node *cnode;
14495 FOR_EACH_VARIABLE (vnode)
14496 if (TREE_PUBLIC (vnode->decl))
14497 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
14499 FOR_EACH_FUNCTION (cnode)
14500 if (TREE_PUBLIC (cnode->decl))
14501 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
14504 if (s390_vector_abi != 0)
14505     fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
14506              s390_vector_abi);
14507 #endif /* HAVE_AS_GNU_ATTRIBUTE */
14508   file_end_indicate_exec_stack ();
14509 }
14511 /* Return true if TYPE is a vector bool type. */
14512 static bool
14513 s390_vector_bool_type_p (const_tree type)
14514 {
14515   return TYPE_VECTOR_OPAQUE (type);
14516 }
14518 /* Return the diagnostic message string if the binary operation OP is
14519 not permitted on TYPE1 and TYPE2, NULL otherwise. */
14520 static const char*
14521 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
14522 {
14523   bool bool1_p, bool2_p;
14524   bool plusminus_p;
14525   bool muldiv_p;
14526   bool compare_p;
14527   machine_mode mode1, mode2;
14529   if (!TARGET_ZVECTOR)
14530     return NULL;
14532   if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
14533     return NULL;
14535 bool1_p = s390_vector_bool_type_p (type1);
14536 bool2_p = s390_vector_bool_type_p (type2);
14538   /* Mixing signed and unsigned types is forbidden for all
14539      operation combinations.  */
14540   if (!bool1_p && !bool2_p
14541       && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
14542     return N_("types differ in signedness");
14544 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
14545 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
14546 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
14547 || op == ROUND_DIV_EXPR);
14548 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
14549 || op == EQ_EXPR || op == NE_EXPR);
14551 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
14552 return N_("binary operator does not support two vector bool operands");
14554 if (bool1_p != bool2_p && (muldiv_p || compare_p))
14555 return N_("binary operator does not support vector bool operand");
14557 mode1 = TYPE_MODE (type1);
14558 mode2 = TYPE_MODE (type2);
14560 if (bool1_p != bool2_p && plusminus_p
14561 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
14562 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
14563 return N_("binary operator does not support mixing vector "
14564 "bool with floating point vector operands");
14569 /* Initialize GCC target structure. */
14571 #undef TARGET_ASM_ALIGNED_HI_OP
14572 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
14573 #undef TARGET_ASM_ALIGNED_DI_OP
14574 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
14575 #undef TARGET_ASM_INTEGER
14576 #define TARGET_ASM_INTEGER s390_assemble_integer
14578 #undef TARGET_ASM_OPEN_PAREN
14579 #define TARGET_ASM_OPEN_PAREN ""
14581 #undef TARGET_ASM_CLOSE_PAREN
14582 #define TARGET_ASM_CLOSE_PAREN ""
14584 #undef TARGET_OPTION_OVERRIDE
14585 #define TARGET_OPTION_OVERRIDE s390_option_override
14587 #undef TARGET_ENCODE_SECTION_INFO
14588 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
14590 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14591 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
14593 #ifdef HAVE_AS_TLS
14594 #undef TARGET_HAVE_TLS
14595 #define TARGET_HAVE_TLS true
14596 #endif
14597 #undef TARGET_CANNOT_FORCE_CONST_MEM
14598 #define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem
14600 #undef TARGET_DELEGITIMIZE_ADDRESS
14601 #define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address
14603 #undef TARGET_LEGITIMIZE_ADDRESS
14604 #define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address
14606 #undef TARGET_RETURN_IN_MEMORY
14607 #define TARGET_RETURN_IN_MEMORY s390_return_in_memory
14609 #undef TARGET_INIT_BUILTINS
14610 #define TARGET_INIT_BUILTINS s390_init_builtins
14611 #undef TARGET_EXPAND_BUILTIN
14612 #define TARGET_EXPAND_BUILTIN s390_expand_builtin
14613 #undef TARGET_BUILTIN_DECL
14614 #define TARGET_BUILTIN_DECL s390_builtin_decl
14616 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
14617 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra
14619 #undef TARGET_ASM_OUTPUT_MI_THUNK
14620 #define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
14621 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
14622 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
14624 #undef TARGET_SCHED_ADJUST_PRIORITY
14625 #define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
14626 #undef TARGET_SCHED_ISSUE_RATE
14627 #define TARGET_SCHED_ISSUE_RATE s390_issue_rate
14628 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
14629 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead
14631 #undef TARGET_SCHED_VARIABLE_ISSUE
14632 #define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
14633 #undef TARGET_SCHED_REORDER
14634 #define TARGET_SCHED_REORDER s390_sched_reorder
14635 #undef TARGET_SCHED_INIT
14636 #define TARGET_SCHED_INIT s390_sched_init
14638 #undef TARGET_CANNOT_COPY_INSN_P
14639 #define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
14640 #undef TARGET_RTX_COSTS
14641 #define TARGET_RTX_COSTS s390_rtx_costs
14642 #undef TARGET_ADDRESS_COST
14643 #define TARGET_ADDRESS_COST s390_address_cost
14644 #undef TARGET_REGISTER_MOVE_COST
14645 #define TARGET_REGISTER_MOVE_COST s390_register_move_cost
14646 #undef TARGET_MEMORY_MOVE_COST
14647 #define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
14649 #undef TARGET_MACHINE_DEPENDENT_REORG
14650 #define TARGET_MACHINE_DEPENDENT_REORG s390_reorg
14652 #undef TARGET_VALID_POINTER_MODE
14653 #define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode
14655 #undef TARGET_BUILD_BUILTIN_VA_LIST
14656 #define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
14657 #undef TARGET_EXPAND_BUILTIN_VA_START
14658 #define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
14659 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
14660 #define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg
14662 #undef TARGET_PROMOTE_FUNCTION_MODE
14663 #define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
14664 #undef TARGET_PASS_BY_REFERENCE
14665 #define TARGET_PASS_BY_REFERENCE s390_pass_by_reference
14667 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
14668 #define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
14669 #undef TARGET_FUNCTION_ARG
14670 #define TARGET_FUNCTION_ARG s390_function_arg
14671 #undef TARGET_FUNCTION_ARG_ADVANCE
14672 #define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
14673 #undef TARGET_FUNCTION_VALUE
14674 #define TARGET_FUNCTION_VALUE s390_function_value
14675 #undef TARGET_LIBCALL_VALUE
14676 #define TARGET_LIBCALL_VALUE s390_libcall_value
14677 #undef TARGET_STRICT_ARGUMENT_NAMING
14678 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
14680 #undef TARGET_KEEP_LEAF_WHEN_PROFILED
14681 #define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled
14683 #undef TARGET_FIXED_CONDITION_CODE_REGS
14684 #define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs
14686 #undef TARGET_CC_MODES_COMPATIBLE
14687 #define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
14689 #undef TARGET_INVALID_WITHIN_DOLOOP
14690 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
14692 #ifdef HAVE_AS_TLS
14693 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
14694 #define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
14695 #endif
14697 #undef TARGET_DWARF_FRAME_REG_MODE
14698 #define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
14700 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
14701 #undef TARGET_MANGLE_TYPE
14702 #define TARGET_MANGLE_TYPE s390_mangle_type
14703 #endif
14705 #undef TARGET_SCALAR_MODE_SUPPORTED_P
14706 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
14708 #undef TARGET_VECTOR_MODE_SUPPORTED_P
14709 #define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
14711 #undef TARGET_PREFERRED_RELOAD_CLASS
14712 #define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class
14714 #undef TARGET_SECONDARY_RELOAD
14715 #define TARGET_SECONDARY_RELOAD s390_secondary_reload
14717 #undef TARGET_LIBGCC_CMP_RETURN_MODE
14718 #define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode
14720 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
14721 #define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode
14723 #undef TARGET_LEGITIMATE_ADDRESS_P
14724 #define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p
14726 #undef TARGET_LEGITIMATE_CONSTANT_P
14727 #define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p
14729 #undef TARGET_LRA_P
14730 #define TARGET_LRA_P s390_lra_p
14732 #undef TARGET_CAN_ELIMINATE
14733 #define TARGET_CAN_ELIMINATE s390_can_eliminate
14735 #undef TARGET_CONDITIONAL_REGISTER_USAGE
14736 #define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage
14738 #undef TARGET_LOOP_UNROLL_ADJUST
14739 #define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust
14741 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
14742 #define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
14743 #undef TARGET_TRAMPOLINE_INIT
14744 #define TARGET_TRAMPOLINE_INIT s390_trampoline_init
14746 #undef TARGET_UNWIND_WORD_MODE
14747 #define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode
14749 #undef TARGET_CANONICALIZE_COMPARISON
14750 #define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison
14752 #undef TARGET_HARD_REGNO_SCRATCH_OK
14753 #define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok
14755 #undef TARGET_ATTRIBUTE_TABLE
14756 #define TARGET_ATTRIBUTE_TABLE s390_attribute_table
14758 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
14759 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
14761 #undef TARGET_SET_UP_BY_PROLOGUE
14762 #define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue
14764 #undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
14765 #define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
14766 s390_use_by_pieces_infrastructure_p
14768 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
14769 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv
14771 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
14772 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn
14774 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
14775 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode
14777 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
14778 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment
14780 #undef TARGET_VECTOR_ALIGNMENT
14781 #define TARGET_VECTOR_ALIGNMENT s390_vector_alignment
14783 #undef TARGET_INVALID_BINARY_OP
14784 #define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
14786 #ifdef HAVE_AS_MACHINE_MACHINEMODE
14787 #undef TARGET_ASM_FILE_START
14788 #define TARGET_ASM_FILE_START s390_asm_file_start
14789 #endif
14791 #undef TARGET_ASM_FILE_END
14792 #define TARGET_ASM_FILE_END s390_asm_file_end
14794 #if S390_USE_TARGET_ATTRIBUTE
14795 #undef TARGET_SET_CURRENT_FUNCTION
14796 #define TARGET_SET_CURRENT_FUNCTION s390_set_current_function
14798 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
14799 #define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
14800 #endif
14802 #undef TARGET_OPTION_RESTORE
14803 #define TARGET_OPTION_RESTORE s390_function_specific_restore
14805 struct gcc_target targetm = TARGET_INITIALIZER;
14807 #include "gt-s390.h"