/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2017 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>. */
#include "coretypes.h"
#include "target-globals.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "conditions.h"
#include "insn-attr.h"
#include "cfgcleanup.h"
#include "langhooks.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-pass.h"
#include "tm-constrs.h"

/* This file should be included last. */
#include "target-def.h"

/* Remember the last target of s390_set_current_function. */
static GTY(()) tree s390_previous_fndecl;
/* Define the specific costs for a given cpu. */

struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction. */
  const int mghi;     /* cost of an MGHI instruction. */
  const int mh;       /* cost of an MH instruction. */
  const int mhi;      /* cost of an MHI instruction. */
  const int ml;       /* cost of an ML instruction. */
  const int mr;       /* cost of an MR instruction. */
  const int ms;       /* cost of an MS instruction. */
  const int msg;      /* cost of an MSG instruction. */
  const int msgf;     /* cost of an MSGF instruction. */
  const int msgfr;    /* cost of an MSGFR instruction. */
  const int msgr;     /* cost of an MSGR instruction. */
  const int msr;      /* cost of an MSR instruction. */
  const int mult_df;  /* cost of multiplication in DFmode. */
  const int mxbr;     /* cost of multiplication in TFmode. */
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode. */
  const int sqdbr;    /* cost of square root in DFmode. */
  const int sqebr;    /* cost of square root in SFmode. */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode. */
  const int maebr;    /* cost of multiply and add in SFmode. */
  /* division */
  const int dxbr;
  const int ddbr;
  const int debr;
  const int dlgr;
  const int dlr;
  const int dr;
  const int dsgfr;
  const int dsgr;
};

#define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
static const
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),     /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (4),     /* MHI   */
  COSTS_N_INSNS (5),     /* ML    */
  COSTS_N_INSNS (5),     /* MR    */
  COSTS_N_INSNS (4),     /* MS    */
  COSTS_N_INSNS (15),    /* MSG   */
  COSTS_N_INSNS (7),     /* MSGF  */
  COSTS_N_INSNS (7),     /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (7),     /* multiplication in DFmode */
  COSTS_N_INSNS (13),    /* MXBR  */
  COSTS_N_INSNS (136),   /* SQXBR */
  COSTS_N_INSNS (44),    /* SQDBR */
  COSTS_N_INSNS (35),    /* SQEBR */
  COSTS_N_INSNS (18),    /* MADBR */
  COSTS_N_INSNS (13),    /* MAEBR */
  COSTS_N_INSNS (134),   /* DXBR  */
  COSTS_N_INSNS (30),    /* DDBR  */
  COSTS_N_INSNS (27),    /* DEBR  */
  COSTS_N_INSNS (220),   /* DLGR  */
  COSTS_N_INSNS (34),    /* DLR   */
  COSTS_N_INSNS (34),    /* DR    */
  COSTS_N_INSNS (32),    /* DSGFR */
  COSTS_N_INSNS (32),    /* DSGR  */
};
static const
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR  */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR  */
  COSTS_N_INSNS (40),    /* DDBR  */
  COSTS_N_INSNS (26),    /* DEBR  */
  COSTS_N_INSNS (176),   /* DLGR  */
  COSTS_N_INSNS (31),    /* DLR   */
  COSTS_N_INSNS (31),    /* DR    */
  COSTS_N_INSNS (31),    /* DSGFR */
  COSTS_N_INSNS (31),    /* DSGR  */
};
static const
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR  */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR  */
  COSTS_N_INSNS (40),    /* DDBR  */
  COSTS_N_INSNS (26),    /* DEBR  */
  COSTS_N_INSNS (30),    /* DLGR  */
  COSTS_N_INSNS (23),    /* DLR   */
  COSTS_N_INSNS (23),    /* DR    */
  COSTS_N_INSNS (24),    /* DSGFR */
  COSTS_N_INSNS (24),    /* DSGR  */
};
static const
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),    /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (10),    /* MH    */
  COSTS_N_INSNS (10),    /* MHI   */
  COSTS_N_INSNS (10),    /* ML    */
  COSTS_N_INSNS (10),    /* MR    */
  COSTS_N_INSNS (10),    /* MS    */
  COSTS_N_INSNS (10),    /* MSG   */
  COSTS_N_INSNS (10),    /* MSGF  */
  COSTS_N_INSNS (10),    /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (10),    /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (50),    /* MXBR  */
  COSTS_N_INSNS (120),   /* SQXBR */
  COSTS_N_INSNS (52),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (111),   /* DXBR  */
  COSTS_N_INSNS (39),    /* DDBR  */
  COSTS_N_INSNS (32),    /* DEBR  */
  COSTS_N_INSNS (160),   /* DLGR  */
  COSTS_N_INSNS (71),    /* DLR   */
  COSTS_N_INSNS (71),    /* DR    */
  COSTS_N_INSNS (71),    /* DSGFR */
  COSTS_N_INSNS (71),    /* DSGR  */
};
static const
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (101),   /* DXBR B+101 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
static const
struct processor_costs zEC12_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (131),   /* DXBR B+131 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};
static const
struct
{
  const char *const name;
  const enum processor_type processor;
  const struct processor_costs *cost;
}
const processor_table[] =
{
  { "g5",     PROCESSOR_9672_G5,     &z900_cost },
  { "g6",     PROCESSOR_9672_G6,     &z900_cost },
  { "z900",   PROCESSOR_2064_Z900,   &z900_cost },
  { "z990",   PROCESSOR_2084_Z990,   &z990_cost },
  { "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
  { "z9-ec",  PROCESSOR_2094_Z9_EC,  &z9_109_cost },
  { "z10",    PROCESSOR_2097_Z10,    &z10_cost },
  { "z196",   PROCESSOR_2817_Z196,   &z196_cost },
  { "zEC12",  PROCESSOR_2827_ZEC12,  &zEC12_cost },
  { "z13",    PROCESSOR_2964_Z13,    &zEC12_cost },
  { "arch12", PROCESSOR_ARCH12,      &zEC12_cost },
  { "native", PROCESSOR_NATIVE,      NULL }
};
extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
static rtx_insn *last_scheduled_insn;
#define MAX_SCHED_UNITS 3
static int last_scheduled_unit_distance[MAX_SCHED_UNITS];

/* The maximum score added for an instruction whose unit hasn't been
   in use for MAX_SCHED_MIX_DISTANCE steps.  Increase this value to
   give instruction mix scheduling more priority over instruction
   grouping. */
#define MAX_SCHED_MIX_SCORE 8

/* The maximum distance up to which individual scores will be
   calculated.  Everything beyond this gives MAX_SCHED_MIX_SCORE.
   Increase this with the OOO window size of the machine. */
#define MAX_SCHED_MIX_DISTANCE 100
/* Structure used to hold the components of an S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
        base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant. */
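/* A minimal sketch of an address structure matching the description
   above (the field names here are illustrative, not authoritative):

     struct s390_address
     {
       rtx base;    (base register, or NULL_RTX)
       rtx indx;    (index register, or NULL_RTX)
       rtx disp;    (displacement, or NULL_RTX)
     };  */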
/* The following structure is embedded in the machine
   specific part of struct function. */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame. */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* First and last gpr for which slots in the register
     save area are reserved. */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
      0 - does not need to be saved at all
     -1 - stack slot  */
#define SAVE_SLOT_NONE   0
#define SAVE_SLOT_STACK -1
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored. */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;

  /* Number of floating point registers f8-f15 which must be saved. */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack. */
  bool save_return_addr_p;

  /* Size of stack frame. */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function. */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register. */
  rtx base_reg;

  /* True if we may need to perform branch splitting. */
  bool split_branches_pending_p;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     FPRs. */
  bool tbegin_p;

  /* For -fsplit-stack support: A stack local which holds a pointer to
     the stack arguments for a function with a variable number of
     arguments.  This is set at the start of the function and is used
     to initialize the overflow_arg_area field of the va_list
     structure. */
  rtx split_stack_varargs_pointer;
};
/* A few accessor macros for struct cfun->machine->s390_frame_layout. */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_save_arg_fprs_p (!!(TARGET_64BIT                          \
                                 ? cfun_frame_layout.fpr_bitmap & 0x0f \
                                 : cfun_frame_layout.fpr_bitmap & 0x03))
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
#define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (REGNO - FPR0_REGNUM)))
#define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (REGNO - FPR0_REGNUM))))
#define cfun_gpr_save_slot(REGNO) \
  cfun->machine->frame_layout.gpr_save_slots[REGNO]

/* Number of GPRs and FPRs used for argument passing. */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
#define VEC_ARG_NUM_REG 8
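/* For reference: in the z/Architecture ELF ABI these counts correspond
   to the argument registers r2-r6 for GPRs, f0/f2/f4/f6 (64 bit) or
   f0/f2 (31 bit) for FPRs, and v24-v31 for vector arguments. */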
/* A couple of shortcuts. */
#define CONST_OK_FOR_J(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE)                               \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))

/* That's the read-ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU. */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
/* Indicate which ABI has been used for passing vector args.
   0 - no vector type arguments have been passed where the ABI is relevant
   1 - the old ABI has been used
   2 - a vector type argument has been passed either in a vector register
       or on the stack by value  */
static int s390_vector_abi = 0;

/* Set the vector ABI marker if TYPE is subject to the vector ABI
   switch.  The vector ABI affects only vector data types.  There are
   two aspects of the vector ABI relevant here:

   1. vectors >= 16 bytes have an alignment of 8 bytes with the new
      ABI and natural alignment with the old.

   2. vectors <= 16 bytes are passed in VRs or by value on the stack
      with the new ABI but by reference on the stack with the old.

   If ARG_P is true TYPE is used for a function argument or return
   value.  The ABI marker then is set for all vector data types.  If
   ARG_P is false only type 1 vectors are being checked. */
static void
s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
{
  static hash_set<const_tree> visited_types_hash;

  if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
    return;

  if (visited_types_hash.contains (type))
    return;

  visited_types_hash.add (type);

  if (VECTOR_TYPE_P (type))
    {
      int type_size = int_size_in_bytes (type);

      /* Outside arguments only the alignment is changing and this
         only happens for vector types >= 16 bytes. */
      if (!arg_p && type_size < 16)
        return;

      /* In arguments vector types > 16 are passed as before (GCC
         never enforced the bigger alignment for arguments which was
         required by the old vector ABI).  However, it might still be
         ABI relevant due to the changed alignment if it is a struct
         member. */
      if (arg_p && type_size > 16 && !in_struct_p)
        return;

      s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
    }
  else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
    {
      /* ARRAY_TYPE: Since with neither of the ABIs we have more than
         natural alignment there will never be ABI dependent padding
         in an array type.  That's why we do not set in_struct_p to
         true here. */
      s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
    {
      tree arg_chain;

      /* Check the return type. */
      s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);

      for (arg_chain = TYPE_ARG_TYPES (type);
           arg_chain;
           arg_chain = TREE_CHAIN (arg_chain))
        s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
    }
  else if (RECORD_OR_UNION_TYPE_P (type))
    {
      tree field;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
        {
          if (TREE_CODE (field) != FIELD_DECL)
            continue;

          s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
        }
    }
}
/* System z builtins. */

#include "s390-builtins.h"

const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int
bflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(...)
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) FLAGS,
#include "s390-builtins.def"
    0
  };

const unsigned int
opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(...)
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) OPFLAGS,
#include "s390-builtins.def"
    0
  };

tree s390_builtin_types[BT_MAX];
tree s390_builtin_fn_types[BT_FN_MAX];
tree s390_builtin_decls[S390_BUILTIN_MAX +
                        S390_OVERLOADED_BUILTIN_MAX +
                        S390_OVERLOADED_BUILTIN_VAR_MAX];

static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
  CODE_FOR_nothing
};
static void
s390_init_builtins (void)
{
  /* These definitions are being used in s390-builtins.def. */
  tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
                                       NULL, NULL);
  tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
  tree c_uint64_type_node;

  /* The uint64_type_node from tree.c is not compatible to the C99
     uint64_t data type.  What we want is c_uint64_type_node from
     c-common.c.  But since backend code is not supposed to interface
     with the frontend we recreate it here. */
  if (TARGET_64BIT)
    c_uint64_type_node = long_unsigned_type_node;
  else
    c_uint64_type_node = long_long_unsigned_type_node;

#undef DEF_TYPE
#define DEF_TYPE(INDEX, NODE, CONST_P)                  \
  if (s390_builtin_types[INDEX] == NULL)                \
    s390_builtin_types[INDEX] = (!CONST_P) ?            \
      (NODE) : build_type_variant ((NODE), 1, 0);

#undef DEF_POINTER_TYPE
#define DEF_POINTER_TYPE(INDEX, INDEX_BASE)             \
  if (s390_builtin_types[INDEX] == NULL)                \
    s390_builtin_types[INDEX] =                         \
      build_pointer_type (s390_builtin_types[INDEX_BASE]);

#undef DEF_DISTINCT_TYPE
#define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE)            \
  if (s390_builtin_types[INDEX] == NULL)                \
    s390_builtin_types[INDEX] =                         \
      build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);

#undef DEF_VECTOR_TYPE
#define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS)    \
  if (s390_builtin_types[INDEX] == NULL)                \
    s390_builtin_types[INDEX] =                         \
      build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_OPAQUE_VECTOR_TYPE
#define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
  if (s390_builtin_types[INDEX] == NULL)                \
    s390_builtin_types[INDEX] =                         \
      build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_FN_TYPE
#define DEF_FN_TYPE(INDEX, args...)                     \
  if (s390_builtin_fn_types[INDEX] == NULL)             \
    s390_builtin_fn_types[INDEX] =                      \
      build_function_type_list (args, NULL_TREE);
#undef DEF_OV_TYPE
#define DEF_OV_TYPE(...)
#include "s390-builtin-types.def"

#undef B_DEF
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE)    \
  if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL)          \
    s390_builtin_decls[S390_BUILTIN_##NAME] =                   \
      add_builtin_function ("__builtin_" #NAME,                 \
                            s390_builtin_fn_types[FNTYPE],      \
                            S390_BUILTIN_##NAME,                \
                            BUILT_IN_MD,                        \
                            NULL,                               \
                            ATTRS);
#undef OB_DEF
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
  if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
      == NULL)                                                  \
    s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
      add_builtin_function ("__builtin_" #NAME,                 \
                            s390_builtin_fn_types[FNTYPE],      \
                            S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
                            BUILT_IN_MD,                        \
                            NULL,                               \
                            0);
#undef OB_DEF_VAR
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
}
/* Return true if ARG is appropriate as argument number ARGNUM of
   builtin DECL.  The operand flags from s390-builtins.def have to
   be passed as OP_FLAGS. */
bool
s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
{
  if (O_UIMM_P (op_flags))
    {
      int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_U1];

      if (!tree_fits_uhwi_p (arg)
          || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
        {
          error ("constant argument %d for builtin %qF is out of range (0.."
                 HOST_WIDE_INT_PRINT_UNSIGNED ")",
                 argnum, decl,
                 (HOST_WIDE_INT_1U << bitwidth) - 1);
          return false;
        }
    }

  if (O_SIMM_P (op_flags))
    {
      int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_S2];

      if (!tree_fits_shwi_p (arg)
          || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
          || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
        {
          error ("constant argument %d for builtin %qF is out of range ("
                 HOST_WIDE_INT_PRINT_DEC ".."
                 HOST_WIDE_INT_PRINT_DEC ")",
                 argnum, decl,
                 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
                 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
          return false;
        }
    }
  return true;
}
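/* Illustration (not code from the original file): an O_U4 operand has
   bitwidth 4 and accepts the unsigned range 0..15, so a constant 16
   triggers the out-of-range error above; an O_S8 operand accepts the
   signed range -128..127. */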
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored. */

static rtx
s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 6

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;
  tree arg;
  call_expr_arg_iterator iter;
  unsigned int all_op_flags = opflags_for_builtin (fcode);
  machine_mode last_vec_mode = VOIDmode;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr,
               "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
               (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
               bflags_for_builtin (fcode));
    }
  if (S390_USE_TARGET_ATTRIBUTE)
    {
      unsigned int bflags;

      bflags = bflags_for_builtin (fcode);
      if ((bflags & B_HTM) && !TARGET_HTM)
        {
          error ("builtin %qF is not supported without -mhtm "
                 "(default with -march=zEC12 and higher).", fndecl);
          return const0_rtx;
        }
      if (((bflags & B_VX) || (bflags & B_VXE)) && !TARGET_VX)
        {
          error ("builtin %qF requires -mvx "
                 "(default with -march=z13 and higher).", fndecl);
          return const0_rtx;
        }

      if ((bflags & B_VXE) && !TARGET_VXE)
        {
          error ("builtin %qF requires arch12 or higher.", fndecl);
          return const0_rtx;
        }
    }

  if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
      && fcode < S390_ALL_BUILTIN_MAX)
    gcc_unreachable ();
  else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
    {
      icode = code_for_builtin[fcode];
      /* Set a flag in the machine specific cfun part in order to support
         saving/restoring of FPRs. */
      if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
        cfun->machine->tbegin_p = true;
    }
  else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
    {
      error ("unresolved overloaded builtin");
      return const0_rtx;
    }
  else
    internal_error ("bad builtin fcode");

  if (icode == 0)
    internal_error ("bad builtin icode");
  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      /* There are builtins (e.g. vec_promote) with no vector
         arguments but an element selector.  So we have to also look
         at the vector return type when emitting the modulo
         operation. */
      if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
        last_vec_mode = insn_data[icode].operand[0].mode;
    }
  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      rtx tmp_rtx;
      const struct insn_operand_data *insn_op;
      unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);

      all_op_flags = all_op_flags >> O_SHIFT;

      if (arg == error_mark_node)
        return NULL_RTX;
      if (arity >= MAX_ARGS)
        return NULL_RTX;

      if (O_IMM_P (op_flags)
          && TREE_CODE (arg) != INTEGER_CST)
        {
          error ("constant value required for builtin %qF argument %d",
                 fndecl, arity + 1);
          return const0_rtx;
        }

      if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
        return const0_rtx;

      insn_op = &insn_data[icode].operand[arity + nonvoid];
      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      /* expand_expr truncates constants to the target mode only if it
         is "convenient".  However, our checks below rely on this
         being done. */
      if (CONST_INT_P (op[arity])
          && SCALAR_INT_MODE_P (insn_op->mode)
          && GET_MODE (op[arity]) != insn_op->mode)
        op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
                                                 insn_op->mode));

      /* Wrap the expanded RTX for pointer types into a MEM expr with
         the proper mode.  This allows us to use e.g. (match_operand
         "memory_operand"..) in the insn patterns instead of (mem
         (match_operand "address_operand")).  This is helpful for
         patterns not just accepting MEMs. */
      if (POINTER_TYPE_P (TREE_TYPE (arg))
          && insn_op->predicate != address_operand)
        op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);

      /* Expand the modulo operation required on element selectors. */
      if (op_flags == O_ELEM)
        {
          gcc_assert (last_vec_mode != VOIDmode);
          op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
                                             op[arity],
                                             GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
                                             NULL_RTX, 1, OPTAB_DIRECT);
        }

      /* Record the vector mode used for an element selector.  This assumes:
         1. There is no builtin with two different vector modes and an element selector
         2. The element selector comes after the vector type it is referring to.
         This is currently true for all the builtins but FIXME we
         should better check for that. */
      if (VECTOR_MODE_P (insn_op->mode))
        last_vec_mode = insn_op->mode;

      if (insn_op->predicate (op[arity], insn_op->mode))
        {
          arity++;
          continue;
        }

      if (MEM_P (op[arity])
          && insn_op->predicate == memory_operand
          && (GET_MODE (XEXP (op[arity], 0)) == Pmode
              || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
        {
          op[arity] = replace_equiv_address (op[arity],
                                             copy_to_mode_reg (Pmode,
                                               XEXP (op[arity], 0)));
        }
      /* Some of the builtins require different modes/types than the
         pattern in order to implement a specific API.  Instead of
         adding many expanders which do the mode change we do it here.
         E.g. s390_vec_add_u128 required to have vector unsigned char
         arguments is mapped to addti3. */
      else if (insn_op->mode != VOIDmode
               && GET_MODE (op[arity]) != VOIDmode
               && GET_MODE (op[arity]) != insn_op->mode
               && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
                                                   GET_MODE (op[arity]), 0))
                   != NULL_RTX))
        {
          op[arity] = tmp_rtx;
        }
      else if (GET_MODE (op[arity]) == insn_op->mode
               || GET_MODE (op[arity]) == VOIDmode
               || (insn_op->predicate == address_operand
                   && GET_MODE (op[arity]) == Pmode))
        {
          /* An address_operand usually has VOIDmode in the expander
             so we cannot use this. */
          machine_mode target_mode =
            (insn_op->predicate == address_operand
             ? Pmode : insn_op->mode);
          op[arity] = copy_to_mode_reg (target_mode, op[arity]);
        }

      if (!insn_op->predicate (op[arity], insn_op->mode))
        {
          error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
          return const0_rtx;
        }
      arity++;
    }
  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0]);
      else
        pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1]);
      else
        pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 3:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 4:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    case 5:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      break;
    case 6:
      if (nonvoid)
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
      else
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
      break;
    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
static const int s390_hotpatch_hw_max = 1000000;
static int s390_hotpatch_hw_before_label = 0;
static int s390_hotpatch_hw_after_label = 0;

/* Check whether the hotpatch attribute is applied to a function and, if it has
   an argument, the argument is valid. */

static tree
s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree expr;
  tree expr2;
  int err;

  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }
  if (args != NULL && TREE_CHAIN (args) != NULL)
    {
      expr = TREE_VALUE (args);
      expr2 = TREE_VALUE (TREE_CHAIN (args));
    }
  if (args == NULL || TREE_CHAIN (args) == NULL)
    err = 1;
  else if (TREE_CODE (expr) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
           || wi::gtu_p (expr, s390_hotpatch_hw_max))
    err = 1;
  else if (TREE_CODE (expr2) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
           || wi::gtu_p (expr2, s390_hotpatch_hw_max))
    err = 1;
  else
    err = 0;
  if (err)
    {
      error ("requested %qE attribute is not a comma separated pair of"
             " non-negative integer constants or too large (max. %d)", name,
             s390_hotpatch_hw_max);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Expand the s390_vector_bool type attribute. */

static tree
s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  machine_mode mode;

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);
  switch (mode)
    {
    case DImode: case V2DImode: result = s390_builtin_types[BT_BV2DI]; break;
    case SImode: case V4SImode: result = s390_builtin_types[BT_BV4SI]; break;
    case HImode: case V8HImode: result = s390_builtin_types[BT_BV8HI]; break;
    case QImode: case V16QImode: result = s390_builtin_types[BT_BV16QI];
      break;
    default:
      break;
    }

  *no_add_attrs = true;  /* No need to hang on to the attribute. */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
static const struct attribute_spec s390_attribute_table[] = {
  { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
  { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
  /* End element. */
  { NULL, 0, 0, false, false, false, NULL, false }
};
/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label. */
int
s390_label_align (rtx_insn *label)
{
  rtx_insn *prev_insn = prev_active_insn (label);
  rtx set, src;

  if (prev_insn == NULL_RTX)
    goto old;

  set = single_set (prev_insn);

  if (set == NULL_RTX)
    goto old;

  src = SET_SRC (set);

  /* Don't align literal pool base labels. */
  if (GET_CODE (src) == UNSPEC
      && XINT (src, 1) == UNSPEC_MAIN_BASE)
    return 0;

 old:
  return align_labels_log;
}
static GTY(()) rtx got_symbol;

/* Return the GOT table symbol.  The symbol will be created when the
   function is invoked for the first time. */

static rtx
s390_got_symbol (void)
{
  if (!got_symbol)
    {
      got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
    }

  return got_symbol;
}

static machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static machine_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE. */
static bool
s390_scalar_mode_supported_p (machine_mode mode)
{
  /* In contrast to the default implementation reject TImode constants on 31-bit
     TARGET_ZARCH for ABI compliance. */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
}

/* Return true if the back end supports vector mode MODE. */
static bool
s390_vector_mode_supported_p (machine_mode mode)
{
  machine_mode inner;

  if (!VECTOR_MODE_P (mode)
      || !TARGET_VX
      || GET_MODE_SIZE (mode) > 16)
    return false;

  inner = GET_MODE_INNER (mode);

  switch (inner)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode:
    case SFmode:
    case DFmode:
    case TFmode:
      return true;
    default:
      return false;
    }
}
/* Set the has_landing_pad_p flag in struct machine_function to VALUE. */

static void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode. */

static machine_mode
s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  switch (m1)
    {
    case CCZmode:
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
          || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
        return m2;
      return VOIDmode;

    default:
      return VOIDmode;
    }
}
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE. */

static bool
s390_match_ccmode_set (rtx set, machine_mode req_mode)
{
  machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  /* These modes are supposed to be used only in CC consumer
     patterns. */
  gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
              && req_mode != CCVFALLmode && req_mode != CCVFANYmode);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return true;

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCZ1mode:
    case CCSmode:
    case CCSRmode:
    case CCUmode:
    case CCURmode:
    case CCLmode:
    case CCL1mode:
    case CCL2mode:
    case CCL3mode:
    case CCT1mode:
    case CCT2mode:
    case CCT3mode:
      if (req_mode != set_mode)
        return false;
      break;

    case CCZmode:
      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
          && req_mode != CCSRmode && req_mode != CCURmode
          && req_mode != CCZ1mode)
        return false;
      break;

    case CCAPmode:
    case CCANmode:
      if (req_mode != CCAmode)
        return false;
      break;

    default:
      gcc_unreachable ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false. */

bool
s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure. */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
        rtx set = XVECEXP (PATTERN (insn), 0, i);
        if (GET_CODE (set) == SET)
          if (!s390_match_ccmode_set (set, req_mode))
            return false;
      }

  return true;
}
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM). */

machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_WIDE_INT as well. */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     int a;
     if ((a & (16 + 128)) == 16)  -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2 */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
        return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
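/* Worked example for the CCT1/CCT2 case above (illustration only):
   with OP1 = 16 + 128 = 0x90 and OP2 = 16, bit1 = exact_log2 (16) = 4
   and bit0 = exact_log2 (0x90 ^ 0x10) = exact_log2 (128) = 7; since
   bit0 > bit1 the result is CCT1mode, matching the comment's
   "(a & (16 + 128)) == 16 -> CCT1". */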
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison. */

machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
  switch (code)
    {
    case EQ:
    case NE:
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCAPmode;
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
        return CCAPmode;
      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
           || GET_CODE (op1) == NEG)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCLmode;

      if (GET_CODE (op0) == AND)
        {
          /* Check whether we can potentially do it via TM. */
          machine_mode ccmode;
          ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
          if (ccmode != VOIDmode)
            {
              /* Relax CCTmode to CCZmode to allow fall-back to AND
                 if that turns out to be beneficial. */
              return ccmode == CCTmode ? CCZmode : ccmode;
            }
        }

      if (register_operand (op0, HImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
        return CCT3mode;
      if (register_operand (op0, QImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
        return CCT3mode;

      return CCZmode;

    case LE:
    case LT:
    case GE:
    case GT:
      /* The only overflow condition of NEG and ABS happens when
         -INT_MAX is used as parameter, which stays negative.  So
         we have an overflow from a positive value to a negative.
         Using CCAP mode the resulting cc can be used for comparisons. */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCAPmode;

      /* If constants are involved in an add instruction it is possible to use
         the resulting cc for comparisons with zero.  Knowing the sign of the
         constant the overflow behavior gets predictable.  e.g.:
           int a, b; if ((b = a + c) > 0)
         with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
              || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
                  /* Avoid INT32_MIN on 32 bit. */
                  && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
        {
          if (INTVAL (XEXP (op0, 1)) < 0)
            return CCANmode;
          else
            return CCAPmode;
        }
      /* Fall through. */
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
    case LTGT:
      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCSRmode;
      return CCSmode;

    case LTU:
    case GEU:
      if (GET_CODE (op0) == PLUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCL1mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCURmode;
      return CCUmode;

    case LEU:
    case GTU:
      if (GET_CODE (op0) == MINUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCL2mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCURmode;
      return CCUmode;

    default:
      gcc_unreachable ();
    }
}
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently. */

static void
s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
                              bool op0_preserve_value)
{
  if (op0_preserve_value)
    return;

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
          && pos >= 0 && pos + len <= modesize
          && modesize <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT block;
          block = (HOST_WIDE_INT_1U << len) - 1;
          block <<= modesize - pos - len;

          *op0 = gen_rtx_AND (GET_MODE (inner), inner,
                              gen_int_mode (block, GET_MODE (inner)));
        }
    }

  /* Narrow AND of memory against immediate to enable TM. */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
      if (GET_CODE (inner) == SUBREG
          && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
          && (GET_MODE_SIZE (GET_MODE (inner))
              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
          && ((INTVAL (mask)
               & GET_MODE_MASK (GET_MODE (inner))
               & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
              == 0))
        inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs. */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
        {
          int part = s390_single_part (XEXP (*op0, 1),
                                       GET_MODE (inner), QImode, 0);
          if (part >= 0)
            {
              mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
              inner = adjust_address_nv (inner, QImode, part);
              *op0 = gen_rtx_AND (QImode, inner, mask);
            }
        }
    }

  /* Narrow comparisons against 0xffff to HImode if possible. */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
          & ~HOST_WIDE_INT_UC (0xffff)) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }

  /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
        {
        case EQ: new_code = EQ;  break;
        case NE: new_code = NE;  break;
        case LT: new_code = GTU; break;
        case GT: new_code = LTU; break;
        case LE: new_code = GEU; break;
        case GE: new_code = LEU; break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          *op0 = XVECEXP (*op0, 0, 0);
          *code = (int) new_code;
        }
    }

  /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && CONST_INT_P (*op1))
    {
      enum rtx_code new_code = UNKNOWN;
      switch (GET_MODE (XVECEXP (*op0, 0, 0)))
        {
        case CCZmode:
        case CCRAWmode:
          switch (*code)
            {
            case EQ: new_code = EQ; break;
            case NE: new_code = NE; break;
            default: break;
            }
          break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          /* For CCRAWmode put the required cc mask into the second
             operand. */
          if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
              && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
            *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
          *op0 = XVECEXP (*op0, 0, 0);
          *code = (int) new_code;
        }
    }

  /* Simplify cascaded EQ, NE with const0_rtx. */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
          || (*code == NE && GET_CODE (*op0) == EQ))
        *code = EQ;
      else
        *code = NE;

      *op0 = XEXP (*op0, 0);
    }

  /* Prefer register over memory as first operand. */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int) swap_condition ((enum rtx_code) *code);
    }

  /* A comparison result is compared against zero.  Replace it with
     the (perhaps inverted) original comparison.
     This probably should be done by simplify_relational_operation. */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && COMPARISON_P (*op0)
      && CC_REG_P (XEXP (*op0, 0)))
    {
      enum rtx_code new_code;

      if (*code == EQ)
        new_code = reversed_comparison_code_parts (GET_CODE (*op0),
                                                   XEXP (*op0, 0),
                                                   XEXP (*op0, 1), NULL);
      else
        new_code = GET_CODE (*op0);

      if (new_code != UNKNOWN)
        {
          *code = (int) new_code;
          *op1 = XEXP (*op0, 1);
          *op0 = XEXP (*op0, 0);
        }
    }
}
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result. */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      /* Do not output a redundant compare instruction if a
         compare_and_swap pattern already computed the result and the
         machine modes are compatible. */
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
                  == GET_MODE (op0));
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result. */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
                            rtx cmp, rtx new_rtx, machine_mode ccmode)
{
  rtx cc;

  cc = gen_rtx_REG (ccmode, CC_REGNUM);
  switch (GET_MODE (mem))
    {
    case SImode:
      emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp,
                                                         new_rtx, cc));
      break;
    case DImode:
      emit_insn (gen_atomic_compare_and_swapdi_internal (old, mem, cmp,
                                                         new_rtx, cc));
      break;
    case TImode:
      emit_insn (gen_atomic_compare_and_swapti_internal (old, mem, cmp,
                                                         new_rtx, cc));
      break;
    default:
      gcc_unreachable ();
    }

  return s390_emit_compare (code, cc, const0_rtx);
}
/* Emit a jump instruction to TARGET and return it.  If COND is
   NULL_RTX, emit an unconditional jump, else a conditional jump under
   condition COND. */

rtx_insn *
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (pc_rtx, target);
  return emit_jump_insn (insn);
}
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons. */

int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;
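  /* Note: condition code i is represented by bit (3 - i), so the
     4-bit value assembled from these constants matches the mask field
     encoding of the branch-on-condition instructions
     (CC0 = 8 ... CC3 = 1). */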
  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx
              || (GET_MODE (XEXP (code, 0)) == CCRAWmode
                  && CONST_INT_P (XEXP (code, 1))));

  switch (GET_MODE (XEXP (code, 0)))
    {
    case CCZmode:
    case CCZ1mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCT1mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC1;
        case NE: return CC0 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCT2mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC2;
        case NE: return CC0 | CC1 | CC3;
        default: return -1;
        }
      break;

    case CCT3mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC3;
        case NE: return CC0 | CC1 | CC2;
        default: return -1;
        }
      break;

    case CCLmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0 | CC2;
        case NE: return CC1 | CC3;
        default: return -1;
        }
      break;

    case CCL1mode:
      switch (GET_CODE (code))
        {
        case LTU: return CC2 | CC3;  /* carry */
        case GEU: return CC0 | CC1;  /* no carry */
        default: return -1;
        }
      break;

    case CCL2mode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0 | CC1;  /* borrow */
        case LEU: return CC2 | CC3;  /* no borrow */
        default: return -1;
        }
      break;

    case CCL3mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0 | CC2;
        case NE: return CC1 | CC3;
        case LTU: return CC1;
        case GTU: return CC3;
        case LEU: return CC1 | CC2;
        case GEU: return CC2 | CC3;
        default: return -1;
        }

    case CCUmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LTU: return CC1;
        case GTU: return CC2;
        case LEU: return CC0 | CC1;
        case GEU: return CC0 | CC2;
        default: return -1;
        }
      break;

    case CCURmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC2 | CC1 | CC3;
        case LTU: return CC2;
        case GTU: return CC1;
        case LEU: return CC0 | CC2;
        case GEU: return CC0 | CC1;
        default: return -1;
        }
      break;

    case CCAPmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1 | CC3;
        case GT: return CC2;
        case LE: return CC0 | CC1 | CC3;
        case GE: return CC0 | CC2;
        default: return -1;
        }
      break;

    case CCANmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1;
        case GT: return CC2 | CC3;
        case LE: return CC0 | CC1;
        case GE: return CC0 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCSmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1;
        case GT: return CC2;
        case LE: return CC0 | CC1;
        case GE: return CC0 | CC2;
        case UNORDERED: return CC3;
        case ORDERED: return CC0 | CC1 | CC2;
        case UNEQ: return CC0 | CC3;
        case UNLT: return CC1 | CC3;
        case UNGT: return CC2 | CC3;
        case UNLE: return CC0 | CC1 | CC3;
        case UNGE: return CC0 | CC2 | CC3;
        case LTGT: return CC1 | CC2;
        default: return -1;
        }
      break;

    case CCSRmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC2 | CC1 | CC3;
        case LT: return CC2;
        case GT: return CC1;
        case LE: return CC0 | CC2;
        case GE: return CC0 | CC1;
        case UNORDERED: return CC3;
        case ORDERED: return CC0 | CC2 | CC1;
        case UNEQ: return CC0 | CC3;
        case UNLT: return CC2 | CC3;
        case UNGT: return CC1 | CC3;
        case UNLE: return CC0 | CC2 | CC3;
        case UNGE: return CC0 | CC1 | CC3;
        case LTGT: return CC2 | CC1;
        default: return -1;
        }
      break;

    /* Vector comparison modes. */
    /* CC2 will never be set.  It however is part of the negated
       mask. */
    case CCVIALLmode:
      switch (GET_CODE (code))
        {
        case EQ:
        case GTU:
        case GT:
        case GE: return CC0;
          /* The inverted modes are in fact *any* modes. */
        case NE:
        case LEU:
        case LE:
        case LT: return CC3 | CC1 | CC2;
        default: return -1;
        }

    case CCVIANYmode:
      switch (GET_CODE (code))
        {
        case EQ:
        case GTU:
        case GT:
        case GE: return CC0 | CC1;
          /* The inverted modes are in fact *all* modes. */
        case NE:
        case LEU:
        case LE:
        case LT: return CC3 | CC2;
        default: return -1;
        }

    case CCVFALLmode:
      switch (GET_CODE (code))
        {
        case EQ:
        case GT:
        case GE: return CC0;
          /* The inverted modes are in fact *any* modes. */
        case NE:
        case UNLE:
        case UNLT: return CC3 | CC1 | CC2;
        default: return -1;
        }

    case CCVFANYmode:
      switch (GET_CODE (code))
        {
        case EQ:
        case GT:
        case GE: return CC0 | CC1;
          /* The inverted modes are in fact *all* modes. */
        case NE:
        case UNLE:
        case UNLT: return CC3 | CC2;
        default: return -1;
        }

    case CCRAWmode:
      switch (GET_CODE (code))
        {
        case EQ:
          return INTVAL (XEXP (code, 1));
        case NE:
          return (INTVAL (XEXP (code, 1))) ^ 0xf;
        default:
          return -1;
        }

    default:
      return -1;
    }
}
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons. */

int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
    {
    case EQ:
      return CC0;
    case NE:
      return CC1 | CC2;
    case LT:
    case LTU:
      return CC1;
    case GT:
    case GTU:
      return CC2;
    case LE:
    case LEU:
      return CC0 | CC1;
    case GE:
    case GEU:
      return CC0 | CC2;
    default:
      gcc_unreachable ();
    }
  return -1;
}
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch. */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && (XEXP (code, 1) == const0_rtx
          || (GET_MODE (XEXP (code, 0)) == CCRAWmode
              && CONST_INT_P (XEXP (code, 1)))))
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
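/* Illustration (not code from the original file): an EQ test on
   CCZmode yields mask CC0 = 8, so the mnemonic is mnemonic[8] = "e";
   inverting gives mask 8 ^ 15 = 7, i.e. mnemonic[7] = "ne". */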
/* Return the part of OP which has a value different from DEF.
   The size of the part is determined by MODE.
   Use this function only if you already know that OP really
   contains such a part. */
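/* Example (illustrative only): for OP = (const_int 0x12340000),
   MODE = HImode and DEF = 0 the loop below inspects the HImode parts
   starting from the least significant one and returns 0x1234, the
   only part that differs from DEF. */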
unsigned HOST_WIDE_INT
s390_extract_part (rtx op, machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
        value = UINTVAL (op);
      else
        value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
        return value & part_mask;
    }

  gcc_unreachable ();
}
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1. */

int
s390_single_part (rtx op,
                  machine_mode mode,
                  machine_mode part_mode,
                  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
        value = UINTVAL (op);
      else
        value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
        {
          if (part != -1)
            return -1;
          else
            part = i;
        }
    }
  return part == -1 ? -1 : n_parts - 1 - part;
}
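/* Example (illustrative only): for OP = (const_int 0x12340000),
   MODE = SImode, PART_MODE = HImode and DEF = 0 only the second
   HImode part counting from the least significant end differs from
   DEF, so part = 1 and the function returns n_parts - 1 - part = 0,
   i.e. the part number counted from the most significant end. */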
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in (the lower SIZE bits of) IN.

   PSTART and PEND can be used to obtain the start and end
   position (inclusive) of the bitfield relative to 64
   bits.  *PSTART / *PEND gives the position of the first/last bit
   of the bitfield counting from the highest order bit starting
   with zero. */
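/* Example (illustrative only): IN = 0x0ff0 with SIZE = 16 is a
   contiguous bitmask; counting from the highest order bit of a
   64-bit value, *PSTART would be 52 and *PEND 59. */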
static bool
s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
                                  int *pstart, int *pend)
{
  int start;
  int end = -1;
  int lowbit = HOST_BITS_PER_WIDE_INT - 1;
  int highbit = HOST_BITS_PER_WIDE_INT - size;
  unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;

  gcc_assert (!!pstart == !!pend);
  for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
    if (end == -1)
      {
        /* Look for the rightmost bit of a contiguous range of ones. */
        if (bitmask & in)
          /* Found it. */
          end = start;
      }
    else
      {
        /* Look for the first zero bit after the range of ones. */
        if (! (bitmask & in))
          /* Found it. */
          break;
      }
  /* We're one past the last one-bit. */
  start++;

  if (end == -1)
    /* No one bits found. */
    return false;

  if (start > highbit)
    {
      unsigned HOST_WIDE_INT mask;

      /* Calculate a mask for all bits beyond the contiguous bits. */
      mask = ((~HOST_WIDE_INT_0U >> highbit)
              & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));
      if (mask & in)
        /* There are more bits set beyond the first range of one bits. */
        return false;
    }

  if (pstart)
    {
      *pstart = start;
      *pend = end;
    }

  return true;
}
/* Same as s390_contiguous_bitmask_nowrap_p but also returns true
   if ~IN contains a contiguous bitfield.  In that case, *END is <
   *START.

   If WRAP_P is true, a bitmask that wraps around is also tested.
   When a wraparound occurs *START is greater than *END (in
   non-null pointers), and the uppermost (64 - SIZE) bits are thus
   part of the range.  If WRAP_P is false, no wraparound is
   tested. */

bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
                           int size, int *start, int *end)
{
  int bs = HOST_BITS_PER_WIDE_INT;
  bool b;

  gcc_assert (!!start == !!end);
  if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
    /* This cannot be expressed as a contiguous bitmask.  Exit early because
       the second call of s390_contiguous_bitmask_nowrap_p would accept this as
       a contiguous bitmask. */
    return false;
  b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
  if (b)
    return true;
  if (! wrap_p)
    return false;
  b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
  if (b && start)
    {
      int s = *start;
      int e = *end;

      gcc_assert (s >= 1);
      *start = ((e + 1) & (bs - 1));
      *end = ((s - 1 + bs) & (bs - 1));
    }

  return b;
}
2334 its elements. START and END can be used to obtain the start and
2335 end position of the bitfield.
2337 START/STOP give the position of the first/last bit of the bitfield
2338 counting from the lowest order bit starting with zero. In order to
2339 use these values for S/390 instructions this has to be converted to
2340 "bits big endian" style. */
2343 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2345 unsigned HOST_WIDE_INT mask;
2350 gcc_assert (!!start == !!end);
2351 if (!const_vec_duplicate_p (op, &elt)
2352 || !CONST_INT_P (elt))
2355 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2357 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2361 mask = UINTVAL (elt);
2363 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2368 *start -= (HOST_BITS_PER_WIDE_INT - size);
2369 *end -= (HOST_BITS_PER_WIDE_INT - size);
/* Return true if OP consists only of byte chunks being either 0 or
   0xff.  If MASK is != NULL a byte mask is generated which is
   appropriate for the vector generate byte mask instruction. */

bool
s390_bytemask_vector_p (rtx op, unsigned *mask)
{
  int i;
  unsigned tmp_mask = 0;
  int nunit, unit_size;

  if (!VECTOR_MODE_P (GET_MODE (op))
      || GET_CODE (op) != CONST_VECTOR
      || !CONST_INT_P (XVECEXP (op, 0, 0)))
    return false;

  nunit = GET_MODE_NUNITS (GET_MODE (op));
  unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));

  for (i = 0; i < nunit; i++)
    {
      unsigned HOST_WIDE_INT c;
      int j;

      if (!CONST_INT_P (XVECEXP (op, 0, i)))
        return false;

      c = UINTVAL (XVECEXP (op, 0, i));
      for (j = 0; j < unit_size; j++)
        {
          if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
            return false;
          tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
          c = c >> BITS_PER_UNIT;
        }
    }

  if (mask != NULL)
    *mask = tmp_mask;

  return true;
}
2420 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2421 equivalent to a shift followed by the AND. In particular, CONTIG
2422 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2423 for ROTL indicate a rotate to the right. */
2426 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2431 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2435 return (64 - end >= rotl);
2438 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2440 rotl = -rotl + (64 - bitsize);
2441 return (start >= rotl);
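/* The property checked above, restated as a small self-contained
   model (a sketch under the assumption of a left rotate ROTL in
   [0,63]; the real code also handles right rotates by translating
   them): if CONTIG keeps none of the bits the rotate carried across
   the bit 0/bit 63 gap, then rotate-plus-AND equals shift-plus-AND. */

static unsigned long long
sketch_rol64 (unsigned long long x, int n)
{
  n &= 63;
  return n == 0 ? x : (x << n) | (x >> (64 - n));
}

static int
sketch_rotl_and_matches_shiftl_and (unsigned long long x, int rotl,
                                    unsigned long long contig)
{
  /* Nonzero iff masking the rotated value and masking the shifted
     value agree for this input. */
  return (sketch_rol64 (x, rotl) & contig) == ((x << rotl) & contig);
}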
2445 /* Check whether we can (and want to) split a double-word
2446 move in mode MODE from SRC to DST into two single-word
2447 moves, moving the subword FIRST_SUBWORD first. */
2450 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2452 /* Floating point and vector registers cannot be split. */
2453 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2456 /* Non-offsettable memory references cannot be split. */
2457 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2458 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2461 /* Moving the first subword must not clobber a register
2462 needed to move the second subword. */
2463 if (register_operand (dst, mode))
2465 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2466 if (reg_overlap_mentioned_p (subreg, src))
2473 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2474 and [MEM2, MEM2 + SIZE] do overlap and false otherwise. */
2478 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2480 rtx addr1, addr2, addr_delta;
2481 HOST_WIDE_INT delta;
2483 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2489 addr1 = XEXP (mem1, 0);
2490 addr2 = XEXP (mem2, 0);
2492 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2494 /* This overlapping check is used by peepholes merging memory block operations.
2495 Overlapping operations would otherwise be recognized by the S/390 hardware
2496 and would fall back to a slower implementation. Allowing overlapping
2497 operations would lead to slow code but not to wrong code. Therefore we are
2498 somewhat optimistic if we cannot prove that the memory blocks are
2500 That's why we return false here although this may accept operations on
2501 overlapping memory areas. */
2502 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2505 delta = INTVAL (addr_delta);
2508 || (delta > 0 && delta < size)
2509 || (delta < 0 && -delta < size))
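/* The distance check above in isolation (a sketch with plain signed
   arithmetic): two SIZE-byte blocks whose start addresses differ by
   DELTA bytes overlap exactly when |DELTA| < SIZE. */

static int
sketch_blocks_overlap_p (long long delta, long long size)
{
  /* Covers delta == 0 as well as partial overlaps to either side. */
  return delta > -size && delta < size;
}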
2515 /* Check whether the address of memory reference MEM2 equals exactly
2516 the address of memory reference MEM1 plus DELTA. Return true if
2517 we can prove this to be the case, false otherwise. */
2520 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2522 rtx addr1, addr2, addr_delta;
2524 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2527 addr1 = XEXP (mem1, 0);
2528 addr2 = XEXP (mem2, 0);
2530 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2531 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2537 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2540 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2543 machine_mode wmode = mode;
2544 rtx dst = operands[0];
2545 rtx src1 = operands[1];
2546 rtx src2 = operands[2];
2549 /* If we cannot handle the operation directly, use a temp register. */
2550 if (!s390_logical_operator_ok_p (operands))
2551 dst = gen_reg_rtx (mode);
2553 /* QImode and HImode patterns make sense only if we have a destination
2554 in memory. Otherwise perform the operation in SImode. */
2555 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2558 /* Widen operands if required. */
2561 if (GET_CODE (dst) == SUBREG
2562 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2564 else if (REG_P (dst))
2565 dst = gen_rtx_SUBREG (wmode, dst, 0);
2567 dst = gen_reg_rtx (wmode);
2569 if (GET_CODE (src1) == SUBREG
2570 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2572 else if (GET_MODE (src1) != VOIDmode)
2573 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2575 if (GET_CODE (src2) == SUBREG
2576 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2578 else if (GET_MODE (src2) != VOIDmode)
2579 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2582 /* Emit the instruction. */
2583 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2584 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2585 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2587 /* Fix up the destination if needed. */
2588 if (dst != operands[0])
2589 emit_move_insn (operands[0], gen_lowpart (mode, dst));
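/* Why widening QImode/HImode logical operations to SImode is safe,
   as a short illustration in plain C (a sketch, not the original
   code): AND, IOR and XOR operate bitwise, so the low byte of a
   32-bit result depends only on the low bytes of the inputs. */

static unsigned char
sketch_and_byte_via_word (unsigned char a, unsigned char b)
{
  unsigned int wa = a, wb = b;           /* widen */
  return (unsigned char) (wa & wb);      /* narrow back; high bits unused */
}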
2592 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2595 s390_logical_operator_ok_p (rtx *operands)
2597 /* If the destination operand is in memory, it needs to coincide
2598 with one of the source operands. After reload, it has to be
2599 the first source operand. */
2600 if (GET_CODE (operands[0]) == MEM)
2601 return rtx_equal_p (operands[0], operands[1])
2602 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2607 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2608 operand IMMOP to switch from SS to SI type instructions. */
2611 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2613 int def = code == AND ? -1 : 0;
2617 gcc_assert (GET_CODE (*memop) == MEM);
2618 gcc_assert (!MEM_VOLATILE_P (*memop));
2620 mask = s390_extract_part (*immop, QImode, def);
2621 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2622 gcc_assert (part >= 0);
2624 *memop = adjust_address (*memop, QImode, part);
2625 *immop = gen_int_mode (mask, QImode);
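/* A hedged model of the single-part test relied on above: with the
   default byte DEF (0xff for AND, 0x00 for IOR/XOR), the immediate
   must differ from DEF in exactly one byte, and that byte's index is
   the offset passed to adjust_address. Sketch only; byte 0 is taken
   to be the most significant byte, matching big-endian S/390. */

static int
sketch_single_byte_part (unsigned long long imm, int nbytes,
                         unsigned char def)
{
  int i, part = -1;

  for (i = 0; i < nbytes; i++)
    {
      unsigned char b = (unsigned char) (imm >> (8 * (nbytes - 1 - i)));
      if (b != def)
        {
          if (part >= 0)
            return -1;  /* More than one byte differs. */
          part = i;
        }
    }
  return part;          /* -1 if no byte differs. */
}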
2629 /* How to allocate a 'struct machine_function'. */
2631 static struct machine_function *
2632 s390_init_machine_status (void)
2634 return ggc_cleared_alloc<machine_function> ();
2637 /* Map for smallest class containing reg regno. */
2639 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2640 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2641 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2642 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2643 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2644 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2645 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2646 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2647 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2648 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2649 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2650 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2651 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2652 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2653 VEC_REGS, VEC_REGS /* 52 */
2656 /* Return attribute type of insn. */
2658 static enum attr_type
2659 s390_safe_attr_type (rtx_insn *insn)
2661 if (recog_memoized (insn) >= 0)
2662 return get_attr_type (insn);
2667 /* Return true if DISP is a valid short displacement. */
2670 s390_short_displacement (rtx disp)
2672 /* No displacement is OK. */
2676 /* Without the long displacement facility we don't need to
2677 distinguish between long and short displacement. */
2678 if (!TARGET_LONG_DISPLACEMENT)
2681 /* Integer displacement in range. */
2682 if (GET_CODE (disp) == CONST_INT)
2683 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2685 /* GOT offset is not OK, the GOT can be large. */
2686 if (GET_CODE (disp) == CONST
2687 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2688 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2689 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2692 /* All other symbolic constants are literal pool references,
2693 which are OK as the literal pool must be small. */
2694 if (GET_CODE (disp) == CONST)
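/* The two displacement formats in plain numbers (a sketch; the
   bounds follow from the 12-bit unsigned D field of the classic
   instruction formats and the 20-bit signed DL/DH field of the
   long-displacement formats). */

static int
sketch_short_disp_in_range (long long d)
{
  return d >= 0 && d < 4096;            /* 12-bit unsigned */
}

static int
sketch_long_disp_in_range (long long d)
{
  return d >= -524288 && d < 524288;    /* 20-bit signed */
}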
2700 /* Decompose a RTL expression ADDR for a memory address into
2701 its components, returned in OUT.
2703 Returns false if ADDR is not a valid memory address, true
2704 otherwise. If OUT is NULL, don't return the components,
2705 but check for validity only.
2707 Note: Only addresses in canonical form are recognized.
2708 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2709 canonical form so that they will be recognized. */
2712 s390_decompose_address (rtx addr, struct s390_address *out)
2714 HOST_WIDE_INT offset = 0;
2715 rtx base = NULL_RTX;
2716 rtx indx = NULL_RTX;
2717 rtx disp = NULL_RTX;
2719 bool pointer = false;
2720 bool base_ptr = false;
2721 bool indx_ptr = false;
2722 bool literal_pool = false;
2724 /* We may need to substitute the literal pool base register into the address
2725 below. However, at this point we do not know which register is going to
2726 be used as base, so we substitute the arg pointer register. This is going
2727 to be treated as holding a pointer below -- it shouldn't be used for any other purpose. */
2729 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2731 /* Decompose address into base + index + displacement. */
2733 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2736 else if (GET_CODE (addr) == PLUS)
2738 rtx op0 = XEXP (addr, 0);
2739 rtx op1 = XEXP (addr, 1);
2740 enum rtx_code code0 = GET_CODE (op0);
2741 enum rtx_code code1 = GET_CODE (op1);
2743 if (code0 == REG || code0 == UNSPEC)
2745 if (code1 == REG || code1 == UNSPEC)
2747 indx = op0; /* index + base */
2753 base = op0; /* base + displacement */
2758 else if (code0 == PLUS)
2760 indx = XEXP (op0, 0); /* index + base + disp */
2761 base = XEXP (op0, 1);
2772 disp = addr; /* displacement */
2774 /* Extract integer part of displacement. */
2778 if (GET_CODE (disp) == CONST_INT)
2780 offset = INTVAL (disp);
2783 else if (GET_CODE (disp) == CONST
2784 && GET_CODE (XEXP (disp, 0)) == PLUS
2785 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2787 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2788 disp = XEXP (XEXP (disp, 0), 0);
2792 /* Strip off CONST here to avoid special case tests later. */
2793 if (disp && GET_CODE (disp) == CONST)
2794 disp = XEXP (disp, 0);
2796 /* We can convert literal pool addresses to
2797 displacements by basing them off the base register. */
2798 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2803 base = fake_pool_base, literal_pool = true;
2805 /* Mark up the displacement. */
2806 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2807 UNSPEC_LTREL_OFFSET);
2810 /* Validate base register. */
2813 if (GET_CODE (base) == UNSPEC)
2814 switch (XINT (base, 1))
2818 disp = gen_rtx_UNSPEC (Pmode,
2819 gen_rtvec (1, XVECEXP (base, 0, 0)),
2820 UNSPEC_LTREL_OFFSET);
2824 base = XVECEXP (base, 0, 1);
2827 case UNSPEC_LTREL_BASE:
2828 if (XVECLEN (base, 0) == 1)
2829 base = fake_pool_base, literal_pool = true;
2831 base = XVECEXP (base, 0, 1);
2838 if (!REG_P (base) || GET_MODE (base) != Pmode)
2841 if (REGNO (base) == STACK_POINTER_REGNUM
2842 || REGNO (base) == FRAME_POINTER_REGNUM
2843 || ((reload_completed || reload_in_progress)
2844 && frame_pointer_needed
2845 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2846 || REGNO (base) == ARG_POINTER_REGNUM
2848 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2849 pointer = base_ptr = true;
2851 if ((reload_completed || reload_in_progress)
2852 && base == cfun->machine->base_reg)
2853 pointer = base_ptr = literal_pool = true;
2856 /* Validate index register. */
2859 if (GET_CODE (indx) == UNSPEC)
2860 switch (XINT (indx, 1))
2864 disp = gen_rtx_UNSPEC (Pmode,
2865 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2866 UNSPEC_LTREL_OFFSET);
2870 indx = XVECEXP (indx, 0, 1);
2873 case UNSPEC_LTREL_BASE:
2874 if (XVECLEN (indx, 0) == 1)
2875 indx = fake_pool_base, literal_pool = true;
2877 indx = XVECEXP (indx, 0, 1);
2884 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2887 if (REGNO (indx) == STACK_POINTER_REGNUM
2888 || REGNO (indx) == FRAME_POINTER_REGNUM
2889 || ((reload_completed || reload_in_progress)
2890 && frame_pointer_needed
2891 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2892 || REGNO (indx) == ARG_POINTER_REGNUM
2894 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2895 pointer = indx_ptr = true;
2897 if ((reload_completed || reload_in_progress)
2898 && indx == cfun->machine->base_reg)
2899 pointer = indx_ptr = literal_pool = true;
2902 /* Prefer to use pointer as base, not index. */
2903 if (base && indx && !base_ptr
2904 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2911 /* Validate displacement. */
2914 /* If virtual registers are involved, the displacement will change later
2915 anyway as the virtual registers get eliminated. This could make a
2916 valid displacement invalid, but it is more likely to make an invalid
2917 displacement valid, because we sometimes access the register save area
2918 via negative offsets to one of those registers.
2919 Thus we don't check the displacement for validity here. If after
2920 elimination the displacement turns out to be invalid after all,
2921 this is fixed up by reload in any case. */
2922 /* LRA always keeps displacements up to date and we need to
2923 know that the displacement is right during all of LRA, not only at
2924 the final elimination. */
2926 || (base != arg_pointer_rtx
2927 && indx != arg_pointer_rtx
2928 && base != return_address_pointer_rtx
2929 && indx != return_address_pointer_rtx
2930 && base != frame_pointer_rtx
2931 && indx != frame_pointer_rtx
2932 && base != virtual_stack_vars_rtx
2933 && indx != virtual_stack_vars_rtx))
2934 if (!DISP_IN_RANGE (offset))
2939 /* All the special cases are pointers. */
2942 /* In the small-PIC case, the linker converts @GOT
2943 and @GOTNTPOFF offsets to possible displacements. */
2944 if (GET_CODE (disp) == UNSPEC
2945 && (XINT (disp, 1) == UNSPEC_GOT
2946 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2952 /* Accept pool label offsets. */
2953 else if (GET_CODE (disp) == UNSPEC
2954 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2957 /* Accept literal pool references. */
2958 else if (GET_CODE (disp) == UNSPEC
2959 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2961 /* In case CSE pulled a non literal pool reference out of
2962 the pool we have to reject the address. This is
2963 especially important when loading the GOT pointer on non
2964 zarch CPUs. In this case the literal pool contains an lt
2965 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2966 will most likely exceed the displacement. */
2967 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2968 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2971 orig_disp = gen_rtx_CONST (Pmode, disp);
2974 /* If we have an offset, make sure it does not
2975 exceed the size of the constant pool entry. */
2976 rtx sym = XVECEXP (disp, 0, 0);
2977 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2980 orig_disp = plus_constant (Pmode, orig_disp, offset);
2995 out->disp = orig_disp;
2996 out->pointer = pointer;
2997 out->literal_pool = literal_pool;
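/* For reference, a hedged sketch of what the decomposed form means
   at run time: in 64-bit addressing mode the hardware computes the
   effective address as the modular sum of base register, index
   register and displacement (b + x + d). */

static unsigned long long
sketch_effective_address (unsigned long long base, unsigned long long indx,
                          long long disp)
{
  return base + indx + (unsigned long long) disp;
}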
3003 /* Decompose a RTL expression OP for an address style operand into its
3004 components, and return the base register in BASE and the offset in
3005 OFFSET. While OP looks like an address, it is never supposed to be used as such.
3008 Return true if OP is a valid address operand, false if not. */
3011 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3012 HOST_WIDE_INT *offset)
3016 /* We can have an integer constant, an address register,
3017 or a sum of the two. */
3018 if (CONST_SCALAR_INT_P (op))
3023 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3028 while (op && GET_CODE (op) == SUBREG)
3029 op = SUBREG_REG (op);
3031 if (op && GET_CODE (op) != REG)
3036 if (off == NULL_RTX)
3038 else if (CONST_INT_P (off))
3039 *offset = INTVAL (off);
3040 else if (CONST_WIDE_INT_P (off))
3041 /* The offset will be cut down to 12 bits anyway so take just
3042 the lowest order chunk of the wide int. */
3043 *offset = CONST_WIDE_INT_ELT (off, 0);
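/* Sketch of how an address-style shift count behaves (an
   illustration under the assumption of a 64-bit shift, where the
   machine uses only the low 6 bits of base + offset; this is why the
   offset can be truncated so aggressively above). */

static unsigned int
sketch_shift_count (unsigned long long base, long long offset)
{
  return (unsigned int) ((base + (unsigned long long) offset) & 63);
}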
3054 /* Return true if OP is a valid address without index. */
3057 s390_legitimate_address_without_index_p (rtx op)
3059 struct s390_address addr;
3061 if (!s390_decompose_address (XEXP (op, 0), &addr))
3070 /* Return TRUE if ADDR is an operand valid for a load/store relative
3071 instruction. Be aware that the alignment of the operand needs to
3072 be checked separately.
3073 Valid addresses are single references or a sum of a reference and a
3074 constant integer. Return these parts in SYMREF and ADDEND. You can
3075 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3076 values. Literal pool references are *not* considered symbol references. */
3080 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3082 HOST_WIDE_INT tmpaddend = 0;
3084 if (GET_CODE (addr) == CONST)
3085 addr = XEXP (addr, 0);
3087 if (GET_CODE (addr) == PLUS)
3089 if (!CONST_INT_P (XEXP (addr, 1)))
3092 tmpaddend = INTVAL (XEXP (addr, 1));
3093 addr = XEXP (addr, 0);
3096 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3097 || (GET_CODE (addr) == UNSPEC
3098 && (XINT (addr, 1) == UNSPEC_GOTENT
3099 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3104 *addend = tmpaddend;
3111 /* Return true if the address in OP is valid for constraint letter C
3112 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3113 pool MEMs should be accepted. Only the Q, R, S, T constraint
3114 letters are allowed for C. */
3117 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3119 struct s390_address addr;
3120 bool decomposed = false;
3122 if (!address_operand (op, GET_MODE (op)))
3125 /* This check makes sure that no symbolic addresses (except literal
3126 pool references) are accepted by the R or T constraints. */
3127 if (s390_loadrelative_operand_p (op, NULL, NULL))
3130 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3133 if (!s390_decompose_address (op, &addr))
3135 if (addr.literal_pool)
3140 /* With reload, we sometimes get intermediate address forms that are
3141 actually invalid as-is, but we need to accept them in the most
3142 generic cases below ('R' or 'T'), since reload will in fact fix
3143 them up. LRA behaves differently here; we never see such forms,
3144 but on the other hand, we need to strictly reject every invalid
3145 address form. Perform this check right up front. */
3146 if (lra_in_progress)
3148 if (!decomposed && !s390_decompose_address (op, &addr))
3155 case 'Q': /* no index short displacement */
3156 if (!decomposed && !s390_decompose_address (op, &addr))
3160 if (!s390_short_displacement (addr.disp))
3164 case 'R': /* with index short displacement */
3165 if (TARGET_LONG_DISPLACEMENT)
3167 if (!decomposed && !s390_decompose_address (op, &addr))
3169 if (!s390_short_displacement (addr.disp))
3172 /* Any invalid address here will be fixed up by reload,
3173 so accept it for the most generic constraint. */
3176 case 'S': /* no index long displacement */
3177 if (!decomposed && !s390_decompose_address (op, &addr))
3183 case 'T': /* with index long displacement */
3184 /* Any invalid address here will be fixed up by reload,
3185 so accept it for the most generic constraint. */
3195 /* Evaluates constraint strings described by the regular expression
3196 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3197 the constraint given in STR, and 0 otherwise. */
3200 s390_mem_constraint (const char *str, rtx op)
3207 /* Check for offsettable variants of memory constraints. */
3208 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3210 if ((reload_completed || reload_in_progress)
3211 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3213 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3215 /* Check for non-literal-pool variants of memory constraints. */
3218 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3223 if (GET_CODE (op) != MEM)
3225 return s390_check_qrst_address (c, XEXP (op, 0), true);
3227 /* Simply check for the basic form of a shift count. Reload will
3228 take care of making sure we have a proper base register. */
3229 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3233 return s390_check_qrst_address (str[1], op, true);
3241 /* Evaluates constraint strings starting with letter O. Input
3242 parameter C is the second letter following the "O" in the constraint
3243 string. Returns 1 if VALUE meets the respective constraint and 0 otherwise. */
3247 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3255 return trunc_int_for_mode (value, SImode) == value;
3259 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3262 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3270 /* Evaluates constraint strings starting with letter N. Parameter STR
3271 contains the letters following letter "N" in the constraint string.
3272 Returns true if VALUE matches the constraint. */
3275 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3277 machine_mode mode, part_mode;
3279 int part, part_goal;
3285 part_goal = str[0] - '0';
3329 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3332 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3335 if (part_goal != -1 && part_goal != part)
3342 /* Returns true if the input parameter VALUE is a float zero. */
3345 s390_float_const_zero_p (rtx value)
3347 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3348 && value == CONST0_RTX (GET_MODE (value)));
3351 /* Implement TARGET_REGISTER_MOVE_COST. */
3354 s390_register_move_cost (machine_mode mode,
3355 reg_class_t from, reg_class_t to)
3357 /* On s390, copy between fprs and gprs is expensive. */
3359 /* It becomes somewhat faster when ldgr/lgdr are available. */
3360 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3362 /* ldgr is single cycle. */
3363 if (reg_classes_intersect_p (from, GENERAL_REGS)
3364 && reg_classes_intersect_p (to, FP_REGS))
3366 /* lgdr needs 3 cycles. */
3367 if (reg_classes_intersect_p (to, GENERAL_REGS)
3368 && reg_classes_intersect_p (from, FP_REGS))
3372 /* Otherwise copying is done via memory. */
3373 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3374 && reg_classes_intersect_p (to, FP_REGS))
3375 || (reg_classes_intersect_p (from, FP_REGS)
3376 && reg_classes_intersect_p (to, GENERAL_REGS)))
3382 /* Implement TARGET_MEMORY_MOVE_COST. */
3385 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3386 reg_class_t rclass ATTRIBUTE_UNUSED,
3387 bool in ATTRIBUTE_UNUSED)
3392 /* Compute a (partial) cost for rtx X. Return true if the complete
3393 cost has been computed, and false if subexpressions should be
3394 scanned. In either case, *TOTAL contains the cost result. The
3395 initial value of *TOTAL is the default value computed by
3396 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3397 code of the superexpression of x. */
3400 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3401 int opno ATTRIBUTE_UNUSED,
3402 int *total, bool speed ATTRIBUTE_UNUSED)
3404 int code = GET_CODE (x);
3412 case CONST_WIDE_INT:
3419 if (GET_CODE (XEXP (x, 0)) == AND
3420 && GET_CODE (XEXP (x, 1)) == ASHIFT
3421 && REG_P (XEXP (XEXP (x, 0), 0))
3422 && REG_P (XEXP (XEXP (x, 1), 0))
3423 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3424 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3425 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3426 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3428 *total = COSTS_N_INSNS (2);
3432 /* ~AND on a 128 bit mode. This can be done using a vector
3435 && GET_CODE (XEXP (x, 0)) == NOT
3436 && GET_CODE (XEXP (x, 1)) == NOT
3437 && REG_P (XEXP (XEXP (x, 0), 0))
3438 && REG_P (XEXP (XEXP (x, 1), 0))
3439 && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) == 16
3440 && s390_hard_regno_mode_ok (VR0_REGNUM,
3441 GET_MODE (XEXP (XEXP (x, 0), 0))))
3443 *total = COSTS_N_INSNS (1);
3456 *total = COSTS_N_INSNS (1);
3461 *total = COSTS_N_INSNS (1);
3469 rtx left = XEXP (x, 0);
3470 rtx right = XEXP (x, 1);
3471 if (GET_CODE (right) == CONST_INT
3472 && CONST_OK_FOR_K (INTVAL (right)))
3473 *total = s390_cost->mhi;
3474 else if (GET_CODE (left) == SIGN_EXTEND)
3475 *total = s390_cost->mh;
3477 *total = s390_cost->ms; /* msr, ms, msy */
3482 rtx left = XEXP (x, 0);
3483 rtx right = XEXP (x, 1);
3486 if (GET_CODE (right) == CONST_INT
3487 && CONST_OK_FOR_K (INTVAL (right)))
3488 *total = s390_cost->mghi;
3489 else if (GET_CODE (left) == SIGN_EXTEND)
3490 *total = s390_cost->msgf;
3492 *total = s390_cost->msg; /* msgr, msg */
3494 else /* TARGET_31BIT */
3496 if (GET_CODE (left) == SIGN_EXTEND
3497 && GET_CODE (right) == SIGN_EXTEND)
3498 /* mulsidi case: mr, m */
3499 *total = s390_cost->m;
3500 else if (GET_CODE (left) == ZERO_EXTEND
3501 && GET_CODE (right) == ZERO_EXTEND
3502 && TARGET_CPU_ZARCH)
3503 /* umulsidi case: ml, mlr */
3504 *total = s390_cost->ml;
3506 /* Complex calculation is required. */
3507 *total = COSTS_N_INSNS (40);
3513 *total = s390_cost->mult_df;
3516 *total = s390_cost->mxbr;
3527 *total = s390_cost->madbr;
3530 *total = s390_cost->maebr;
3535 /* Negate in the third argument is free: FMSUB. */
3536 if (GET_CODE (XEXP (x, 2)) == NEG)
3538 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3539 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3540 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3547 if (mode == TImode) /* 128 bit division */
3548 *total = s390_cost->dlgr;
3549 else if (mode == DImode)
3551 rtx right = XEXP (x, 1);
3552 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3553 *total = s390_cost->dlr;
3554 else /* 64 by 64 bit division */
3555 *total = s390_cost->dlgr;
3557 else if (mode == SImode) /* 32 bit division */
3558 *total = s390_cost->dlr;
3565 rtx right = XEXP (x, 1);
3566 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3568 *total = s390_cost->dsgfr;
3570 *total = s390_cost->dr;
3571 else /* 64 by 64 bit division */
3572 *total = s390_cost->dsgr;
3574 else if (mode == SImode) /* 32 bit division */
3575 *total = s390_cost->dlr;
3576 else if (mode == SFmode)
3578 *total = s390_cost->debr;
3580 else if (mode == DFmode)
3582 *total = s390_cost->ddbr;
3584 else if (mode == TFmode)
3586 *total = s390_cost->dxbr;
3592 *total = s390_cost->sqebr;
3593 else if (mode == DFmode)
3594 *total = s390_cost->sqdbr;
3596 *total = s390_cost->sqxbr;
3601 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3602 || outer_code == PLUS || outer_code == MINUS
3603 || outer_code == COMPARE)
3608 *total = COSTS_N_INSNS (1);
3609 if (GET_CODE (XEXP (x, 0)) == AND
3610 && GET_CODE (XEXP (x, 1)) == CONST_INT
3611 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3613 rtx op0 = XEXP (XEXP (x, 0), 0);
3614 rtx op1 = XEXP (XEXP (x, 0), 1);
3615 rtx op2 = XEXP (x, 1);
3617 if (memory_operand (op0, GET_MODE (op0))
3618 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3620 if (register_operand (op0, GET_MODE (op0))
3621 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3631 /* Return the cost of an address rtx ADDR. */
3634 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3635 addr_space_t as ATTRIBUTE_UNUSED,
3636 bool speed ATTRIBUTE_UNUSED)
3638 struct s390_address ad;
3639 if (!s390_decompose_address (addr, &ad))
3642 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3645 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3647 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3649 int misalign ATTRIBUTE_UNUSED)
3651 switch (type_of_cost)
3661 case cond_branch_not_taken:
3663 case vec_promote_demote:
3664 case unaligned_load:
3665 case unaligned_store:
3668 case cond_branch_taken:
3672 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3679 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3680 otherwise return 0. */
3683 tls_symbolic_operand (rtx op)
3685 if (GET_CODE (op) != SYMBOL_REF)
3687 return SYMBOL_REF_TLS_MODEL (op);
3690 /* Split DImode access register reference REG (on 64-bit) into its constituent
3691 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3692 gen_highpart cannot be used as they assume all registers are word-sized,
3693 while our access registers have only half that size. */
3696 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3698 gcc_assert (TARGET_64BIT);
3699 gcc_assert (ACCESS_REG_P (reg));
3700 gcc_assert (GET_MODE (reg) == DImode);
3701 gcc_assert (!(REGNO (reg) & 1));
3703 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3704 *hi = gen_rtx_REG (SImode, REGNO (reg));
3707 /* Return true if OP contains a symbol reference. */
3710 symbolic_reference_mentioned_p (rtx op)
3715 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3718 fmt = GET_RTX_FORMAT (GET_CODE (op));
3719 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3725 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3726 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3730 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3737 /* Return true if OP contains a reference to a thread-local symbol. */
3740 tls_symbolic_reference_mentioned_p (rtx op)
3745 if (GET_CODE (op) == SYMBOL_REF)
3746 return tls_symbolic_operand (op);
3748 fmt = GET_RTX_FORMAT (GET_CODE (op));
3749 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3755 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3756 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3760 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3768 /* Return true if OP is a legitimate general operand when
3769 generating PIC code. It is given that flag_pic is on
3770 and that OP satisfies CONSTANT_P. */
3773 legitimate_pic_operand_p (rtx op)
3775 /* Accept all non-symbolic constants. */
3776 if (!SYMBOLIC_CONST (op))
3779 /* Reject everything else; must be handled
3780 via emit_symbolic_move. */
3784 /* Returns true if the constant value OP is a legitimate general operand.
3785 It is given that OP satisfies CONSTANT_P. */
3788 s390_legitimate_constant_p (machine_mode mode, rtx op)
3790 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3792 if (GET_MODE_SIZE (mode) != 16)
3795 if (!satisfies_constraint_j00 (op)
3796 && !satisfies_constraint_jm1 (op)
3797 && !satisfies_constraint_jKK (op)
3798 && !satisfies_constraint_jxx (op)
3799 && !satisfies_constraint_jyy (op))
3803 /* Accept all non-symbolic constants. */
3804 if (!SYMBOLIC_CONST (op))
3807 /* Accept immediate LARL operands. */
3808 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3811 /* Thread-local symbols are never legal constants. This is
3812 so that emit_call knows that computing such addresses
3813 might require a function call. */
3814 if (TLS_SYMBOLIC_CONST (op))
3817 /* In the PIC case, symbolic constants must *not* be
3818 forced into the literal pool. We accept them here,
3819 so that they will be handled by emit_symbolic_move. */
3823 /* All remaining non-PIC symbolic constants are
3824 forced into the literal pool. */
3828 /* Determine if it's legal to put X into the constant pool. This
3829 is not possible if X contains the address of a symbol that is
3830 not constant (TLS) or not known at final link time (PIC). */
3833 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3835 switch (GET_CODE (x))
3839 case CONST_WIDE_INT:
3841 /* Accept all non-symbolic constants. */
3845 /* Labels are OK iff we are non-PIC. */
3846 return flag_pic != 0;
3849 /* 'Naked' TLS symbol references are never OK,
3850 non-TLS symbols are OK iff we are non-PIC. */
3851 if (tls_symbolic_operand (x))
3854 return flag_pic != 0;
3857 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3860 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3861 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3864 switch (XINT (x, 1))
3866 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3867 case UNSPEC_LTREL_OFFSET:
3875 case UNSPEC_GOTNTPOFF:
3876 case UNSPEC_INDNTPOFF:
3879 /* If the literal pool shares the code section, we put
3880 execute template placeholders into the pool as well. */
3882 return TARGET_CPU_ZARCH;
3894 /* Returns true if the constant value OP is a legitimate general
3895 operand during and after reload. The difference to
3896 legitimate_constant_p is that this function will not accept
3897 a constant that would need to be forced to the literal pool
3898 before it can be used as an operand.
3899 This function accepts all constants which can be loaded directly into a GPR. */
3903 legitimate_reload_constant_p (rtx op)
3905 /* Accept la(y) operands. */
3906 if (GET_CODE (op) == CONST_INT
3907 && DISP_IN_RANGE (INTVAL (op)))
3910 /* Accept l(g)hi/l(g)fi operands. */
3911 if (GET_CODE (op) == CONST_INT
3912 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3915 /* Accept lliXX operands. */
3917 && GET_CODE (op) == CONST_INT
3918 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3919 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3923 && GET_CODE (op) == CONST_INT
3924 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3925 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3928 /* Accept larl operands. */
3929 if (TARGET_CPU_ZARCH
3930 && larl_operand (op, VOIDmode))
3933 /* Accept floating-point zero operands that fit into a single GPR. */
3934 if (GET_CODE (op) == CONST_DOUBLE
3935 && s390_float_const_zero_p (op)
3936 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3939 /* Accept double-word operands that can be split. */
3940 if (GET_CODE (op) == CONST_WIDE_INT
3941 || (GET_CODE (op) == CONST_INT
3942 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3944 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
3945 rtx hi = operand_subword (op, 0, 0, dword_mode);
3946 rtx lo = operand_subword (op, 1, 0, dword_mode);
3947 return legitimate_reload_constant_p (hi)
3948 && legitimate_reload_constant_p (lo);
3951 /* Everything else cannot be handled without reload. */
3955 /* Returns true if the constant value OP is a legitimate fp operand
3956 during and after reload.
3957 This function accepts all constants which can be loaded directly into an FPR. */
3961 legitimate_reload_fp_constant_p (rtx op)
3963 /* Accept floating-point zero operands if the load zero instruction
3964 can be used. Prior to z196 the load fp zero instruction caused a
3965 performance penalty if the result is used as a BFP number. */
3967 && GET_CODE (op) == CONST_DOUBLE
3968 && s390_float_const_zero_p (op))
3974 /* Returns true if the constant value OP is a legitimate vector operand
3975 during and after reload.
3976 This function accepts all constants which can be loaded directly into a VR. */
3980 legitimate_reload_vector_constant_p (rtx op)
3982 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
3983 && (satisfies_constraint_j00 (op)
3984 || satisfies_constraint_jm1 (op)
3985 || satisfies_constraint_jKK (op)
3986 || satisfies_constraint_jxx (op)
3987 || satisfies_constraint_jyy (op)))
3993 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
3994 return the class of reg to actually use. */
3997 s390_preferred_reload_class (rtx op, reg_class_t rclass)
3999 switch (GET_CODE (op))
4001 /* Constants we cannot reload into general registers
4002 must be forced into the literal pool. */
4006 case CONST_WIDE_INT:
4007 if (reg_class_subset_p (GENERAL_REGS, rclass)
4008 && legitimate_reload_constant_p (op))
4009 return GENERAL_REGS;
4010 else if (reg_class_subset_p (ADDR_REGS, rclass)
4011 && legitimate_reload_constant_p (op))
4013 else if (reg_class_subset_p (FP_REGS, rclass)
4014 && legitimate_reload_fp_constant_p (op))
4016 else if (reg_class_subset_p (VEC_REGS, rclass)
4017 && legitimate_reload_vector_constant_p (op))
4022 /* If a symbolic constant or a PLUS is reloaded,
4023 it is most likely being used as an address, so
4024 prefer ADDR_REGS. If 'class' is not a superset
4025 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4027 /* Symrefs cannot be pushed into the literal pool with -fPIC
4028 so we *MUST NOT* return NO_REGS for these cases
4029 (s390_cannot_force_const_mem will return true).
4031 On the other hand we MUST return NO_REGS for symrefs with
4032 invalid addend which might have been pushed to the literal
4033 pool (no -fPIC). Usually we would expect them to be
4034 handled via secondary reload but this does not happen if
4035 they are used as literal pool slot replacement in reload
4036 inheritance (see emit_input_reload_insns). */
4037 if (TARGET_CPU_ZARCH
4038 && GET_CODE (XEXP (op, 0)) == PLUS
4039 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4040 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4042 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4050 if (!legitimate_reload_constant_p (op))
4054 /* load address will be used. */
4055 if (reg_class_subset_p (ADDR_REGS, rclass))
4067 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4068 multiple of ALIGNMENT and the SYMBOL_REF being naturally aligned. */
4072 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4074 HOST_WIDE_INT addend;
4077 /* The "required alignment" might be 0 (e.g. for certain structs
4078 accessed via BLKmode). Early abort in this case, as well as when
4079 an alignment > 8 is required. */
4080 if (alignment < 2 || alignment > 8)
4083 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4086 if (addend & (alignment - 1))
4089 if (GET_CODE (symref) == SYMBOL_REF)
4091 /* We have load-relative instructions for 2-byte, 4-byte, and
4092 8-byte alignment so allow only these. */
4095 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4096 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4097 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4098 default: return false;
4102 if (GET_CODE (symref) == UNSPEC
4103 && alignment <= UNITS_PER_LONG)
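/* The addend test above, isolated as a sketch: for the power-of-two
   alignments the caller accepts, ADDEND must not set any bit below
   the alignment. */

static int
sketch_addend_aligned_p (long long addend, int alignment)
{
  /* Assumes ALIGNMENT is a power of two (2, 4 or 8 above). */
  return (addend & (alignment - 1)) == 0;
}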
4109 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4110 operand, SCRATCH is used to reload the even part of the address and the odd part is added afterwards. */
4114 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4116 HOST_WIDE_INT addend;
4119 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4123 /* Easy case. The addend is even so larl will do fine. */
4124 emit_move_insn (reg, addr);
4127 /* We can leave the scratch register untouched if the target
4128 register is a valid base register. */
4129 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4130 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4133 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4134 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4137 emit_move_insn (scratch,
4138 gen_rtx_CONST (Pmode,
4139 gen_rtx_PLUS (Pmode, symref,
4140 GEN_INT (addend - 1))));
4142 emit_move_insn (scratch, symref);
4144 /* Increment the address using la in order to avoid clobbering cc. */
4145 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
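/* The even/odd addend trick above in plain arithmetic (a sketch):
   LARL can only materialize even offsets, so SYM + ODD is formed as
   SYM + (ODD - 1) via LARL, followed by LA +1; the sum is unchanged. */

static unsigned long long
sketch_split_odd_addend (unsigned long long sym, long long addend)
{
  unsigned long long even_target = sym + (unsigned long long) (addend - 1);
  return even_target + 1;       /* == sym + addend */
}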
4149 /* Generate what is necessary to move between REG and MEM using
4150 SCRATCH. The direction is given by TOMEM. */
4153 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4155 /* Reload might have pulled a constant out of the literal pool.
4156 Force it back in. */
4157 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4158 || GET_CODE (mem) == CONST_WIDE_INT
4159 || GET_CODE (mem) == CONST_VECTOR
4160 || GET_CODE (mem) == CONST)
4161 mem = force_const_mem (GET_MODE (reg), mem);
4163 gcc_assert (MEM_P (mem));
4165 /* For a load from memory we can leave the scratch register
4166 untouched if the target register is a valid base register. */
4168 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4169 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4170 && GET_MODE (reg) == GET_MODE (scratch))
4173 /* Load address into scratch register. Since we can't have a
4174 secondary reload for a secondary reload, we have to cover the case
4175 where larl would need a secondary reload here as well. */
4176 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4178 /* Now we can use a standard load/store to do the move. */
4180 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4182 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4185 /* Inform reload about cases where moving X with a mode MODE to a register in
4186 RCLASS requires an extra scratch or immediate register. Return the class
4187 needed for the immediate register. */
4190 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4191 machine_mode mode, secondary_reload_info *sri)
4193 enum reg_class rclass = (enum reg_class) rclass_i;
4195 /* Intermediate register needed. */
4196 if (reg_classes_intersect_p (CC_REGS, rclass))
4197 return GENERAL_REGS;
4201 /* The vst/vl vector move instructions allow only for short displacements. */
4204 && GET_CODE (XEXP (x, 0)) == PLUS
4205 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4206 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4207 && reg_class_subset_p (rclass, VEC_REGS)
4208 && (!reg_class_subset_p (rclass, FP_REGS)
4209 || (GET_MODE_SIZE (mode) > 8
4210 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4213 sri->icode = (TARGET_64BIT ?
4214 CODE_FOR_reloaddi_la_in :
4215 CODE_FOR_reloadsi_la_in);
4217 sri->icode = (TARGET_64BIT ?
4218 CODE_FOR_reloaddi_la_out :
4219 CODE_FOR_reloadsi_la_out);
4225 HOST_WIDE_INT offset;
4228 /* On z10 several optimizer steps may generate larl operands with odd addends. */
4231 && s390_loadrelative_operand_p (x, &symref, &offset)
4233 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4234 && (offset & 1) == 1)
4235 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4236 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4238 /* Handle all the (mem (symref)) accesses we cannot use the z10
4239 instructions for. */
4241 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4243 || !reg_class_subset_p (rclass, GENERAL_REGS)
4244 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4245 || !s390_check_symref_alignment (XEXP (x, 0),
4246 GET_MODE_SIZE (mode))))
4248 #define __SECONDARY_RELOAD_CASE(M,m) \
4251 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4252 CODE_FOR_reload##m##di_tomem_z10; \
4254 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4255 CODE_FOR_reload##m##si_tomem_z10; \
4258 switch (GET_MODE (x))
4260 __SECONDARY_RELOAD_CASE (QI, qi);
4261 __SECONDARY_RELOAD_CASE (HI, hi);
4262 __SECONDARY_RELOAD_CASE (SI, si);
4263 __SECONDARY_RELOAD_CASE (DI, di);
4264 __SECONDARY_RELOAD_CASE (TI, ti);
4265 __SECONDARY_RELOAD_CASE (SF, sf);
4266 __SECONDARY_RELOAD_CASE (DF, df);
4267 __SECONDARY_RELOAD_CASE (TF, tf);
4268 __SECONDARY_RELOAD_CASE (SD, sd);
4269 __SECONDARY_RELOAD_CASE (DD, dd);
4270 __SECONDARY_RELOAD_CASE (TD, td);
4271 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4272 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4273 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4274 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4275 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4276 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4277 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4278 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4279 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4280 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4281 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4282 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4283 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4284 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4285 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4286 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4287 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4288 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4289 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4290 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4291 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4295 #undef __SECONDARY_RELOAD_CASE
4299 /* We need a scratch register when loading a PLUS expression which
4300 is not a legitimate operand of the LOAD ADDRESS instruction. */
4301 /* LRA can deal with the transformation of a plus op very well -- so we
4302 don't need to prompt LRA in this case. */
4303 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4304 sri->icode = (TARGET_64BIT ?
4305 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4307 /* When performing a multiword move from or to memory, we have to make sure the
4308 second chunk in memory is addressable without causing a displacement
4309 overflow. If that would be the case we calculate the address in
4310 a scratch register. */
4312 && GET_CODE (XEXP (x, 0)) == PLUS
4313 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4314 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4315 + GET_MODE_SIZE (mode) - 1))
4317 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4318 in an s_operand address since we may fall back to lm/stm. So we only
4319 have to care about overflows in the b+i+d case. */
4320 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4321 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4322 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4323 /* For FP_REGS no lm/stm is available so this check is triggered
4324 for displacement overflows in b+i+d and b+d like addresses. */
4325 || (reg_classes_intersect_p (FP_REGS, rclass)
4326 && s390_class_max_nregs (FP_REGS, mode) > 1))
4329 sri->icode = (TARGET_64BIT ?
4330 CODE_FOR_reloaddi_la_in :
4331 CODE_FOR_reloadsi_la_in);
4333 sri->icode = (TARGET_64BIT ?
4334 CODE_FOR_reloaddi_la_out :
4335 CODE_FOR_reloadsi_la_out);
4339 /* A scratch address register is needed when a symbolic constant is
4340 copied to r0 compiling with -fPIC. In other cases the target
4341 register might be used as a temporary (see legitimize_pic_address). */
4342 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4343 sri->icode = (TARGET_64BIT ?
4344 CODE_FOR_reloaddi_PIC_addr :
4345 CODE_FOR_reloadsi_PIC_addr);
4347 /* Either scratch or no register needed. */
4351 /* Generate code to load SRC, which is PLUS that is not a
4352 legitimate operand for the LA instruction, into TARGET.
4353 SCRATCH may be used as scratch register. */
4356 s390_expand_plus_operand (rtx target, rtx src,
4360 struct s390_address ad;
4362 /* src must be a PLUS; get its two operands. */
4363 gcc_assert (GET_CODE (src) == PLUS);
4364 gcc_assert (GET_MODE (src) == Pmode);
4366 /* Check if any of the two operands is already scheduled
4367 for replacement by reload. This can happen e.g. when
4368 float registers occur in an address. */
4369 sum1 = find_replacement (&XEXP (src, 0));
4370 sum2 = find_replacement (&XEXP (src, 1));
4371 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4373 /* If the address is already strictly valid, there's nothing to do. */
4374 if (!s390_decompose_address (src, &ad)
4375 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4376 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4378 /* Otherwise, one of the operands cannot be an address register;
4379 we reload its value into the scratch register. */
4380 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4382 emit_move_insn (scratch, sum1);
4385 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4387 emit_move_insn (scratch, sum2);
4391 /* According to the way these invalid addresses are generated
4392 in reload.c, it should never happen (at least on s390) that
4393 *neither* of the PLUS components, after find_replacement
4394 was applied, is an address register. */
4395 if (sum1 == scratch && sum2 == scratch)
4401 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4404 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4405 is only ever performed on addresses, so we can mark the
4406 sum as legitimate for LA in any case. */
4407 s390_load_address (target, src);
4411 /* Return true if ADDR is a valid memory address.
4412 STRICT specifies whether strict register checking applies. */
4415 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4417 struct s390_address ad;
4420 && larl_operand (addr, VOIDmode)
4421 && (mode == VOIDmode
4422 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4425 if (!s390_decompose_address (addr, &ad))
4430 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4433 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4439 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4440 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4444 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4445 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4451 /* Return true if OP is a valid operand for the LA instruction.
4452 In 31-bit, we need to prove that the result is used as an
4453 address, as LA performs only a 31-bit addition. */
4456 legitimate_la_operand_p (rtx op)
4458 struct s390_address addr;
4459 if (!s390_decompose_address (op, &addr))
4462 return (TARGET_64BIT || addr.pointer);
4465 /* Return true if it is valid *and* preferable to use LA to
4466 compute the sum of OP1 and OP2. */
4469 preferred_la_operand_p (rtx op1, rtx op2)
4471 struct s390_address addr;
4473 if (op2 != const0_rtx)
4474 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4476 if (!s390_decompose_address (op1, &addr))
4478 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4480 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4483 /* Avoid LA instructions with index register on z196; it is
4484 preferable to use regular add instructions when possible.
4485 Starting with zEC12 the la with index register is "uncracked"
4487 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4490 if (!TARGET_64BIT && !addr.pointer)
4496 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4497 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4503 /* Emit a forced load-address operation to load SRC into DST.
4504 This will use the LOAD ADDRESS instruction even in situations
4505 where legitimate_la_operand_p (SRC) returns false. */
4508 s390_load_address (rtx dst, rtx src)
4511 emit_move_insn (dst, src);
4513 emit_insn (gen_force_la_31 (dst, src));
4516 /* Return true if it is OK to use SYMBOL_REF in a relative address. */
4519 s390_rel_address_ok_p (rtx symbol_ref)
4523 if (symbol_ref == s390_got_symbol () || CONSTANT_POOL_ADDRESS_P (symbol_ref))
4526 decl = SYMBOL_REF_DECL (symbol_ref);
4528 if (!flag_pic || SYMBOL_REF_LOCAL_P (symbol_ref))
4529 return (s390_pic_data_is_text_relative
4531 && TREE_CODE (decl) == FUNCTION_DECL));
4536 /* Return a legitimate reference for ORIG (an address) using the
4537 register REG. If REG is 0, a new pseudo is generated.
4539 There are two types of references that must be handled:
4541 1. Global data references must load the address from the GOT, via
4542 the PIC reg. An insn is emitted to do this load, and the reg is
4545 2. Static data references, constant pool addresses, and code labels
4546 compute the address as an offset from the GOT, whose base is in
4547 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4548 differentiate them from global data objects. The returned
4549 address is the PIC reg + an unspec constant.
4551 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4552 reg also appears in the address. */
4555 legitimize_pic_address (rtx orig, rtx reg)
4558 rtx addend = const0_rtx;
4561 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4563 if (GET_CODE (addr) == CONST)
4564 addr = XEXP (addr, 0);
4566 if (GET_CODE (addr) == PLUS)
4568 addend = XEXP (addr, 1);
4569 addr = XEXP (addr, 0);
4572 if ((GET_CODE (addr) == LABEL_REF
4573 || (SYMBOL_REF_P (addr) && s390_rel_address_ok_p (addr))
4574 || (GET_CODE (addr) == UNSPEC &&
4575 (XINT (addr, 1) == UNSPEC_GOTENT
4576 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4577 && GET_CODE (addend) == CONST_INT)
4579 /* This can be locally addressed. */
4581 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4582 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4583 gen_rtx_CONST (Pmode, addr) : addr);
4585 if (TARGET_CPU_ZARCH
4586 && larl_operand (const_addr, VOIDmode)
4587 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4588 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
4590 if (INTVAL (addend) & 1)
4592 /* LARL can't handle odd offsets, so emit a pair of LARL and LA. */
4594 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4596 if (!DISP_IN_RANGE (INTVAL (addend)))
4598 HOST_WIDE_INT even = INTVAL (addend) - 1;
4599 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4600 addr = gen_rtx_CONST (Pmode, addr);
4601 addend = const1_rtx;
4604 emit_move_insn (temp, addr);
4605 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4609 s390_load_address (reg, new_rtx);
4615 /* If the offset is even, we can just use LARL. This
4616 will happen automatically. */
4621 /* No larl - Access local symbols relative to the GOT. */
4623 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4625 if (reload_in_progress || reload_completed)
4626 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4628 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4629 if (addend != const0_rtx)
4630 addr = gen_rtx_PLUS (Pmode, addr, addend);
4631 addr = gen_rtx_CONST (Pmode, addr);
4632 addr = force_const_mem (Pmode, addr);
4633 emit_move_insn (temp, addr);
4635 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4638 s390_load_address (reg, new_rtx);
4643 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4645 /* A non-local symbol reference without addend.
4647 The symbol ref is wrapped into an UNSPEC to make sure the
4648 proper operand modifier (@GOT or @GOTENT) will be emitted.
4649 This will tell the linker to put the symbol into the GOT.
4651 Additionally the code dereferencing the GOT slot is emitted here.
4653 An addend to the symref needs to be added afterwards.
4654 legitimize_pic_address calls itself recursively to handle
4655 that case. So no need to do it here. */
4658 reg = gen_reg_rtx (Pmode);
4662 /* Use load relative if possible.
4663 lgrl <target>, sym@GOTENT */
4664 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4665 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4666 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4668 emit_move_insn (reg, new_rtx);
4671 else if (flag_pic == 1)
4673 /* Assume GOT offset is a valid displacement operand (< 4k
4674 or < 512k with z990). This is handled the same way in
4675 both 31- and 64-bit code (@GOT).
4676 lg <target>, sym@GOT(r12) */
4678 if (reload_in_progress || reload_completed)
4679 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4681 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4682 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4683 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4684 new_rtx = gen_const_mem (Pmode, new_rtx);
4685 emit_move_insn (reg, new_rtx);
4688 else if (TARGET_CPU_ZARCH)
4690 /* If the GOT offset might be >= 4k, we determine the position
4691 of the GOT entry via a PC-relative LARL (@GOTENT).
4692 larl temp, sym@GOTENT
4693 lg <target>, 0(temp) */
4695 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4697 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4698 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4700 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4701 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4702 emit_move_insn (temp, new_rtx);
4704 new_rtx = gen_const_mem (Pmode, temp);
4705 emit_move_insn (reg, new_rtx);
4711 /* If the GOT offset might be >= 4k, we have to load it
4712 from the literal pool (@GOT).
4714 lg temp, lit-litbase(r13)
4715 lg <target>, 0(temp)
4716 lit: .long sym@GOT */
4718 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4720 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4721 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4723 if (reload_in_progress || reload_completed)
4724 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4726 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4727 addr = gen_rtx_CONST (Pmode, addr);
4728 addr = force_const_mem (Pmode, addr);
4729 emit_move_insn (temp, addr);
4731 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4732 new_rtx = gen_const_mem (Pmode, new_rtx);
4733 emit_move_insn (reg, new_rtx);
4737 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4739 gcc_assert (XVECLEN (addr, 0) == 1);
4740 switch (XINT (addr, 1))
4742 /* These address symbols (or PLT slots) relative to the GOT
4743 (not GOT slots!). In general this will exceed the
4744 displacement range so these values belong into the literal
4748 new_rtx = force_const_mem (Pmode, orig);
4751 /* For -fPIC the GOT size might exceed the displacement
4752 range so make sure the value is in the literal pool. */
4755 new_rtx = force_const_mem (Pmode, orig);
4758 /* For @GOTENT larl is used. This is handled like local symbol refs. */
4764 /* @PLT is OK as is on 64-bit, must be converted to
4765 GOT-relative @PLTOFF on 31-bit. */
4767 if (!TARGET_CPU_ZARCH)
4769 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4771 if (reload_in_progress || reload_completed)
4772 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4774 addr = XVECEXP (addr, 0, 0);
4775 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4777 if (addend != const0_rtx)
4778 addr = gen_rtx_PLUS (Pmode, addr, addend);
4779 addr = gen_rtx_CONST (Pmode, addr);
4780 addr = force_const_mem (Pmode, addr);
4781 emit_move_insn (temp, addr);
4783 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4786 s390_load_address (reg, new_rtx);
4791 /* On 64 bit larl can be used. This case is handled like
4792 local symbol refs. */
4796 /* Everything else cannot happen. */
4801 else if (addend != const0_rtx)
4803 /* Otherwise, compute the sum. */
4805 rtx base = legitimize_pic_address (addr, reg);
4806 new_rtx = legitimize_pic_address (addend,
4807 base == reg ? NULL_RTX : reg);
4808 if (GET_CODE (new_rtx) == CONST_INT)
4809 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4812 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4814 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4815 new_rtx = XEXP (new_rtx, 1);
4817 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4820 if (GET_CODE (new_rtx) == CONST)
4821 new_rtx = XEXP (new_rtx, 0);
4822 new_rtx = force_operand (new_rtx, 0);
4828 /* Load the thread pointer into a register. */
4831 s390_get_thread_pointer (void)
4833 rtx tp = gen_reg_rtx (Pmode);
4835 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4836 mark_reg_pointer (tp, BITS_PER_WORD);
4841 /* Emit a TLS call insn.  The call target is the SYMBOL_REF stored
4842 in s390_tls_symbol, which always refers to __tls_get_offset.
4843 The returned offset is written to RESULT_REG and a USE rtx is
4844 generated for TLS_CALL. */
4846 static GTY(()) rtx s390_tls_symbol;
4849 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4854 emit_insn (s390_load_got ());
4856 if (!s390_tls_symbol)
4857 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4859 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4860 gen_rtx_REG (Pmode, RETURN_REGNUM));
4862 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4863 RTL_CONST_CALL_P (insn) = 1;
4866 /* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
4867 this (thread-local) address.  REG may be used as a temporary. */
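/* For orientation (an illustrative summary, not authoritative): the
   TLS models handled below conceptually compute
     global/local dynamic:  tp + __tls_get_offset (x)
     initial exec:          tp + GOT[x]      (tp-relative offset in GOT)
     local exec:            tp + ntpoff (x)  (offset known at link time)
   where tp is the thread pointer loaded by s390_get_thread_pointer.  */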
4870 legitimize_tls_address (rtx addr, rtx reg)
4872 rtx new_rtx, tls_call, temp, base, r2;
4875 if (GET_CODE (addr) == SYMBOL_REF)
4876 switch (tls_symbolic_operand (addr))
4878 case TLS_MODEL_GLOBAL_DYNAMIC:
4880 r2 = gen_rtx_REG (Pmode, 2);
4881 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4882 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4883 new_rtx = force_const_mem (Pmode, new_rtx);
4884 emit_move_insn (r2, new_rtx);
4885 s390_emit_tls_call_insn (r2, tls_call);
4886 insn = get_insns ();
4889 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4890 temp = gen_reg_rtx (Pmode);
4891 emit_libcall_block (insn, temp, r2, new_rtx);
4893 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4896 s390_load_address (reg, new_rtx);
4901 case TLS_MODEL_LOCAL_DYNAMIC:
4903 r2 = gen_rtx_REG (Pmode, 2);
4904 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4905 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4906 new_rtx = force_const_mem (Pmode, new_rtx);
4907 emit_move_insn (r2, new_rtx);
4908 s390_emit_tls_call_insn (r2, tls_call);
4909 insn = get_insns ();
4912 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4913 temp = gen_reg_rtx (Pmode);
4914 emit_libcall_block (insn, temp, r2, new_rtx);
4916 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4917 base = gen_reg_rtx (Pmode);
4918 s390_load_address (base, new_rtx);
4920 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4921 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4922 new_rtx = force_const_mem (Pmode, new_rtx);
4923 temp = gen_reg_rtx (Pmode);
4924 emit_move_insn (temp, new_rtx);
4926 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4929 s390_load_address (reg, new_rtx);
4934 case TLS_MODEL_INITIAL_EXEC:
4937 /* Assume GOT offset < 4k. This is handled the same way
4938 in both 31- and 64-bit code. */
4940 if (reload_in_progress || reload_completed)
4941 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4943 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4944 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4945 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4946 new_rtx = gen_const_mem (Pmode, new_rtx);
4947 temp = gen_reg_rtx (Pmode);
4948 emit_move_insn (temp, new_rtx);
4950 else if (TARGET_CPU_ZARCH)
4952 /* If the GOT offset might be >= 4k, we determine the position
4953 of the GOT entry via a PC-relative LARL. */
4955 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4956 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4957 temp = gen_reg_rtx (Pmode);
4958 emit_move_insn (temp, new_rtx);
4960 new_rtx = gen_const_mem (Pmode, temp);
4961 temp = gen_reg_rtx (Pmode);
4962 emit_move_insn (temp, new_rtx);
4966 /* If the GOT offset might be >= 4k, we have to load it
4967 from the literal pool. */
4969 if (reload_in_progress || reload_completed)
4970 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4972 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4973 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4974 new_rtx = force_const_mem (Pmode, new_rtx);
4975 temp = gen_reg_rtx (Pmode);
4976 emit_move_insn (temp, new_rtx);
4978 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4979 new_rtx = gen_const_mem (Pmode, new_rtx);
4981 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4982 temp = gen_reg_rtx (Pmode);
4983 emit_insn (gen_rtx_SET (temp, new_rtx));
4987 /* In position-dependent code, load the absolute address of
4988 the GOT entry from the literal pool. */
4990 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
4991 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4992 new_rtx = force_const_mem (Pmode, new_rtx);
4993 temp = gen_reg_rtx (Pmode);
4994 emit_move_insn (temp, new_rtx);
4997 new_rtx = gen_const_mem (Pmode, new_rtx);
4998 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
4999 temp = gen_reg_rtx (Pmode);
5000 emit_insn (gen_rtx_SET (temp, new_rtx));
5003 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5006 s390_load_address (reg, new_rtx);
5011 case TLS_MODEL_LOCAL_EXEC:
5012 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5013 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5014 new_rtx = force_const_mem (Pmode, new_rtx);
5015 temp = gen_reg_rtx (Pmode);
5016 emit_move_insn (temp, new_rtx);
5018 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5021 s390_load_address (reg, new_rtx);
5030 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5032 switch (XINT (XEXP (addr, 0), 1))
5034 case UNSPEC_INDNTPOFF:
5035 gcc_assert (TARGET_CPU_ZARCH);
5044 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5045 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5047 new_rtx = XEXP (XEXP (addr, 0), 0);
5048 if (GET_CODE (new_rtx) != SYMBOL_REF)
5049 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5051 new_rtx = legitimize_tls_address (new_rtx, reg);
5052 new_rtx = plus_constant (Pmode, new_rtx,
5053 INTVAL (XEXP (XEXP (addr, 0), 1)));
5054 new_rtx = force_operand (new_rtx, 0);
5058 gcc_unreachable (); /* for now ... */
5063 /* Emit insns making the address in operands[1] valid for a standard
5064 move to operands[0].  operands[1] is replaced by an address which
5065 should be used instead of the former RTX to emit the move pattern. */
5069 emit_symbolic_move (rtx *operands)
5071 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5073 if (GET_CODE (operands[0]) == MEM)
5074 operands[1] = force_reg (Pmode, operands[1]);
5075 else if (TLS_SYMBOLIC_CONST (operands[1]))
5076 operands[1] = legitimize_tls_address (operands[1], temp);
5078 operands[1] = legitimize_pic_address (operands[1], temp);
5081 /* Try machine-dependent ways of modifying an illegitimate address X
5082 to be legitimate. If we find one, return the new, valid address.
5084 OLDX is the address as it was before break_out_memory_refs was called.
5085 In some cases it is useful to look at this to decide what needs to be done.
5087 MODE is the mode of the operand pointed to by X.
5089 When -fpic is used, special handling is needed for symbolic references.
5090 See comments by legitimize_pic_address for details. */
5093 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5094 machine_mode mode ATTRIBUTE_UNUSED)
5096 rtx constant_term = const0_rtx;
5098 if (TLS_SYMBOLIC_CONST (x))
5100 x = legitimize_tls_address (x, 0);
5102 if (s390_legitimate_address_p (mode, x, FALSE))
5105 else if (GET_CODE (x) == PLUS
5106 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5107 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5113 if (SYMBOLIC_CONST (x)
5114 || (GET_CODE (x) == PLUS
5115 && (SYMBOLIC_CONST (XEXP (x, 0))
5116 || SYMBOLIC_CONST (XEXP (x, 1)))))
5117 x = legitimize_pic_address (x, 0);
5119 if (s390_legitimate_address_p (mode, x, FALSE))
5123 x = eliminate_constant_term (x, &constant_term);
5125 /* Optimize loading of large displacements by splitting them
5126 into a multiple of 4K and the remainder; this allows the
5127 former to be CSE'd if possible.
5129 Don't do this if the displacement is added to a register
5130 pointing into the stack frame, as the offsets will
5131 change later anyway. */
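/* Worked example (illustrative): with base + 0x12345 and no long
   displacements, lower = 0x12345 & 0xfff = 0x345 and
   upper = 0x12345 ^ 0x345 = 0x12000, so the code below emits
     temp = 0x12000;  x = base + temp
   leaving the in-range displacement 0x345; the load of 0x12000 can
   then be CSE'd across neighbouring references.  */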
5133 if (GET_CODE (constant_term) == CONST_INT
5134 && !TARGET_LONG_DISPLACEMENT
5135 && !DISP_IN_RANGE (INTVAL (constant_term))
5136 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5138 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5139 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5141 rtx temp = gen_reg_rtx (Pmode);
5142 rtx val = force_operand (GEN_INT (upper), temp);
5144 emit_move_insn (temp, val);
5146 x = gen_rtx_PLUS (Pmode, x, temp);
5147 constant_term = GEN_INT (lower);
5150 if (GET_CODE (x) == PLUS)
5152 if (GET_CODE (XEXP (x, 0)) == REG)
5154 rtx temp = gen_reg_rtx (Pmode);
5155 rtx val = force_operand (XEXP (x, 1), temp);
5157 emit_move_insn (temp, val);
5159 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5162 else if (GET_CODE (XEXP (x, 1)) == REG)
5164 rtx temp = gen_reg_rtx (Pmode);
5165 rtx val = force_operand (XEXP (x, 0), temp);
5167 emit_move_insn (temp, val);
5169 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5173 if (constant_term != const0_rtx)
5174 x = gen_rtx_PLUS (Pmode, x, constant_term);
5179 /* Try a machine-dependent way of reloading an illegitimate address AD
5180 operand. If we find one, push the reload and return the new address.
5182 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5183 and TYPE is the reload type of the current reload. */
5186 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5187 int opnum, int type)
5189 if (!optimize || TARGET_LONG_DISPLACEMENT)
5192 if (GET_CODE (ad) == PLUS)
5194 rtx tem = simplify_binary_operation (PLUS, Pmode,
5195 XEXP (ad, 0), XEXP (ad, 1));
5200 if (GET_CODE (ad) == PLUS
5201 && GET_CODE (XEXP (ad, 0)) == REG
5202 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5203 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5205 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5206 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5207 rtx cst, tem, new_rtx;
5209 cst = GEN_INT (upper);
5210 if (!legitimate_reload_constant_p (cst))
5211 cst = force_const_mem (Pmode, cst);
5213 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5214 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5216 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5217 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5218 opnum, (enum reload_type) type);
5225 /* Emit code to move LEN bytes from SRC to DST. */
5228 s390_expand_movmem (rtx dst, rtx src, rtx len)
5230 /* When tuning for z10 or higher we rely on the Glibc functions to
5231 do the right thing.  We generate inline code only for constant
5232 lengths below 64k. */
5233 if (s390_tune >= PROCESSOR_2097_Z10
5234 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5237 /* Expand memcpy for constant length operands without a loop if it
5238 is shorter that way.
5240 With a constant length argument a
5241 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
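/* E.g. (an illustrative sketch of the emitted code) a constant
   600 byte copy becomes three instructions instead of a loop:
     mvc 0(256,%rdst),0(%rsrc)
     mvc 256(256,%rdst),256(%rsrc)
     mvc 512(88,%rdst),512(%rsrc)
   where %rdst/%rsrc stand for the base registers chosen later.  */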
5242 if (GET_CODE (len) == CONST_INT
5243 && INTVAL (len) >= 0
5244 && INTVAL (len) <= 256 * 6
5245 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5249 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5251 rtx newdst = adjust_address (dst, BLKmode, o);
5252 rtx newsrc = adjust_address (src, BLKmode, o);
5253 emit_insn (gen_movmem_short (newdst, newsrc,
5254 GEN_INT (l > 256 ? 255 : l - 1)));
5258 else if (TARGET_MVCLE)
5260 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5265 rtx dst_addr, src_addr, count, blocks, temp;
5266 rtx_code_label *loop_start_label = gen_label_rtx ();
5267 rtx_code_label *loop_end_label = gen_label_rtx ();
5268 rtx_code_label *end_label = gen_label_rtx ();
5271 mode = GET_MODE (len);
5272 if (mode == VOIDmode)
5275 dst_addr = gen_reg_rtx (Pmode);
5276 src_addr = gen_reg_rtx (Pmode);
5277 count = gen_reg_rtx (mode);
5278 blocks = gen_reg_rtx (mode);
5280 convert_move (count, len, 1);
5281 emit_cmp_and_jump_insns (count, const0_rtx,
5282 EQ, NULL_RTX, mode, 1, end_label);
5284 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5285 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5286 dst = change_address (dst, VOIDmode, dst_addr);
5287 src = change_address (src, VOIDmode, src_addr);
5289 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5292 emit_move_insn (count, temp);
5294 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5297 emit_move_insn (blocks, temp);
5299 emit_cmp_and_jump_insns (blocks, const0_rtx,
5300 EQ, NULL_RTX, mode, 1, loop_end_label);
5302 emit_label (loop_start_label);
5305 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5309 /* Issue a read prefetch for the +3 cache line. */
5310 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5311 const0_rtx, const0_rtx);
5312 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5313 emit_insn (prefetch);
5315 /* Issue a write prefetch for the +3 cache line. */
5316 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5317 const1_rtx, const0_rtx);
5318 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5319 emit_insn (prefetch);
5322 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5323 s390_load_address (dst_addr,
5324 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5325 s390_load_address (src_addr,
5326 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5328 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5331 emit_move_insn (blocks, temp);
5333 emit_cmp_and_jump_insns (blocks, const0_rtx,
5334 EQ, NULL_RTX, mode, 1, loop_end_label);
5336 emit_jump (loop_start_label);
5337 emit_label (loop_end_label);
5339 emit_insn (gen_movmem_short (dst, src,
5340 convert_to_mode (Pmode, count, 1)));
5341 emit_label (end_label);
5346 /* Emit code to set LEN bytes at DST to VAL.
5347 Make use of clrmem if VAL is zero. */
5350 s390_expand_setmem (rtx dst, rtx len, rtx val)
5352 const int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5354 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
5357 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5359 /* Expand setmem/clrmem for a constant length operand without a
5360 loop if it will be shorter that way.
5361 With a constant length and without pfd argument a
5362 clrmem loop is 32 bytes -> 5.3 * xc
5363 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
5364 if (GET_CODE (len) == CONST_INT
5365 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5366 || INTVAL (len) <= 257 * 3)
5367 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5371 if (val == const0_rtx)
5372 /* clrmem: emit 256 byte blockwise XCs. */
5373 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5375 rtx newdst = adjust_address (dst, BLKmode, o);
5376 emit_insn (gen_clrmem_short (newdst,
5377 GEN_INT (l > 256 ? 255 : l - 1)));
5380 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5381 setting the first byte to val and using a 256 byte mvc with
5382 one byte overlap to propagate the byte. */
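/* Illustration (sketch only): to set 8 bytes at dst to 0xaa this
   emits
     mvi 0(%rdst),0xaa        store the first byte
     mvc 1(7,%rdst),0(%rdst)  copy with one byte overlap
   mvc works strictly left to right, so every byte copies its
   already-written left neighbour and the value ripples through
   the whole block.  */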
5383 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5385 rtx newdst = adjust_address (dst, BLKmode, o);
5386 emit_move_insn (adjust_address (dst, QImode, o), val);
5389 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5390 emit_insn (gen_movmem_short (newdstp1, newdst,
5391 GEN_INT (l > 257 ? 255 : l - 2)));
5396 else if (TARGET_MVCLE)
5398 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5400 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5403 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5409 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5410 rtx_code_label *loop_start_label = gen_label_rtx ();
5411 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5412 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5413 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5416 mode = GET_MODE (len);
5417 if (mode == VOIDmode)
5420 dst_addr = gen_reg_rtx (Pmode);
5421 count = gen_reg_rtx (mode);
5422 blocks = gen_reg_rtx (mode);
5424 convert_move (count, len, 1);
5425 emit_cmp_and_jump_insns (count, const0_rtx,
5426 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5429 /* We need to make a copy of the target address since memset is
5430 supposed to return it unmodified.  We have to make the copy here
5431 already since the new reg is used at onebyte_end_label. */
5432 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5433 dst = change_address (dst, VOIDmode, dst_addr);
5435 if (val != const0_rtx)
5437 /* When using the overlapping mvc the original target
5438 address is only accessed as a single byte entity (even by
5439 the mvc reading this value). */
5440 set_mem_size (dst, 1);
5441 dstp1 = adjust_address (dst, VOIDmode, 1);
5442 emit_cmp_and_jump_insns (count,
5443 const1_rtx, EQ, NULL_RTX, mode, 1,
5444 onebyte_end_label, very_unlikely);
5447 /* There is one unconditional (mvi+mvc)/xc after the loop
5448 dealing with the rest of the bytes; subtracting two (mvi+mvc)
5449 or one (xc) here leaves this number of bytes to be handled by it. */
5451 temp = expand_binop (mode, add_optab, count,
5452 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5453 count, 1, OPTAB_DIRECT);
5455 emit_move_insn (count, temp);
5457 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5460 emit_move_insn (blocks, temp);
5462 emit_cmp_and_jump_insns (blocks, const0_rtx,
5463 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5465 emit_jump (loop_start_label);
5467 if (val != const0_rtx)
5469 /* The 1 byte != 0 special case. Not handled efficiently
5470 since we require two jumps for that. However, this
5471 should be very rare. */
5472 emit_label (onebyte_end_label);
5473 emit_move_insn (adjust_address (dst, QImode, 0), val);
5474 emit_jump (zerobyte_end_label);
5477 emit_label (loop_start_label);
5480 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5482 /* Issue a write prefetch for the +4 cache line. */
5483 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5485 const1_rtx, const0_rtx);
5486 emit_insn (prefetch);
5487 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5490 if (val == const0_rtx)
5491 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5494 /* Set the first byte in the block to the value and use an
5495 overlapping mvc for the block. */
5496 emit_move_insn (adjust_address (dst, QImode, 0), val);
5497 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5499 s390_load_address (dst_addr,
5500 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5502 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5505 emit_move_insn (blocks, temp);
5507 emit_cmp_and_jump_insns (blocks, const0_rtx,
5508 NE, NULL_RTX, mode, 1, loop_start_label);
5510 emit_label (restbyte_end_label);
5512 if (val == const0_rtx)
5513 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5516 /* Set the first byte in the block to the value and use an
5517 overlapping mvc for the block. */
5518 emit_move_insn (adjust_address (dst, QImode, 0), val);
5519 /* execute only uses the lowest 8 bits of count, which is
5520 exactly what we need here. */
5521 emit_insn (gen_movmem_short (dstp1, dst,
5522 convert_to_mode (Pmode, count, 1)));
5525 emit_label (zerobyte_end_label);
5529 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5530 and return the result in TARGET. */
5533 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5535 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5538 /* When tuning for z10 or higher we rely on the Glibc functions to
5539 do the right thing.  We generate inline code only for constant
5540 lengths below 64k. */
5541 if (s390_tune >= PROCESSOR_2097_Z10
5542 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5545 /* As the result of CMPINT is inverted compared to what we need,
5546 we have to swap the operands. */
5547 tmp = op0; op0 = op1; op1 = tmp;
5549 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5551 if (INTVAL (len) > 0)
5553 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5554 emit_insn (gen_cmpint (target, ccreg));
5557 emit_move_insn (target, const0_rtx);
5559 else if (TARGET_MVCLE)
5561 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5562 emit_insn (gen_cmpint (target, ccreg));
5566 rtx addr0, addr1, count, blocks, temp;
5567 rtx_code_label *loop_start_label = gen_label_rtx ();
5568 rtx_code_label *loop_end_label = gen_label_rtx ();
5569 rtx_code_label *end_label = gen_label_rtx ();
5572 mode = GET_MODE (len);
5573 if (mode == VOIDmode)
5576 addr0 = gen_reg_rtx (Pmode);
5577 addr1 = gen_reg_rtx (Pmode);
5578 count = gen_reg_rtx (mode);
5579 blocks = gen_reg_rtx (mode);
5581 convert_move (count, len, 1);
5582 emit_cmp_and_jump_insns (count, const0_rtx,
5583 EQ, NULL_RTX, mode, 1, end_label);
5585 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5586 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5587 op0 = change_address (op0, VOIDmode, addr0);
5588 op1 = change_address (op1, VOIDmode, addr1);
5590 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5593 emit_move_insn (count, temp);
5595 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5598 emit_move_insn (blocks, temp);
5600 emit_cmp_and_jump_insns (blocks, const0_rtx,
5601 EQ, NULL_RTX, mode, 1, loop_end_label);
5603 emit_label (loop_start_label);
5606 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5610 /* Issue a read prefetch for the +2 cache line of operand 1. */
5611 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5612 const0_rtx, const0_rtx);
5613 emit_insn (prefetch);
5614 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5616 /* Issue a read prefetch for the +2 cache line of operand 2. */
5617 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5618 const0_rtx, const0_rtx);
5619 emit_insn (prefetch);
5620 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5623 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5624 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5625 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5626 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5627 temp = gen_rtx_SET (pc_rtx, temp);
5628 emit_jump_insn (temp);
5630 s390_load_address (addr0,
5631 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5632 s390_load_address (addr1,
5633 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5635 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5638 emit_move_insn (blocks, temp);
5640 emit_cmp_and_jump_insns (blocks, const0_rtx,
5641 EQ, NULL_RTX, mode, 1, loop_end_label);
5643 emit_jump (loop_start_label);
5644 emit_label (loop_end_label);
5646 emit_insn (gen_cmpmem_short (op0, op1,
5647 convert_to_mode (Pmode, count, 1)));
5648 emit_label (end_label);
5650 emit_insn (gen_cmpint (target, ccreg));
5655 /* Emit a conditional jump to LABEL for condition code mask MASK using
5656 comparison operator COMPARISON.  Return the emitted jump insn. */
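/* Background (a hedged reading of CCRAWmode): the MASK bits follow the
   BC-style condition code mask encoding, i.e. CC0/CC1/CC2/CC3
   correspond to the mask values 8/4/2/1, so e.g. MASK == 8 with EQ
   branches when the condition code is 0.  */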
5659 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5663 gcc_assert (comparison == EQ || comparison == NE);
5664 gcc_assert (mask > 0 && mask < 15);
5666 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5667 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5668 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5669 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5670 temp = gen_rtx_SET (pc_rtx, temp);
5671 return emit_jump_insn (temp);
5674 /* Emit the instructions to implement strlen of STRING and store the
5675 result in TARGET. The string has the known ALIGNMENT. This
5676 version uses vector instructions and is therefore not appropriate
5677 for targets prior to z13. */
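/* Rough C-level sketch of the emitted code (illustrative only;
   first_zero_byte and load_partial are hypothetical helpers):

     idx = 0;
     if (addr % 16)                    // unaligned head: vll load,
       v = load_partial (addr, 16 - addr % 16);  // rest zero filled
     else
       v = *(V16QI *) addr;
     for (;;)
       {
         idx += 16;
         pos = first_zero_byte (v);    // vfene; 16 if none found
         if (pos < 16)
           break;
         v = *(V16QI *) (addr + idx);  // loop loads are aligned
       }
     return idx - 16 + pos;            // plus the unaligned head
                                       // fix-up performed below  */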
5680 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5682 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5683 int very_likely = REG_BR_PROB_BASE - 1;
5684 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5685 rtx str_reg = gen_reg_rtx (V16QImode);
5686 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5687 rtx str_idx_reg = gen_reg_rtx (Pmode);
5688 rtx result_reg = gen_reg_rtx (V16QImode);
5689 rtx is_aligned_label = gen_label_rtx ();
5690 rtx into_loop_label = NULL_RTX;
5691 rtx loop_start_label = gen_label_rtx ();
5693 rtx len = gen_reg_rtx (QImode);
5696 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5697 emit_move_insn (str_idx_reg, const0_rtx);
5699 if (INTVAL (alignment) < 16)
5701 /* Check whether the address happens to be properly aligned; if
5702 so, jump directly to the aligned loop. */
5703 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5704 str_addr_base_reg, GEN_INT (15)),
5705 const0_rtx, EQ, NULL_RTX,
5706 Pmode, 1, is_aligned_label);
5708 temp = gen_reg_rtx (Pmode);
5709 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5710 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5711 gcc_assert (REG_P (temp));
5712 highest_index_to_load_reg =
5713 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5714 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5715 gcc_assert (REG_P (highest_index_to_load_reg));
5716 emit_insn (gen_vllv16qi (str_reg,
5717 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5718 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5720 into_loop_label = gen_label_rtx ();
5721 s390_emit_jump (into_loop_label, NULL_RTX);
5725 emit_label (is_aligned_label);
5726 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5728 /* Reaching this point we are only performing 16 byte aligned loads. */
5730 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5732 emit_label (loop_start_label);
5733 LABEL_NUSES (loop_start_label) = 1;
5735 /* Load 16 bytes of the string into VR. */
5736 emit_move_insn (str_reg,
5737 gen_rtx_MEM (V16QImode,
5738 gen_rtx_PLUS (Pmode, str_idx_reg,
5739 str_addr_base_reg)));
5740 if (into_loop_label != NULL_RTX)
5742 emit_label (into_loop_label);
5743 LABEL_NUSES (into_loop_label) = 1;
5746 /* Increment string index by 16 bytes. */
5747 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5748 str_idx_reg, 1, OPTAB_DIRECT);
5750 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5751 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5753 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5754 REG_BR_PROB, very_likely);
5755 emit_insn (gen_vec_extractv16qi (len, result_reg, GEN_INT (7)));
5757 /* If the string pointer wasn't aligned we have loaded less than 16
5758 bytes and the remaining bytes got filled with zeros (by vll).
5759 Now we have to check whether the resulting index lies within the
5760 bytes that are actually part of the string. */
5762 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5763 highest_index_to_load_reg);
5764 s390_load_address (highest_index_to_load_reg,
5765 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5768 emit_insn (gen_movdicc (str_idx_reg, cond,
5769 highest_index_to_load_reg, str_idx_reg));
5771 emit_insn (gen_movsicc (str_idx_reg, cond,
5772 highest_index_to_load_reg, str_idx_reg));
5774 add_int_reg_note (s390_emit_jump (is_aligned_label, cond), REG_BR_PROB,
5777 expand_binop (Pmode, add_optab, str_idx_reg,
5778 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5779 /* FIXME: len is already zero extended - so avoid the llgcr emitted by the conversion below. */
5781 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5782 convert_to_mode (Pmode, len, 1),
5783 target, 1, OPTAB_DIRECT);
5785 emit_move_insn (target, temp);
5789 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5791 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
5792 rtx temp = gen_reg_rtx (Pmode);
5793 rtx src_addr = XEXP (src, 0);
5794 rtx dst_addr = XEXP (dst, 0);
5795 rtx src_addr_reg = gen_reg_rtx (Pmode);
5796 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5797 rtx offset = gen_reg_rtx (Pmode);
5798 rtx vsrc = gen_reg_rtx (V16QImode);
5799 rtx vpos = gen_reg_rtx (V16QImode);
5800 rtx loadlen = gen_reg_rtx (SImode);
5801 rtx gpos_qi = gen_reg_rtx (QImode);
5802 rtx gpos = gen_reg_rtx (SImode);
5803 rtx done_label = gen_label_rtx ();
5804 rtx loop_label = gen_label_rtx ();
5805 rtx exit_label = gen_label_rtx ();
5806 rtx full_label = gen_label_rtx ();
5808 /* Perform a quick check for the string terminator in the first
5809 (up to) 16 bytes and exit early if it is found. */
5811 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5812 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5813 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5814 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5815 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5816 /* gpos is the byte index if a zero was found and 16 otherwise.
5817 So if it is lower than the number of loaded bytes we have a hit. */
5818 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5820 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5822 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5824 emit_jump (exit_label);
5827 emit_label (full_label);
5828 LABEL_NUSES (full_label) = 1;
5830 /* Calculate `offset' so that src + offset points to the last byte
5831 before 16 byte alignment. */
5833 /* temp = src_addr & 0xf */
5834 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5837 /* offset = 0xf - temp */
5838 emit_move_insn (offset, GEN_INT (15));
5839 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5842 /* Store `offset' bytes in the destination string.  The quick check
5843 has loaded at least `offset' bytes into vsrc. */
5845 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5847 /* Advance to the next byte to be loaded. */
5848 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5851 /* Make sure the addresses are single regs which can be used as a base address. */
5853 emit_move_insn (src_addr_reg, src_addr);
5854 emit_move_insn (dst_addr_reg, dst_addr);
5858 emit_label (loop_label);
5859 LABEL_NUSES (loop_label) = 1;
5861 emit_move_insn (vsrc,
5862 gen_rtx_MEM (V16QImode,
5863 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5865 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5866 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5867 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5868 REG_BR_PROB, very_unlikely);
5870 emit_move_insn (gen_rtx_MEM (V16QImode,
5871 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5874 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5875 offset, 1, OPTAB_DIRECT);
5877 emit_jump (loop_label);
5882 /* We are done. Add the offset of the zero character to the dst_addr
5883 pointer to get the result. */
5885 emit_label (done_label);
5886 LABEL_NUSES (done_label) = 1;
5888 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5891 emit_insn (gen_vec_extractv16qi (gpos_qi, vpos, GEN_INT (7)));
5892 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5894 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5896 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5901 emit_label (exit_label);
5902 LABEL_NUSES (exit_label) = 1;
5906 /* Expand conditional increment or decrement using alc/slb instructions.
5907 Should generate code setting DST to either SRC or SRC + INCREMENT,
5908 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5909 Returns true if successful, false otherwise.
5911 That makes it possible to implement some if-constructs without jumps e.g.:
5912 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5913 unsigned int a, b, c;
5914 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5915 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5916 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5917 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5919 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5920 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5921 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5922 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5923 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
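/* A minimal C-level sketch of the transformation (illustrative):
     unsigned int a, b, c;
     if (a < b) c++;   // branchy source form
     c += (a < b);     // branchless form this function emits: the
                       // compare leaves a carry/borrow in the CC
                       // which alc/slb folds into the addition.  */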
5926 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5927 rtx dst, rtx src, rtx increment)
5929 machine_mode cmp_mode;
5930 machine_mode cc_mode;
5936 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5937 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5939 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5940 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5945 /* Try ADD LOGICAL WITH CARRY. */
5946 if (increment == const1_rtx)
5948 /* Determine CC mode to use. */
5949 if (cmp_code == EQ || cmp_code == NE)
5951 if (cmp_op1 != const0_rtx)
5953 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
5954 NULL_RTX, 0, OPTAB_WIDEN);
5955 cmp_op1 = const0_rtx;
5958 cmp_code = cmp_code == EQ ? LEU : GTU;
5961 if (cmp_code == LTU || cmp_code == LEU)
5966 cmp_code = swap_condition (cmp_code);
5983 /* Emit comparison instruction pattern. */
5984 if (!register_operand (cmp_op0, cmp_mode))
5985 cmp_op0 = force_reg (cmp_mode, cmp_op0);
5987 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
5988 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
5989 /* We use insn_invalid_p here to add clobbers if required. */
5990 ret = insn_invalid_p (emit_insn (insn), false);
5993 /* Emit ALC instruction pattern. */
5994 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
5995 gen_rtx_REG (cc_mode, CC_REGNUM),
5998 if (src != const0_rtx)
6000 if (!register_operand (src, GET_MODE (dst)))
6001 src = force_reg (GET_MODE (dst), src);
6003 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6004 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
6007 p = rtvec_alloc (2);
6009 gen_rtx_SET (dst, op_res);
6011 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6012 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6017 /* Try SUBTRACT LOGICAL WITH BORROW. */
6018 if (increment == constm1_rtx)
6020 /* Determine CC mode to use. */
6021 if (cmp_code == EQ || cmp_code == NE)
6023 if (cmp_op1 != const0_rtx)
6025 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6026 NULL_RTX, 0, OPTAB_WIDEN);
6027 cmp_op1 = const0_rtx;
6030 cmp_code = cmp_code == EQ ? LEU : GTU;
6033 if (cmp_code == GTU || cmp_code == GEU)
6038 cmp_code = swap_condition (cmp_code);
6055 /* Emit comparison instruction pattern. */
6056 if (!register_operand (cmp_op0, cmp_mode))
6057 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6059 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6060 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6061 /* We use insn_invalid_p here to add clobbers if required. */
6062 ret = insn_invalid_p (emit_insn (insn), false);
6065 /* Emit SLB instruction pattern. */
6066 if (!register_operand (src, GET_MODE (dst)))
6067 src = force_reg (GET_MODE (dst), src);
6069 op_res = gen_rtx_MINUS (GET_MODE (dst),
6070 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6071 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6072 gen_rtx_REG (cc_mode, CC_REGNUM),
6074 p = rtvec_alloc (2);
6076 gen_rtx_SET (dst, op_res);
6078 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6079 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6087 /* Expand code for the insv template. Return true if successful. */
6090 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6092 int bitsize = INTVAL (op1);
6093 int bitpos = INTVAL (op2);
6094 machine_mode mode = GET_MODE (dest);
6096 int smode_bsize, mode_bsize;
6099 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6102 /* Generate INSERT IMMEDIATE (IILL et al). */
6103 /* (set (ze (reg)) (const_int)). */
6105 && register_operand (dest, word_mode)
6106 && (bitpos % 16) == 0
6107 && (bitsize % 16) == 0
6108 && const_int_operand (src, VOIDmode))
6110 HOST_WIDE_INT val = INTVAL (src);
6111 int regpos = bitpos + bitsize;
6113 while (regpos > bitpos)
6115 machine_mode putmode;
6118 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6123 putsize = GET_MODE_BITSIZE (putmode);
6125 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6128 gen_int_mode (val, putmode));
6131 gcc_assert (regpos == bitpos);
6135 smode = smallest_mode_for_size (bitsize, MODE_INT);
6136 smode_bsize = GET_MODE_BITSIZE (smode);
6137 mode_bsize = GET_MODE_BITSIZE (mode);
6139 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6141 && (bitsize % BITS_PER_UNIT) == 0
6143 && (register_operand (src, word_mode)
6144 || const_int_operand (src, VOIDmode)))
6146 /* Emit standard pattern if possible. */
6147 if (smode_bsize == bitsize)
6149 emit_move_insn (adjust_address (dest, smode, 0),
6150 gen_lowpart (smode, src));
6154 /* (set (ze (mem)) (const_int)). */
6155 else if (const_int_operand (src, VOIDmode))
6157 int size = bitsize / BITS_PER_UNIT;
6158 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6160 UNITS_PER_WORD - size);
6162 dest = adjust_address (dest, BLKmode, 0);
6163 set_mem_size (dest, size);
6164 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6168 /* (set (ze (mem)) (reg)). */
6169 else if (register_operand (src, word_mode))
6172 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6176 /* Emit st,stcmh sequence. */
6177 int stcmh_width = bitsize - 32;
6178 int size = stcmh_width / BITS_PER_UNIT;
6180 emit_move_insn (adjust_address (dest, SImode, size),
6181 gen_lowpart (SImode, src));
6182 set_mem_size (dest, size);
6183 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6184 GEN_INT (stcmh_width),
6186 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6192 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6193 if ((bitpos % BITS_PER_UNIT) == 0
6194 && (bitsize % BITS_PER_UNIT) == 0
6195 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6197 && (mode == DImode || mode == SImode)
6198 && register_operand (dest, mode))
6200 /* Emit a strict_low_part pattern if possible. */
6201 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6203 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6204 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6205 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6206 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6210 /* ??? There are more powerful versions of ICM that are not
6211 completely represented in the md file. */
6214 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6215 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6217 machine_mode mode_s = GET_MODE (src);
6219 if (CONSTANT_P (src))
6221 /* For constant zero values the representation with AND
6222 appears to be folded in more situations than the (set
6223 (zero_extract) ...).
6224 We only do this when the start and end of the bitfield
6225 remain in the same SImode chunk.  That way nihf or nilf can be used.
6227 The AND patterns might still generate a risbg for this. */
6228 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6231 src = force_reg (mode, src);
6233 else if (mode_s != mode)
6235 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6236 src = force_reg (mode_s, src);
6237 src = gen_lowpart (mode, src);
6240 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6241 op = gen_rtx_SET (op, src);
6245 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6246 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6256 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6257 register that holds VAL of mode MODE shifted by COUNT bits. */
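/* E.g. (illustrative) for MODE == QImode, VAL == 0x1ab and
   COUNT == 16: (0x1ab & GET_MODE_MASK (QImode)) << 16 == 0xab0000.  */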
6260 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6262 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6263 NULL_RTX, 1, OPTAB_DIRECT);
6264 return expand_simple_binop (SImode, ASHIFT, val, count,
6265 NULL_RTX, 1, OPTAB_DIRECT);
6268 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6269 the result in TARGET. */
6272 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6273 rtx cmp_op1, rtx cmp_op2)
6275 machine_mode mode = GET_MODE (target);
6276 bool neg_p = false, swap_p = false;
6279 if (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_VECTOR_FLOAT)
6283 /* NE a != b -> !(a == b) */
6284 case NE: cond = EQ; neg_p = true; break;
6285 /* UNGT a u> b -> !(b >= a) */
6286 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6287 /* UNGE a u>= b -> !(b > a) */
6288 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6289 /* LE: a <= b -> b >= a */
6290 case LE: cond = GE; swap_p = true; break;
6291 /* UNLE: a u<= b -> !(a > b) */
6292 case UNLE: cond = GT; neg_p = true; break;
6293 /* LT: a < b -> b > a */
6294 case LT: cond = GT; swap_p = true; break;
6295 /* UNLT: a u< b -> !(a >= b) */
6296 case UNLT: cond = GE; neg_p = true; break;
6298 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6301 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6304 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6307 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6316 /* NE: a != b -> !(a == b) */
6317 case NE: cond = EQ; neg_p = true; break;
6318 /* GE: a >= b -> !(b > a) */
6319 case GE: cond = GT; neg_p = true; swap_p = true; break;
6320 /* GEU: a >= b -> !(b > a) */
6321 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6322 /* LE: a <= b -> !(a > b) */
6323 case LE: cond = GT; neg_p = true; break;
6324 /* LEU: a <= b -> !(a > b) */
6325 case LEU: cond = GTU; neg_p = true; break;
6326 /* LT: a < b -> b > a */
6327 case LT: cond = GT; swap_p = true; break;
6328 /* LTU: a < b -> b > a */
6329 case LTU: cond = GTU; swap_p = true; break;
6336 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6339 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6341 cmp_op1, cmp_op2)));
6343 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6346 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6347 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6348 elements in CMP1 and CMP2 fulfill the comparison.
6349 This function is only used to emit patterns for the vx builtins and
6350 therefore only handles comparison codes required by the builtins. */
6353 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6354 rtx cmp1, rtx cmp2, bool all_p)
6356 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6357 rtx tmp_reg = gen_reg_rtx (SImode);
6358 bool swap_p = false;
6360 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6366 cc_producer_mode = CCVEQmode;
6370 code = swap_condition (code);
6375 cc_producer_mode = CCVIHmode;
6379 code = swap_condition (code);
6384 cc_producer_mode = CCVIHUmode;
6390 scratch_mode = GET_MODE (cmp1);
6391 /* These codes represent inverted CC interpretations. Inverting
6392 an ALL CC mode results in an ANY CC mode and the other way
6393 around.  Invert the all_p flag here to compensate for that. */
6395 if (code == NE || code == LE || code == LEU)
6398 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6400 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6406 case EQ: cc_producer_mode = CCVEQmode; break;
6407 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6408 case GT: cc_producer_mode = CCVFHmode; break;
6409 case GE: cc_producer_mode = CCVFHEmode; break;
6410 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6411 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6412 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6413 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6414 default: gcc_unreachable ();
6416 scratch_mode = mode_for_vector (
6417 int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))),
6418 GET_MODE_NUNITS (GET_MODE (cmp1)));
6419 gcc_assert (scratch_mode != BLKmode);
6424 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6436 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6437 gen_rtvec (2, gen_rtx_SET (
6438 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6439 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6440 gen_rtx_CLOBBER (VOIDmode,
6441 gen_rtx_SCRATCH (scratch_mode)))));
6442 emit_move_insn (target, const0_rtx);
6443 emit_move_insn (tmp_reg, const1_rtx);
6445 emit_move_insn (target,
6446 gen_rtx_IF_THEN_ELSE (SImode,
6447 gen_rtx_fmt_ee (code, VOIDmode,
6448 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
6453 /* Invert the comparison CODE applied to a CC mode. This is only safe
6454 if we know whether the result was created by a floating point
6455 compare or not.  For the CCV modes this is encoded as part of the mode. */
6458 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6460 /* Reversal of FP compares takes extra care -- an ordered compare
6461 becomes an unordered compare and vice versa. */
6462 if (mode == CCVFALLmode || mode == CCVFANYmode)
6463 return reverse_condition_maybe_unordered (code);
6464 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6465 return reverse_condition (code);
6470 /* Generate a vector comparison expression loading either elements of
6471 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1 and CMP_OP2. */
6475 s390_expand_vcond (rtx target, rtx then, rtx els,
6476 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6479 machine_mode result_mode;
6482 machine_mode target_mode = GET_MODE (target);
6483 machine_mode cmp_mode = GET_MODE (cmp_op1);
6484 rtx op = (cond == LT) ? els : then;
6486 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6487 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6488 for short and byte (x >> 15 and x >> 7 respectively). */
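/* Numeric example (illustrative) for a 32 bit element x = 0x80000001:
   the arithmetic shift x >> 31 yields 0xffffffff (-1) while the
   logical shift yields 1, matching the two selections above.  */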
6489 if ((cond == LT || cond == GE)
6490 && target_mode == cmp_mode
6491 && cmp_op2 == CONST0_RTX (cmp_mode)
6492 && op == CONST0_RTX (target_mode)
6493 && s390_vector_mode_supported_p (target_mode)
6494 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6496 rtx negop = (cond == LT) ? then : els;
6498 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6500 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6501 if (negop == CONST1_RTX (target_mode))
6503 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6504 GEN_INT (shift), target,
6507 emit_move_insn (target, res);
6511 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6512 else if (all_ones_operand (negop, target_mode))
6514 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6515 GEN_INT (shift), target,
6518 emit_move_insn (target, res);
6523 /* We always use an integral type vector to hold the comparison result. */
6525 result_mode = mode_for_vector (int_mode_for_mode (GET_MODE_INNER (cmp_mode)),
6526 GET_MODE_NUNITS (cmp_mode));
6527 result_target = gen_reg_rtx (result_mode);
6529 /* We allow vector immediates as comparison operands that
6530 can be handled by the optimization above but not by the
6531 following code. Hence, force them into registers here. */
6532 if (!REG_P (cmp_op1))
6533 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6535 if (!REG_P (cmp_op2))
6536 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6538 s390_expand_vec_compare (result_target, cond,
6541 /* If the results are supposed to be either -1 or 0 we are done
6542 since this is what our compare instructions generate anyway. */
6543 if (all_ones_operand (then, GET_MODE (then))
6544 && const0_operand (els, GET_MODE (els)))
6546 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6551 /* Otherwise we will do a vsel afterwards. */
6552 /* This gets triggered e.g.
6553 with gcc.c-torture/compile/pr53410-1.c */
6555 then = force_reg (target_mode, then);
6558 els = force_reg (target_mode, els);
6560 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6562 CONST0_RTX (result_mode));
6564 /* We compared the result against zero above so we have to swap then and els here. */
6566 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6568 gcc_assert (target_mode == GET_MODE (then));
6569 emit_insn (gen_rtx_SET (target, tmp));
6572 /* Emit the RTX necessary to initialize the vector TARGET with the values in VALS. */
6575 s390_expand_vec_init (rtx target, rtx vals)
6577 machine_mode mode = GET_MODE (target);
6578 machine_mode inner_mode = GET_MODE_INNER (mode);
6579 int n_elts = GET_MODE_NUNITS (mode);
6580 bool all_same = true, all_regs = true, all_const_int = true;
6584 for (i = 0; i < n_elts; ++i)
6586 x = XVECEXP (vals, 0, i);
6588 if (!CONST_INT_P (x))
6589 all_const_int = false;
6591 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6598 /* Use vector gen mask or vector gen byte mask if possible. */
6599 if (all_same && all_const_int
6600 && (XVECEXP (vals, 0, 0) == const0_rtx
6601 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6603 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6605 emit_insn (gen_rtx_SET (target,
6606 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6612 emit_insn (gen_rtx_SET (target,
6613 gen_rtx_VEC_DUPLICATE (mode,
6614 XVECEXP (vals, 0, 0))));
6621 && GET_MODE_SIZE (inner_mode) == 8)
6623 /* Use vector load pair. */
6624 emit_insn (gen_rtx_SET (target,
6625 gen_rtx_VEC_CONCAT (mode,
6626 XVECEXP (vals, 0, 0),
6627 XVECEXP (vals, 0, 1))));
6631 /* Use vector load logical element and zero. */
6632 if (TARGET_VXE && (mode == V4SImode || mode == V4SFmode))
6636 x = XVECEXP (vals, 0, 0);
6637 if (memory_operand (x, inner_mode))
6639 for (i = 1; i < n_elts; ++i)
6640 found = found && XVECEXP (vals, 0, i) == const0_rtx;
6644 machine_mode half_mode = (inner_mode == SFmode
6645 ? V2SFmode : V2SImode);
6646 emit_insn (gen_rtx_SET (target,
6647 gen_rtx_VEC_CONCAT (mode,
6648 gen_rtx_VEC_CONCAT (half_mode,
6651 gen_rtx_VEC_CONCAT (half_mode,
6659 /* We are about to set the vector elements one by one.  Zero out the
6660 full register first in order to help the data flow framework to
6661 detect it as a full VR set. */
6662 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6664 /* Unfortunately the vec_init expander is not allowed to fail. So
6665 we have to implement the fallback ourselves. */
6666 for (i = 0; i < n_elts; i++)
6668 rtx elem = XVECEXP (vals, 0, i);
6669 if (!general_operand (elem, GET_MODE (elem)))
6670 elem = force_reg (inner_mode, elem);
6672 emit_insn (gen_rtx_SET (target,
6673 gen_rtx_UNSPEC (mode,
6675 GEN_INT (i), target),
6680 /* Structure to hold the initial parameters for a compare_and_swap operation
6681 in HImode and QImode. */
6683 struct alignment_context
6685 rtx memsi; /* SI aligned memory location. */
6686 rtx shift; /* Bit offset with regard to lsb. */
6687 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6688 rtx modemaski; /* ~modemask */
6689 bool aligned; /* True if memory is aligned, false otherwise. */
6692 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6693 the structure AC for transparent simplification, if the memory alignment
6694 is known to be at least 32 bit.  MEM is the memory location for the actual
6695 operation and MODE its mode. */
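/* Worked example (illustrative, big endian): for a QImode operand at
   address 0x1003, memsi covers 0x1000..0x1003 and byteoffset = 3.
   The initial shift of GET_MODE_SIZE (SImode) - 1 = 3 bytes becomes
   3 - 3 = 0 bytes = 0 bits, and modemask = 0xff << 0 selects the
   least significant byte of the SImode word -- which is exactly the
   byte at 0x1003.  */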
6698 init_alignment_context (struct alignment_context *ac, rtx mem,
6701 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6702 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6705 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6708 /* Alignment is unknown. */
6709 rtx byteoffset, addr, align;
6711 /* Force the address into a register. */
6712 addr = force_reg (Pmode, XEXP (mem, 0));
6714 /* Align it to SImode. */
6715 align = expand_simple_binop (Pmode, AND, addr,
6716 GEN_INT (-GET_MODE_SIZE (SImode)),
6717 NULL_RTX, 1, OPTAB_DIRECT);
6719 ac->memsi = gen_rtx_MEM (SImode, align);
6720 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6721 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6722 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6724 /* Calculate shiftcount. */
6725 byteoffset = expand_simple_binop (Pmode, AND, addr,
6726 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6727 NULL_RTX, 1, OPTAB_DIRECT);
6728 /* As we already have some offset, evaluate the remaining distance. */
6729 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6730 NULL_RTX, 1, OPTAB_DIRECT);
6733 /* Shift is the byte count, but we need the bitcount. */
6734 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6735 NULL_RTX, 1, OPTAB_DIRECT);
6737 /* Calculate masks. */
6738 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6739 GEN_INT (GET_MODE_MASK (mode)),
6740 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6741 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6745 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6746 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6747 perform the merge in SEQ2. */
6750 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6751 machine_mode mode, rtx val, rtx ins)
6758 tmp = copy_to_mode_reg (SImode, val);
6759 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6763 *seq2 = get_insns ();
6770 /* Failed to use insv. Generate a two part shift and mask. */
6772 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6773 *seq1 = get_insns ();
6777 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6778 *seq2 = get_insns ();
6784 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6785 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6786 value to set if CMP == MEM. */
6789 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6790 rtx cmp, rtx new_rtx, bool is_weak)
6792 struct alignment_context ac;
6793 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6794 rtx res = gen_reg_rtx (SImode);
6795 rtx_code_label *csloop = NULL, *csend = NULL;
6797 gcc_assert (MEM_P (mem));
6799 init_alignment_context (&ac, mem, mode);
6801 /* Load full word. Subsequent loads are performed by CS. */
6802 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6803 NULL_RTX, 1, OPTAB_DIRECT);
6805 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6806 possible, we try to use insv to make this happen efficiently. If
6807 that fails we'll generate code both inside and outside the loop. */
6808 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6809 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6816 /* Start CS loop. */
6819 /* Begin assuming success. */
6820 emit_move_insn (btarget, const1_rtx);
6822 csloop = gen_label_rtx ();
6823 csend = gen_label_rtx ();
6824 emit_label (csloop);
6827 /* val = "<mem>00..0<mem>"
6828 * cmp = "00..0<cmp>00..0"
6829 * new = "00..0<new>00..0"
6835 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv, CCZ1mode);
6837 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6842 /* Jump to end if we're done (likely?). */
6843 s390_emit_jump (csend, cc);
6845 /* Check for changes outside mode, and loop internally if so.
6846 Arrange the moves so that the compare is adjacent to the
6847 branch so that we can generate CRJ. */
6848 tmp = copy_to_reg (val);
6849 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6851 cc = s390_emit_compare (NE, val, tmp);
6852 s390_emit_jump (csloop, cc);
6855 emit_move_insn (btarget, const0_rtx);
6859 /* Return the correct part of the bitfield. */
6860 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6861 NULL_RTX, 1, OPTAB_DIRECT), 1);
6864 /* Variant of s390_expand_cs for SI, DI and TI modes. */
6866 s390_expand_cs_tdsi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6867 rtx cmp, rtx new_rtx, bool is_weak)
6869 rtx output = vtarget;
6870 rtx_code_label *skip_cs_label = NULL;
6871 bool do_const_opt = false;
6873 if (!register_operand (output, mode))
6874 output = gen_reg_rtx (mode);
6876 /* If IS_WEAK is true and the INPUT value is a constant, compare the memory
6877 with the constant first and skip the compare_and_swap because it is very
6878 expensive and likely to fail anyway.
6879 Note 1: This is done only for IS_WEAK.  C11 allows optimizations that may
6880 cause spurious failures in that case.
6881 Note 2: It may be useful to do this also for non-constant INPUT.
6882 Note 3: Currently only targets with "load on condition" are supported
6883 (z196 and newer). */
6886 && (mode == SImode || mode == DImode))
6887 do_const_opt = (is_weak && CONST_INT_P (cmp));
6891 const int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
6892 rtx cc = gen_rtx_REG (CCZmode, CC_REGNUM);
6894 skip_cs_label = gen_label_rtx ();
6895 emit_move_insn (btarget, const0_rtx);
6896 if (CONST_INT_P (cmp) && INTVAL (cmp) == 0)
6898 rtvec lt = rtvec_alloc (2);
6900 /* Load-and-test + conditional jump. */
6902 = gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, mem, cmp));
6903 RTVEC_ELT (lt, 1) = gen_rtx_SET (output, mem);
6904 emit_insn (gen_rtx_PARALLEL (VOIDmode, lt));
6908 emit_move_insn (output, mem);
6909 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
6911 s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
6912 add_int_reg_note (get_last_insn (), REG_BR_PROB, very_unlikely);
6913 /* If the jump is not taken, OUTPUT is the expected value. */
6915 /* Reload newval to a register manually, *after* the compare and jump
6916 above. Otherwise Reload might place it before the jump. */
6919 cmp = force_reg (mode, cmp);
6920 new_rtx = force_reg (mode, new_rtx);
6921 s390_emit_compare_and_swap (EQ, output, mem, cmp, new_rtx,
6922 (do_const_opt) ? CCZmode : CCZ1mode);
6923 if (skip_cs_label != NULL)
6924 emit_label (skip_cs_label);
6926 /* We deliberately accept non-register operands in the predicate
6927 to ensure the write back to the output operand happens *before*
6928 the store-flags code below. This makes it easier for combine
6929 to merge the store-flags code with a potential test-and-branch
6930 pattern following (immediately!) afterwards. */
6931 if (output != vtarget)
6932 emit_move_insn (vtarget, output);
6938 /* Do not use gen_cstorecc4 here because it writes either 1 or 0, but
6939 btarget has already been initialized with 0 above. */
6940 cc = gen_rtx_REG (CCZmode, CC_REGNUM);
6941 cond = gen_rtx_EQ (VOIDmode, cc, const0_rtx);
6942 ite = gen_rtx_IF_THEN_ELSE (SImode, cond, const1_rtx, btarget);
6943 emit_insn (gen_rtx_SET (btarget, ite));
6949 cc = gen_rtx_REG (CCZ1mode, CC_REGNUM);
6950 cond = gen_rtx_EQ (SImode, cc, const0_rtx);
6951 emit_insn (gen_cstorecc4 (btarget, cond, cc, const0_rtx));
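/* Example (illustrative): the constant fast path above matches weak
   compare-exchange calls whose expected value is a literal; the fourth
   argument of the builtin requests a weak CAS:

     _Bool
     try_lock (long *lock)
     {
       long expected = 0;
       return __atomic_compare_exchange_n (lock, &expected, 1, 1,
					   __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
     }

   With CMP == 0 the code above uses load-and-test to inspect *LOCK first
   and branches around the expensive CS(G) when the lock is already
   taken.  */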
6955 /* Expand an atomic compare and swap operation. MEM is the memory location,
6956 CMP the old value to compare MEM with and NEW_RTX the value to set if
6960 s390_expand_cs (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6961 rtx cmp, rtx new_rtx, bool is_weak)
6968 s390_expand_cs_tdsi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
6972 s390_expand_cs_hqi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
6979 /* Expand an atomic_exchange operation simulated with a compare-and-swap loop.
6980 The memory location MEM is set to INPUT. OUTPUT is set to the previous value
6984 s390_expand_atomic_exchange_tdsi (rtx output, rtx mem, rtx input)
6986 machine_mode mode = GET_MODE (mem);
6987 rtx_code_label *csloop;
6990 && (mode == DImode || mode == SImode)
6991 && CONST_INT_P (input) && INTVAL (input) == 0)
6993 emit_move_insn (output, const0_rtx);
6995 emit_insn (gen_atomic_fetch_anddi (output, mem, const0_rtx, input));
6997 emit_insn (gen_atomic_fetch_andsi (output, mem, const0_rtx, input));
7001 input = force_reg (mode, input);
7002 emit_move_insn (output, mem);
7003 csloop = gen_label_rtx ();
7004 emit_label (csloop);
7005 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, output, mem, output,
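/* Example (illustrative): the expansion above backs __atomic_exchange_n
   for SImode/DImode values, e.g.:

     long
     swap_in (long *p, long v)
     {
       return __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST);
     }

   Exchanging in the constant 0 takes the atomic fetch-and-AND-with-zero
   shortcut above (assuming the target provides the interlocked-access
   instructions); everything else goes through the CS loop.  */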
7009 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
7010 and VAL the value to play with. If AFTER is true then store the value
7011 MEM holds after the operation, if AFTER is false then store the value MEM
7012 holds before the operation. If TARGET is zero then discard that value, else
7013 store it to TARGET. */
7016 s390_expand_atomic (machine_mode mode, enum rtx_code code,
7017 rtx target, rtx mem, rtx val, bool after)
7019 struct alignment_context ac;
7021 rtx new_rtx = gen_reg_rtx (SImode);
7022 rtx orig = gen_reg_rtx (SImode);
7023 rtx_code_label *csloop = gen_label_rtx ();
7025 gcc_assert (!target || register_operand (target, VOIDmode));
7026 gcc_assert (MEM_P (mem));
7028 init_alignment_context (&ac, mem, mode);
7030 /* Shift val to the correct bit positions.
7031 Preserve "icm", but prevent "ex icm". */
7032 if (!(ac.aligned && code == SET && MEM_P (val)))
7033 val = s390_expand_mask_and_shift (val, mode, ac.shift);
7035 /* Further preparation insns. */
7036 if (code == PLUS || code == MINUS)
7037 emit_move_insn (orig, val);
7038 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
7039 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
7040 NULL_RTX, 1, OPTAB_DIRECT);
7042 /* Load full word. Subsequent loads are performed by CS. */
7043 cmp = force_reg (SImode, ac.memsi);
7045 /* Start CS loop. */
7046 emit_label (csloop);
7047 emit_move_insn (new_rtx, cmp);
7049 /* Patch new with val at correct position. */
7054 val = expand_simple_binop (SImode, code, new_rtx, orig,
7055 NULL_RTX, 1, OPTAB_DIRECT);
7056 val = expand_simple_binop (SImode, AND, val, ac.modemask,
7057 NULL_RTX, 1, OPTAB_DIRECT);
7060 if (ac.aligned && MEM_P (val))
7061 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
7062 0, 0, SImode, val, false);
7065 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
7066 NULL_RTX, 1, OPTAB_DIRECT);
7067 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
7068 NULL_RTX, 1, OPTAB_DIRECT);
7074 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
7075 NULL_RTX, 1, OPTAB_DIRECT);
7077 case MULT: /* NAND */
7078 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
7079 NULL_RTX, 1, OPTAB_DIRECT);
7080 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
7081 NULL_RTX, 1, OPTAB_DIRECT);
7087 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
7088 ac.memsi, cmp, new_rtx,
7091 /* Return the correct part of the bitfield. */
7093 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
7094 after ? new_rtx : cmp, ac.shift,
7095 NULL_RTX, 1, OPTAB_DIRECT), 1);
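/* Example (illustrative): the CS loop above implements read-modify-write
   builtins on HImode/QImode objects, e.g.:

     short
     fetch_add_short (short *p, short v)
     {
       return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);
     }

   Here CODE is PLUS and AFTER is false: VAL is shifted to the halfword's
   position inside the containing word, the addition is done on a
   full-word copy, and CS retries until no other part of the word has
   changed.  */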
7098 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7099 We need to emit DTP-relative relocations. */
7101 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
7104 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
7109 fputs ("\t.long\t", file);
7112 fputs ("\t.quad\t", file);
7117 output_addr_const (file, x);
7118 fputs ("@DTPOFF", file);
7121 /* Return the proper mode for REGNO being represented in the dwarf
7124 s390_dwarf_frame_reg_mode (int regno)
7126 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
7128 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
7129 if (GENERAL_REGNO_P (regno))
7132 /* The rightmost 64 bits of vector registers are call-clobbered. */
7133 if (GET_MODE_SIZE (save_mode) > 8)
7139 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
7140 /* Implement TARGET_MANGLE_TYPE. */
7143 s390_mangle_type (const_tree type)
7145 type = TYPE_MAIN_VARIANT (type);
7147 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
7148 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
7151 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
7152 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
7153 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
7154 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
7156 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
7157 && TARGET_LONG_DOUBLE_128)
7160 /* For all other types, use normal C++ mangling. */
7165 /* In the name of slightly smaller debug output, and to cater to
7166 general assembler lossage, recognize various UNSPEC sequences
7167 and turn them back into a direct symbol reference. */
7170 s390_delegitimize_address (rtx orig_x)
7174 orig_x = delegitimize_mem_from_attrs (orig_x);
7177 /* Extract the symbol ref from:
7178 (plus:SI (reg:SI 12 %r12)
7179 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
7180 UNSPEC_GOTOFF/PLTOFF)))
7182 (plus:SI (reg:SI 12 %r12)
7183 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7184 UNSPEC_GOTOFF/PLTOFF)
7185 (const_int 4 [0x4])))) */
7186 if (GET_CODE (x) == PLUS
7187 && REG_P (XEXP (x, 0))
7188 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7189 && GET_CODE (XEXP (x, 1)) == CONST)
7191 HOST_WIDE_INT offset = 0;
7193 /* The const operand. */
7194 y = XEXP (XEXP (x, 1), 0);
7196 if (GET_CODE (y) == PLUS
7197 && GET_CODE (XEXP (y, 1)) == CONST_INT)
7199 offset = INTVAL (XEXP (y, 1));
7203 if (GET_CODE (y) == UNSPEC
7204 && (XINT (y, 1) == UNSPEC_GOTOFF
7205 || XINT (y, 1) == UNSPEC_PLTOFF))
7206 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
7209 if (GET_CODE (x) != MEM)
7213 if (GET_CODE (x) == PLUS
7214 && GET_CODE (XEXP (x, 1)) == CONST
7215 && GET_CODE (XEXP (x, 0)) == REG
7216 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7218 y = XEXP (XEXP (x, 1), 0);
7219 if (GET_CODE (y) == UNSPEC
7220 && XINT (y, 1) == UNSPEC_GOT)
7221 y = XVECEXP (y, 0, 0);
7225 else if (GET_CODE (x) == CONST)
7227 /* Extract the symbol ref from:
7228 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7229 UNSPEC_PLT/GOTENT))) */
7232 if (GET_CODE (y) == UNSPEC
7233 && (XINT (y, 1) == UNSPEC_GOTENT
7234 || XINT (y, 1) == UNSPEC_PLT))
7235 y = XVECEXP (y, 0, 0);
7242 if (GET_MODE (orig_x) != Pmode)
7244 if (GET_MODE (orig_x) == BLKmode)
7246 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7253 /* Output operand OP to stdio stream FILE.
7254 OP is an address (register + offset) which is not used to address data;
7255 instead the rightmost bits are interpreted as the value. */
7258 print_addrstyle_operand (FILE *file, rtx op)
7260 HOST_WIDE_INT offset;
7263 /* Extract base register and offset. */
7264 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7270 gcc_assert (GET_CODE (base) == REG);
7271 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7272 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
7275 /* Offsets are restricted to twelve bits. */
7276 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7278 fprintf (file, "(%s)", reg_names[REGNO (base)]);
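/* Example (illustrative): an address-style operand %r3 + 70 prints as
   "70(%r3)"; a negative offset such as -2 is folded into the twelve-bit
   displacement field and prints as "4094(%r3)", consistent with the
   hardware interpreting only the rightmost bits of the value.  */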
7281 /* Assigns the number of NOP halfwords to be emitted before and after the
7282 function label to *HW_BEFORE and *HW_AFTER. Neither pointer may be NULL.
7283 If hotpatching is disabled for the function, the values are set to zero.
7287 s390_function_num_hotpatch_hw (tree decl,
7293 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7295 /* Handle the arguments of the hotpatch attribute. The values
7296 specified via attribute might override the cmdline argument
7300 tree args = TREE_VALUE (attr);
7302 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7303 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7307 /* Use the values specified by the cmdline arguments. */
7308 *hw_before = s390_hotpatch_hw_before_label;
7309 *hw_after = s390_hotpatch_hw_after_label;
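/* Example (illustrative): per-function hotpatch control via the
   attribute, which overrides the -mhotpatch= command-line setting:

     __attribute__ ((hotpatch (1, 2)))
     void patchable_fn (void);

   This requests one halfword of NOPs before the function label and two
   halfwords after it (patchable_fn is a placeholder name).  */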
7313 /* Write the current .machine and .machinemode specification to the assembler
7316 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7318 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7320 fprintf (asm_out_file, "\t.machinemode %s\n",
7321 (TARGET_ZARCH) ? "zarch" : "esa");
7322 fprintf (asm_out_file, "\t.machine \"%s", processor_table[s390_arch].name);
7323 if (S390_USE_ARCHITECTURE_MODIFIERS)
7327 cpu_flags = processor_flags_table[(int) s390_arch];
7328 if (TARGET_HTM && !(cpu_flags & PF_TX))
7329 fprintf (asm_out_file, "+htm");
7330 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7331 fprintf (asm_out_file, "+nohtm");
7332 if (TARGET_VX && !(cpu_flags & PF_VX))
7333 fprintf (asm_out_file, "+vx");
7334 else if (!TARGET_VX && (cpu_flags & PF_VX))
7335 fprintf (asm_out_file, "+novx");
7337 fprintf (asm_out_file, "\"\n");
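/* Example (illustrative): for, say, -march=z13 -mzarch with the vector
   facility disabled, this emits roughly:

	.machinemode zarch
	.machine "z13+novx"
 */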
7340 /* Write an extra function header before the very start of the function. */
7343 s390_asm_output_function_prefix (FILE *asm_out_file,
7344 const char *fnname ATTRIBUTE_UNUSED)
7346 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7348 /* Since only the function-specific options are saved, and not an
7349 indication of which options were explicitly set, it is too much work here
7350 to figure out which options have actually changed. Thus, generate .machine
7351 and .machinemode whenever a function has the target attribute or pragma. */
7352 fprintf (asm_out_file, "\t.machinemode push\n");
7353 fprintf (asm_out_file, "\t.machine push\n");
7354 s390_asm_output_machine_for_arch (asm_out_file);
7357 /* Write an extra function footer after the very end of the function. */
7360 s390_asm_declare_function_size (FILE *asm_out_file,
7361 const char *fnname, tree decl)
7363 if (!flag_inhibit_size_directive)
7364 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7365 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7367 fprintf (asm_out_file, "\t.machine pop\n");
7368 fprintf (asm_out_file, "\t.machinemode pop\n");
7372 /* Write the extra assembler code needed to declare a function properly. */
7375 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7378 int hw_before, hw_after;
7380 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7383 unsigned int function_alignment;
7386 /* Add a trampoline code area before the function label and initialize it
7387 with two-byte nop instructions. This area can be overwritten with code
7388 that jumps to a patched version of the function. */
7389 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
7390 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7392 for (i = 1; i < hw_before; i++)
7393 fputs ("\tnopr\t%r0\n", asm_out_file);
7395 /* Note: The function label must be aligned so that (a) the bytes of the
7396 following nop do not cross a cacheline boundary, and (b) a jump address
7397 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7398 stored directly before the label without crossing a cacheline
7399 boundary. All this is necessary to make sure the trampoline code can
7400 be changed atomically.
7401 This alignment is done automatically using FUNCTION_BOUNDARY, but
7402 if there are NOPs before the function label, the alignment is placed
7403 before them. So it is necessary to duplicate the alignment after the
7405 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7406 if (! DECL_USER_ALIGN (decl))
7407 function_alignment = MAX (function_alignment,
7408 (unsigned int) align_functions);
7409 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7410 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7413 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7415 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7416 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7417 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7418 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7419 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7420 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7421 s390_warn_framesize);
7422 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7423 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7424 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7425 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7426 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7427 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7428 TARGET_PACKED_STACK);
7429 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7430 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7431 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7432 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7433 s390_warn_dynamicstack_p);
7435 ASM_OUTPUT_LABEL (asm_out_file, fname);
7437 asm_fprintf (asm_out_file,
7438 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7442 /* Output machine-dependent UNSPECs occurring in address constant X
7443 in assembler syntax to stdio stream FILE. Returns true if the
7444 constant X could be recognized, false otherwise. */
7447 s390_output_addr_const_extra (FILE *file, rtx x)
7449 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7450 switch (XINT (x, 1))
7453 output_addr_const (file, XVECEXP (x, 0, 0));
7454 fprintf (file, "@GOTENT");
7457 output_addr_const (file, XVECEXP (x, 0, 0));
7458 fprintf (file, "@GOT");
7461 output_addr_const (file, XVECEXP (x, 0, 0));
7462 fprintf (file, "@GOTOFF");
7465 output_addr_const (file, XVECEXP (x, 0, 0));
7466 fprintf (file, "@PLT");
7469 output_addr_const (file, XVECEXP (x, 0, 0));
7470 fprintf (file, "@PLTOFF");
7473 output_addr_const (file, XVECEXP (x, 0, 0));
7474 fprintf (file, "@TLSGD");
7477 assemble_name (file, get_some_local_dynamic_name ());
7478 fprintf (file, "@TLSLDM");
7481 output_addr_const (file, XVECEXP (x, 0, 0));
7482 fprintf (file, "@DTPOFF");
7485 output_addr_const (file, XVECEXP (x, 0, 0));
7486 fprintf (file, "@NTPOFF");
7488 case UNSPEC_GOTNTPOFF:
7489 output_addr_const (file, XVECEXP (x, 0, 0));
7490 fprintf (file, "@GOTNTPOFF");
7492 case UNSPEC_INDNTPOFF:
7493 output_addr_const (file, XVECEXP (x, 0, 0));
7494 fprintf (file, "@INDNTPOFF");
7498 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7499 switch (XINT (x, 1))
7501 case UNSPEC_POOL_OFFSET:
7502 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7503 output_addr_const (file, x);
7509 /* Output address operand ADDR in assembler syntax to
7510 stdio stream FILE. */
7513 print_operand_address (FILE *file, rtx addr)
7515 struct s390_address ad;
7516 memset (&ad, 0, sizeof (s390_address));
7518 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7522 output_operand_lossage ("symbolic memory references are "
7523 "only supported on z10 or later");
7526 output_addr_const (file, addr);
7530 if (!s390_decompose_address (addr, &ad)
7531 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7532 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7533 output_operand_lossage ("cannot decompose address");
7536 output_addr_const (file, ad.disp);
7538 fprintf (file, "0");
7540 if (ad.base && ad.indx)
7541 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7542 reg_names[REGNO (ad.base)]);
7544 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7547 /* Output operand X in assembler syntax to stdio stream FILE.
7548 CODE specifies the format flag. The following format flags
7551 'C': print opcode suffix for branch condition.
7552 'D': print opcode suffix for inverse branch condition.
7553 'E': print opcode suffix for branch on index instruction.
7554 'G': print the size of the operand in bytes.
7555 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7556 'M': print the second word of a TImode operand.
7557 'N': print the second word of a DImode operand.
7558 'O': print only the displacement of a memory reference or address.
7559 'R': print only the base register of a memory reference or address.
7560 'S': print S-type memory reference (base+displacement).
7561 'Y': print address style operand without index (e.g. shift count or setmem
7564 'b': print integer X as if it's an unsigned byte.
7565 'c': print integer X as if it's a signed byte.
7566 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7567 'f': "end" contiguous bitmask X in SImode.
7568 'h': print integer X as if it's a signed halfword.
7569 'i': print the first nonzero HImode part of X.
7570 'j': print the first HImode part unequal to -1 of X.
7571 'k': print the first nonzero SImode part of X.
7572 'm': print the first SImode part unequal to -1 of X.
7573 'o': print integer X as if it's an unsigned 32-bit word.
7574 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7575 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7576 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7577 'x': print integer X as if it's an unsigned halfword.
7578 'v': print register number as vector register (v1 instead of f1).
7582 print_operand (FILE *file, rtx x, int code)
7589 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7593 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7597 if (GET_CODE (x) == LE)
7598 fprintf (file, "l");
7599 else if (GET_CODE (x) == GT)
7600 fprintf (file, "h");
7602 output_operand_lossage ("invalid comparison operator "
7603 "for 'E' output modifier");
7607 if (GET_CODE (x) == SYMBOL_REF)
7609 fprintf (file, "%s", ":tls_load:");
7610 output_addr_const (file, x);
7612 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7614 fprintf (file, "%s", ":tls_gdcall:");
7615 output_addr_const (file, XVECEXP (x, 0, 0));
7617 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7619 fprintf (file, "%s", ":tls_ldcall:");
7620 const char *name = get_some_local_dynamic_name ();
7622 assemble_name (file, name);
7625 output_operand_lossage ("invalid reference for 'J' output modifier");
7629 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7634 struct s390_address ad;
7637 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7640 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7643 output_operand_lossage ("invalid address for 'O' output modifier");
7648 output_addr_const (file, ad.disp);
7650 fprintf (file, "0");
7656 struct s390_address ad;
7659 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7662 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7665 output_operand_lossage ("invalid address for 'R' output modifier");
7670 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7672 fprintf (file, "0");
7678 struct s390_address ad;
7683 output_operand_lossage ("memory reference expected for "
7684 "'S' output modifier");
7687 ret = s390_decompose_address (XEXP (x, 0), &ad);
7690 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7693 output_operand_lossage ("invalid address for 'S' output modifier");
7698 output_addr_const (file, ad.disp);
7700 fprintf (file, "0");
7703 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7708 if (GET_CODE (x) == REG)
7709 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7710 else if (GET_CODE (x) == MEM)
7711 x = change_address (x, VOIDmode,
7712 plus_constant (Pmode, XEXP (x, 0), 4));
7714 output_operand_lossage ("register or memory expression expected "
7715 "for 'N' output modifier");
7719 if (GET_CODE (x) == REG)
7720 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7721 else if (GET_CODE (x) == MEM)
7722 x = change_address (x, VOIDmode,
7723 plus_constant (Pmode, XEXP (x, 0), 8));
7725 output_operand_lossage ("register or memory expression expected "
7726 "for 'M' output modifier");
7730 print_addrstyle_operand (file, x);
7734 switch (GET_CODE (x))
7737 /* Print FP regs as fx instead of vx when they are accessed
7738 through non-vector mode. */
7740 || VECTOR_NOFP_REG_P (x)
7741 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7742 || (VECTOR_REG_P (x)
7743 && (GET_MODE_SIZE (GET_MODE (x)) /
7744 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7745 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7747 fprintf (file, "%s", reg_names[REGNO (x)]);
7751 output_address (GET_MODE (x), XEXP (x, 0));
7758 output_addr_const (file, x);
7771 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7777 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7780 ival = s390_extract_part (x, HImode, 0);
7783 ival = s390_extract_part (x, HImode, -1);
7786 ival = s390_extract_part (x, SImode, 0);
7789 ival = s390_extract_part (x, SImode, -1);
7801 len = (code == 's' || code == 'e' ? 64 : 32);
7802 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7804 if (code == 's' || code == 't')
7811 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7813 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7816 case CONST_WIDE_INT:
7818 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7819 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7820 else if (code == 'x')
7821 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7822 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7823 else if (code == 'h')
7824 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7825 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7829 output_operand_lossage ("invalid constant - try using "
7830 "an output modifier");
7832 output_operand_lossage ("invalid constant for output modifier '%c'",
7840 gcc_assert (const_vec_duplicate_p (x));
7841 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7842 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7850 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7852 ival = (code == 's') ? start : end;
7853 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7859 bool ok = s390_bytemask_vector_p (x, &mask);
7861 fprintf (file, "%u", mask);
7866 output_operand_lossage ("invalid constant vector for output "
7867 "modifier '%c'", code);
7873 output_operand_lossage ("invalid expression - try using "
7874 "an output modifier");
7876 output_operand_lossage ("invalid expression for output "
7877 "modifier '%c'", code);
7882 /* Target hook for assembling integer objects. We need to define it
7883 here to work around a bug in some versions of GAS, which couldn't
7884 handle values smaller than INT_MIN when printed in decimal. */
7887 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7889 if (size == 8 && aligned_p
7890 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7892 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7896 return default_assemble_integer (x, size, aligned_p);
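/* Example (illustrative): for the 64-bit constant -0x80000001, which is
   smaller than INT_MIN, the hook above emits

	.quad	0xffffffff7fffffff

   rather than the decimal spelling that the affected GAS versions
   mis-parsed.  */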
7899 /* Returns true if register REGNO is used for forming
7900 a memory address in expression X. */
7903 reg_used_in_mem_p (int regno, rtx x)
7905 enum rtx_code code = GET_CODE (x);
7911 if (refers_to_regno_p (regno, XEXP (x, 0)))
7914 else if (code == SET
7915 && GET_CODE (SET_DEST (x)) == PC)
7917 if (refers_to_regno_p (regno, SET_SRC (x)))
7921 fmt = GET_RTX_FORMAT (code);
7922 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7925 && reg_used_in_mem_p (regno, XEXP (x, i)))
7928 else if (fmt[i] == 'E')
7929 for (j = 0; j < XVECLEN (x, i); j++)
7930 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7936 /* Returns true if expression DEP_RTX sets an address register
7937 used by instruction INSN to address memory. */
7940 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7944 if (NONJUMP_INSN_P (dep_rtx))
7945 dep_rtx = PATTERN (dep_rtx);
7947 if (GET_CODE (dep_rtx) == SET)
7949 target = SET_DEST (dep_rtx);
7950 if (GET_CODE (target) == STRICT_LOW_PART)
7951 target = XEXP (target, 0);
7952 while (GET_CODE (target) == SUBREG)
7953 target = SUBREG_REG (target);
7955 if (GET_CODE (target) == REG)
7957 int regno = REGNO (target);
7959 if (s390_safe_attr_type (insn) == TYPE_LA)
7961 pat = PATTERN (insn);
7962 if (GET_CODE (pat) == PARALLEL)
7964 gcc_assert (XVECLEN (pat, 0) == 2);
7965 pat = XVECEXP (pat, 0, 0);
7967 gcc_assert (GET_CODE (pat) == SET);
7968 return refers_to_regno_p (regno, SET_SRC (pat));
7970 else if (get_attr_atype (insn) == ATYPE_AGEN)
7971 return reg_used_in_mem_p (regno, PATTERN (insn));
7977 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
7980 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
7982 rtx dep_rtx = PATTERN (dep_insn);
7985 if (GET_CODE (dep_rtx) == SET
7986 && addr_generation_dependency_p (dep_rtx, insn))
7988 else if (GET_CODE (dep_rtx) == PARALLEL)
7990 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
7992 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
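/* Example (illustrative): the classic address-generation interlock the
   predicate above detects:

	lr	%r1,%r2		# DEP_INSN sets %r1
	l	%r3,0(%r1)	# INSN uses %r1 to form an address

   The scheduler can then account for the extra agen latency between the
   two insns.  */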
8000 /* A C statement (sans semicolon) to update the integer scheduling priority
8001 INSN_PRIORITY (INSN). Increase the priority to execute INSN earlier;
8002 reduce the priority to execute INSN later. Do not define this macro if
8003 you do not need to adjust the scheduling priorities of insns.
8005 A STD instruction should be scheduled earlier,
8006 in order to use the bypass. */
8008 s390_adjust_priority (rtx_insn *insn, int priority)
8010 if (! INSN_P (insn))
8013 if (s390_tune <= PROCESSOR_2064_Z900)
8016 switch (s390_safe_attr_type (insn))
8020 priority = priority << 3;
8024 priority = priority << 1;
8033 /* The number of instructions that can be issued per cycle. */
8036 s390_issue_rate (void)
8040 case PROCESSOR_2084_Z990:
8041 case PROCESSOR_2094_Z9_109:
8042 case PROCESSOR_2094_Z9_EC:
8043 case PROCESSOR_2817_Z196:
8045 case PROCESSOR_2097_Z10:
8047 case PROCESSOR_9672_G5:
8048 case PROCESSOR_9672_G6:
8049 case PROCESSOR_2064_Z900:
8050 /* Starting with EC12 we use the sched_reorder hook to take care
8051 of instruction dispatch constraints. The algorithm only
8052 picks the best instruction and assumes only a single
8053 instruction gets issued per cycle. */
8054 case PROCESSOR_2827_ZEC12:
8055 case PROCESSOR_2964_Z13:
8056 case PROCESSOR_ARCH12:
8063 s390_first_cycle_multipass_dfa_lookahead (void)
8068 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
8069 Fix up MEMs as required. */
8072 annotate_constant_pool_refs (rtx *x)
8077 gcc_assert (GET_CODE (*x) != SYMBOL_REF
8078 || !CONSTANT_POOL_ADDRESS_P (*x));
8080 /* Literal pool references can only occur inside a MEM ... */
8081 if (GET_CODE (*x) == MEM)
8083 rtx memref = XEXP (*x, 0);
8085 if (GET_CODE (memref) == SYMBOL_REF
8086 && CONSTANT_POOL_ADDRESS_P (memref))
8088 rtx base = cfun->machine->base_reg;
8089 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
8092 *x = replace_equiv_address (*x, addr);
8096 if (GET_CODE (memref) == CONST
8097 && GET_CODE (XEXP (memref, 0)) == PLUS
8098 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
8099 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
8100 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
8102 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
8103 rtx sym = XEXP (XEXP (memref, 0), 0);
8104 rtx base = cfun->machine->base_reg;
8105 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8108 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
8113 /* ... or a load-address type pattern. */
8114 if (GET_CODE (*x) == SET)
8116 rtx addrref = SET_SRC (*x);
8118 if (GET_CODE (addrref) == SYMBOL_REF
8119 && CONSTANT_POOL_ADDRESS_P (addrref))
8121 rtx base = cfun->machine->base_reg;
8122 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
8125 SET_SRC (*x) = addr;
8129 if (GET_CODE (addrref) == CONST
8130 && GET_CODE (XEXP (addrref, 0)) == PLUS
8131 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
8132 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
8133 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
8135 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
8136 rtx sym = XEXP (XEXP (addrref, 0), 0);
8137 rtx base = cfun->machine->base_reg;
8138 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8141 SET_SRC (*x) = plus_constant (Pmode, addr, off);
8146 /* Annotate LTREL_BASE as well. */
8147 if (GET_CODE (*x) == UNSPEC
8148 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8150 rtx base = cfun->machine->base_reg;
8151 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
8156 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8157 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8161 annotate_constant_pool_refs (&XEXP (*x, i));
8163 else if (fmt[i] == 'E')
8165 for (j = 0; j < XVECLEN (*x, i); j++)
8166 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
8171 /* Split all branches that exceed the maximum distance.
8172 Returns true if this created a new literal pool entry. */
8175 s390_split_branches (void)
8177 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8178 int new_literal = 0, ret;
8183 /* We need correct insn addresses. */
8185 shorten_branches (get_insns ());
8187 /* Find all branches that exceed 64KB, and split them. */
8189 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8191 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
8194 pat = PATTERN (insn);
8195 if (GET_CODE (pat) == PARALLEL)
8196 pat = XVECEXP (pat, 0, 0);
8197 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8200 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8202 label = &SET_SRC (pat);
8204 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8206 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8207 label = &XEXP (SET_SRC (pat), 1);
8208 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8209 label = &XEXP (SET_SRC (pat), 2);
8216 if (get_attr_length (insn) <= 4)
8219 /* We are going to use the return register as a scratch register;
8220 make sure it will be saved/restored by the prologue/epilogue. */
8221 cfun_frame_layout.save_return_addr_p = 1;
8226 rtx mem = force_const_mem (Pmode, *label);
8227 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8229 INSN_ADDRESSES_NEW (set_insn, -1);
8230 annotate_constant_pool_refs (&PATTERN (set_insn));
8237 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8238 UNSPEC_LTREL_OFFSET);
8239 target = gen_rtx_CONST (Pmode, target);
8240 target = force_const_mem (Pmode, target);
8241 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8243 INSN_ADDRESSES_NEW (set_insn, -1);
8244 annotate_constant_pool_refs (&PATTERN (set_insn));
8246 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8247 cfun->machine->base_reg),
8249 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8252 ret = validate_change (insn, label, target, 0);
8260 /* Find an annotated literal pool symbol referenced in RTX X,
8261 and store it at REF. Will abort if X contains references to
8262 more than one such pool symbol; multiple references to the same
8263 symbol are allowed, however.
8265 The rtx pointed to by REF must be initialized to NULL_RTX
8266 by the caller before calling this routine. */
8269 find_constant_pool_ref (rtx x, rtx *ref)
8274 /* Ignore LTREL_BASE references. */
8275 if (GET_CODE (x) == UNSPEC
8276 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8278 /* Likewise POOL_ENTRY insns. */
8279 if (GET_CODE (x) == UNSPEC_VOLATILE
8280 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8283 gcc_assert (GET_CODE (x) != SYMBOL_REF
8284 || !CONSTANT_POOL_ADDRESS_P (x));
8286 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8288 rtx sym = XVECEXP (x, 0, 0);
8289 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8290 && CONSTANT_POOL_ADDRESS_P (sym));
8292 if (*ref == NULL_RTX)
8295 gcc_assert (*ref == sym);
8300 fmt = GET_RTX_FORMAT (GET_CODE (x));
8301 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8305 find_constant_pool_ref (XEXP (x, i), ref);
8307 else if (fmt[i] == 'E')
8309 for (j = 0; j < XVECLEN (x, i); j++)
8310 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8315 /* Replace every reference to the annotated literal pool
8316 symbol REF in X by its base plus OFFSET. */
8319 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8324 gcc_assert (*x != ref);
8326 if (GET_CODE (*x) == UNSPEC
8327 && XINT (*x, 1) == UNSPEC_LTREF
8328 && XVECEXP (*x, 0, 0) == ref)
8330 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8334 if (GET_CODE (*x) == PLUS
8335 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8336 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8337 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8338 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8340 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8341 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8345 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8346 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8350 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8352 else if (fmt[i] == 'E')
8354 for (j = 0; j < XVECLEN (*x, i); j++)
8355 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8360 /* Check whether X contains an UNSPEC_LTREL_BASE.
8361 Return its constant pool symbol if found, NULL_RTX otherwise. */
8364 find_ltrel_base (rtx x)
8369 if (GET_CODE (x) == UNSPEC
8370 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8371 return XVECEXP (x, 0, 0);
8373 fmt = GET_RTX_FORMAT (GET_CODE (x));
8374 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8378 rtx fnd = find_ltrel_base (XEXP (x, i));
8382 else if (fmt[i] == 'E')
8384 for (j = 0; j < XVECLEN (x, i); j++)
8386 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8396 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8399 replace_ltrel_base (rtx *x)
8404 if (GET_CODE (*x) == UNSPEC
8405 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8407 *x = XVECEXP (*x, 0, 1);
8411 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8412 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8416 replace_ltrel_base (&XEXP (*x, i));
8418 else if (fmt[i] == 'E')
8420 for (j = 0; j < XVECLEN (*x, i); j++)
8421 replace_ltrel_base (&XVECEXP (*x, i, j));
8427 /* We keep a list of constants which we have to add to internal
8428 constant tables in the middle of large functions. */
8430 #define NR_C_MODES 32
8431 machine_mode constant_modes[NR_C_MODES] =
8433 TFmode, TImode, TDmode,
8434 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8435 V4SFmode, V2DFmode, V1TFmode,
8436 DFmode, DImode, DDmode,
8437 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8438 SFmode, SImode, SDmode,
8439 V4QImode, V2HImode, V1SImode, V1SFmode,
8448 struct constant *next;
8450 rtx_code_label *label;
8453 struct constant_pool
8455 struct constant_pool *next;
8456 rtx_insn *first_insn;
8457 rtx_insn *pool_insn;
8459 rtx_insn *emit_pool_after;
8461 struct constant *constants[NR_C_MODES];
8462 struct constant *execute;
8463 rtx_code_label *label;
8467 /* Allocate new constant_pool structure. */
8469 static struct constant_pool *
8470 s390_alloc_pool (void)
8472 struct constant_pool *pool;
8475 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8477 for (i = 0; i < NR_C_MODES; i++)
8478 pool->constants[i] = NULL;
8480 pool->execute = NULL;
8481 pool->label = gen_label_rtx ();
8482 pool->first_insn = NULL;
8483 pool->pool_insn = NULL;
8484 pool->insns = BITMAP_ALLOC (NULL);
8486 pool->emit_pool_after = NULL;
8491 /* Create new constant pool covering instructions starting at INSN
8492 and chain it to the end of POOL_LIST. */
8494 static struct constant_pool *
8495 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8497 struct constant_pool *pool, **prev;
8499 pool = s390_alloc_pool ();
8500 pool->first_insn = insn;
8502 for (prev = pool_list; *prev; prev = &(*prev)->next)
8509 /* End range of instructions covered by POOL at INSN and emit
8510 placeholder insn representing the pool. */
8513 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8515 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8518 insn = get_last_insn ();
8520 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8521 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8524 /* Add INSN to the list of insns covered by POOL. */
8527 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8529 bitmap_set_bit (pool->insns, INSN_UID (insn));
8532 /* Return pool out of POOL_LIST that covers INSN. */
8534 static struct constant_pool *
8535 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8537 struct constant_pool *pool;
8539 for (pool = pool_list; pool; pool = pool->next)
8540 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8546 /* Add constant VAL of mode MODE to the constant pool POOL. */
8549 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8554 for (i = 0; i < NR_C_MODES; i++)
8555 if (constant_modes[i] == mode)
8557 gcc_assert (i != NR_C_MODES);
8559 for (c = pool->constants[i]; c != NULL; c = c->next)
8560 if (rtx_equal_p (val, c->value))
8565 c = (struct constant *) xmalloc (sizeof *c);
8567 c->label = gen_label_rtx ();
8568 c->next = pool->constants[i];
8569 pool->constants[i] = c;
8570 pool->size += GET_MODE_SIZE (mode);
8574 /* Return an rtx that represents the offset of X from the start of
8578 s390_pool_offset (struct constant_pool *pool, rtx x)
8582 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8583 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8584 UNSPEC_POOL_OFFSET);
8585 return gen_rtx_CONST (GET_MODE (x), x);
8588 /* Find constant VAL of mode MODE in the constant pool POOL.
8589 Return an RTX describing the distance from the start of
8590 the pool to the location of the new constant. */
8593 s390_find_constant (struct constant_pool *pool, rtx val,
8599 for (i = 0; i < NR_C_MODES; i++)
8600 if (constant_modes[i] == mode)
8602 gcc_assert (i != NR_C_MODES);
8604 for (c = pool->constants[i]; c != NULL; c = c->next)
8605 if (rtx_equal_p (val, c->value))
8610 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8613 /* Check whether INSN is an execute. Return the label_ref to its
8614 execute target template if so, NULL_RTX otherwise. */
8617 s390_execute_label (rtx insn)
8619 if (NONJUMP_INSN_P (insn)
8620 && GET_CODE (PATTERN (insn)) == PARALLEL
8621 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8622 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8623 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8628 /* Add execute target for INSN to the constant pool POOL. */
8631 s390_add_execute (struct constant_pool *pool, rtx insn)
8635 for (c = pool->execute; c != NULL; c = c->next)
8636 if (INSN_UID (insn) == INSN_UID (c->value))
8641 c = (struct constant *) xmalloc (sizeof *c);
8643 c->label = gen_label_rtx ();
8644 c->next = pool->execute;
8650 /* Find execute target for INSN in the constant pool POOL.
8651 Return an RTX describing the distance from the start of
8652 the pool to the location of the execute target. */
8655 s390_find_execute (struct constant_pool *pool, rtx insn)
8659 for (c = pool->execute; c != NULL; c = c->next)
8660 if (INSN_UID (insn) == INSN_UID (c->value))
8665 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8668 /* For an execute INSN, extract the execute target template. */
8671 s390_execute_target (rtx insn)
8673 rtx pattern = PATTERN (insn);
8674 gcc_assert (s390_execute_label (insn));
8676 if (XVECLEN (pattern, 0) == 2)
8678 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8682 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8685 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8686 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8688 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8694 /* Indicate that INSN cannot be duplicated. This is the case for
8695 execute insns that carry a unique label. */
8698 s390_cannot_copy_insn_p (rtx_insn *insn)
8700 rtx label = s390_execute_label (insn);
8701 return label && label != const0_rtx;
8704 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8705 do not emit the pool base label. */
8708 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8711 rtx_insn *insn = pool->pool_insn;
8714 /* Switch to rodata section. */
8715 if (TARGET_CPU_ZARCH)
8717 insn = emit_insn_after (gen_pool_section_start (), insn);
8718 INSN_ADDRESSES_NEW (insn, -1);
8721 /* Ensure minimum pool alignment. */
8722 if (TARGET_CPU_ZARCH)
8723 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8725 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8726 INSN_ADDRESSES_NEW (insn, -1);
8728 /* Emit pool base label. */
8731 insn = emit_label_after (pool->label, insn);
8732 INSN_ADDRESSES_NEW (insn, -1);
8735 /* Dump constants in descending alignment requirement order,
8736 ensuring proper alignment for every constant. */
8737 for (i = 0; i < NR_C_MODES; i++)
8738 for (c = pool->constants[i]; c; c = c->next)
8740 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8741 rtx value = copy_rtx (c->value);
8742 if (GET_CODE (value) == CONST
8743 && GET_CODE (XEXP (value, 0)) == UNSPEC
8744 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8745 && XVECLEN (XEXP (value, 0), 0) == 1)
8746 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8748 insn = emit_label_after (c->label, insn);
8749 INSN_ADDRESSES_NEW (insn, -1);
8751 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8752 gen_rtvec (1, value),
8753 UNSPECV_POOL_ENTRY);
8754 insn = emit_insn_after (value, insn);
8755 INSN_ADDRESSES_NEW (insn, -1);
8758 /* Ensure minimum alignment for instructions. */
8759 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8760 INSN_ADDRESSES_NEW (insn, -1);
8762 /* Output in-pool execute template insns. */
8763 for (c = pool->execute; c; c = c->next)
8765 insn = emit_label_after (c->label, insn);
8766 INSN_ADDRESSES_NEW (insn, -1);
8768 insn = emit_insn_after (s390_execute_target (c->value), insn);
8769 INSN_ADDRESSES_NEW (insn, -1);
8772 /* Switch back to previous section. */
8773 if (TARGET_CPU_ZARCH)
8775 insn = emit_insn_after (gen_pool_section_end (), insn);
8776 INSN_ADDRESSES_NEW (insn, -1);
8779 insn = emit_barrier_after (insn);
8780 INSN_ADDRESSES_NEW (insn, -1);
8782 /* Remove placeholder insn. */
8783 remove_insn (pool->pool_insn);
8786 /* Free all memory used by POOL. */
8789 s390_free_pool (struct constant_pool *pool)
8791 struct constant *c, *next;
8794 for (i = 0; i < NR_C_MODES; i++)
8795 for (c = pool->constants[i]; c; c = next)
8801 for (c = pool->execute; c; c = next)
8807 BITMAP_FREE (pool->insns);
8812 /* Collect main literal pool. Return NULL on overflow. */
8814 static struct constant_pool *
8815 s390_mainpool_start (void)
8817 struct constant_pool *pool;
8820 pool = s390_alloc_pool ();
8822 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8824 if (NONJUMP_INSN_P (insn)
8825 && GET_CODE (PATTERN (insn)) == SET
8826 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8827 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8829 /* There might be two main_pool instructions if base_reg
8830 is call-clobbered; one for shrink-wrapped code and one
8831 for the rest. We want to keep the first. */
8832 if (pool->pool_insn)
8834 insn = PREV_INSN (insn);
8835 delete_insn (NEXT_INSN (insn));
8838 pool->pool_insn = insn;
8841 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8843 s390_add_execute (pool, insn);
8845 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8847 rtx pool_ref = NULL_RTX;
8848 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8851 rtx constant = get_pool_constant (pool_ref);
8852 machine_mode mode = get_pool_mode (pool_ref);
8853 s390_add_constant (pool, constant, mode);
8857 /* If hot/cold partitioning is enabled we have to make sure that
8858 the literal pool is emitted in the same section where the
8859 initialization of the literal pool base pointer takes place.
8860 emit_pool_after is only used in the non-overflow case on non-Z
8861 CPUs, where we can emit the literal pool at the end of the
8862 function body within the text section. */
8864 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8865 && !pool->emit_pool_after)
8866 pool->emit_pool_after = PREV_INSN (insn);
8869 gcc_assert (pool->pool_insn || pool->size == 0);
8871 if (pool->size >= 4096)
8873 /* We're going to chunkify the pool, so remove the main
8874 pool placeholder insn. */
8875 remove_insn (pool->pool_insn);
8877 s390_free_pool (pool);
8881 /* If the function ends with the section where the literal pool
8882 should be emitted, set the marker to its end. */
8883 if (pool && !pool->emit_pool_after)
8884 pool->emit_pool_after = get_last_insn ();
8889 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8890 Modify the current function to output the pool constants as well as
8891 the pool register setup instruction. */
8894 s390_mainpool_finish (struct constant_pool *pool)
8896 rtx base_reg = cfun->machine->base_reg;
8898 /* If the pool is empty, we're done. */
8899 if (pool->size == 0)
8901 /* We don't actually need a base register after all. */
8902 cfun->machine->base_reg = NULL_RTX;
8904 if (pool->pool_insn)
8905 remove_insn (pool->pool_insn);
8906 s390_free_pool (pool);
8910 /* We need correct insn addresses. */
8911 shorten_branches (get_insns ());
8913 /* On zSeries, we use a LARL to load the pool register. The pool is
8914 located in the .rodata section, so we emit it after the function. */
8915 if (TARGET_CPU_ZARCH)
8917 rtx set = gen_main_base_64 (base_reg, pool->label);
8918 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8919 INSN_ADDRESSES_NEW (insn, -1);
8920 remove_insn (pool->pool_insn);
8922 insn = get_last_insn ();
8923 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8924 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8926 s390_dump_pool (pool, 0);
8929 /* On S/390, if the total size of the function's code plus literal pool
8930 does not exceed 4096 bytes, we use BASR to set up a function base
8931 pointer, and emit the literal pool at the end of the function. */
8932 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8933 + pool->size + 8 /* alignment slop */ < 4096)
8935 rtx set = gen_main_base_31_small (base_reg, pool->label);
8936 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8937 INSN_ADDRESSES_NEW (insn, -1);
8938 remove_insn (pool->pool_insn);
8940 insn = emit_label_after (pool->label, insn);
8941 INSN_ADDRESSES_NEW (insn, -1);
8943 /* emit_pool_after will be set by s390_mainpool_start to the
8944 last insn of the section where the literal pool should be
8946 insn = pool->emit_pool_after;
8948 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8949 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8951 s390_dump_pool (pool, 1);
8954 /* Otherwise, we emit an inline literal pool and use BASR to branch
8955 over it, setting up the pool register at the same time. */
8958 rtx_code_label *pool_end = gen_label_rtx ();
8960 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
8961 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
8962 JUMP_LABEL (insn) = pool_end;
8963 INSN_ADDRESSES_NEW (insn, -1);
8964 remove_insn (pool->pool_insn);
8966 insn = emit_label_after (pool->label, insn);
8967 INSN_ADDRESSES_NEW (insn, -1);
8969 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8970 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8972 insn = emit_label_after (pool_end, pool->pool_insn);
8973 INSN_ADDRESSES_NEW (insn, -1);
8975 s390_dump_pool (pool, 1);
8979 /* Replace all literal pool references. */
8981 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
8984 replace_ltrel_base (&PATTERN (insn));
8986 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8988 rtx addr, pool_ref = NULL_RTX;
8989 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8992 if (s390_execute_label (insn))
8993 addr = s390_find_execute (pool, insn);
8995 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
8996 get_pool_mode (pool_ref));
8998 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
8999 INSN_CODE (insn) = -1;
9005 /* Free the pool. */
9006 s390_free_pool (pool);
9009 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9010 We have decided we cannot use this pool, so revert all changes
9011 to the current function that were done by s390_mainpool_start. */
9013 s390_mainpool_cancel (struct constant_pool *pool)
9015 /* We didn't actually change the instruction stream, so simply
9016 free the pool memory. */
9017 s390_free_pool (pool);
9021 /* Chunkify the literal pool. */
9023 #define S390_POOL_CHUNK_MIN 0xc00
9024 #define S390_POOL_CHUNK_MAX 0xe00
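/* Illustrative arithmetic: chunks are cut between S390_POOL_CHUNK_MIN
   (0xc00 = 3072 bytes) and S390_POOL_CHUNK_MAX (0xe00 = 3584 bytes), so
   that even after the pessimistically estimated base-register reload
   insns are added, every entry stays reachable through the 12-bit
   displacement (4096 bytes).  */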
9026 static struct constant_pool *
9027 s390_chunkify_start (void)
9029 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
9032 rtx pending_ltrel = NULL_RTX;
9035 rtx (*gen_reload_base) (rtx, rtx) =
9036 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
9039 /* We need correct insn addresses. */
9041 shorten_branches (get_insns ());
9043 /* Scan all insns and move literals to pool chunks. */
9045 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9047 bool section_switch_p = false;
9049 /* Check for pending LTREL_BASE. */
9052 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
9055 gcc_assert (ltrel_base == pending_ltrel);
9056 pending_ltrel = NULL_RTX;
9060 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
9063 curr_pool = s390_start_pool (&pool_list, insn);
9065 s390_add_execute (curr_pool, insn);
9066 s390_add_pool_insn (curr_pool, insn);
9068 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9070 rtx pool_ref = NULL_RTX;
9071 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9074 rtx constant = get_pool_constant (pool_ref);
9075 machine_mode mode = get_pool_mode (pool_ref);
9078 curr_pool = s390_start_pool (&pool_list, insn);
9080 s390_add_constant (curr_pool, constant, mode);
9081 s390_add_pool_insn (curr_pool, insn);
9083 /* Don't split the pool chunk between a LTREL_OFFSET load
9084 and the corresponding LTREL_BASE. */
9085 if (GET_CODE (constant) == CONST
9086 && GET_CODE (XEXP (constant, 0)) == UNSPEC
9087 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
9089 gcc_assert (!pending_ltrel);
9090 pending_ltrel = pool_ref;
9095 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
9098 s390_add_pool_insn (curr_pool, insn);
9099 /* An LTREL_BASE must follow within the same basic block. */
9100 gcc_assert (!pending_ltrel);
9104 switch (NOTE_KIND (insn))
9106 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
9107 section_switch_p = true;
9109 case NOTE_INSN_VAR_LOCATION:
9110 case NOTE_INSN_CALL_ARG_LOCATION:
9117 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
9118 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
9121 if (TARGET_CPU_ZARCH)
9123 if (curr_pool->size < S390_POOL_CHUNK_MAX)
9126 s390_end_pool (curr_pool, NULL);
9131 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
9132 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
9135 /* We will later have to insert base register reload insns.
9136 Those will have an effect on code size, which we need to
9137 consider here. This calculation makes rather pessimistic
9138 worst-case assumptions. */
9142 if (chunk_size < S390_POOL_CHUNK_MIN
9143 && curr_pool->size < S390_POOL_CHUNK_MIN
9144 && !section_switch_p)
9147 /* Pool chunks can only be inserted after BARRIERs ... */
9148 if (BARRIER_P (insn))
9150 s390_end_pool (curr_pool, insn);
9155 /* ... so if we don't find one in time, create one. */
9156 else if (chunk_size > S390_POOL_CHUNK_MAX
9157 || curr_pool->size > S390_POOL_CHUNK_MAX
9158 || section_switch_p)
9160 rtx_insn *label, *jump, *barrier, *next, *prev;
9162 if (!section_switch_p)
9164 /* We can insert the barrier only after a 'real' insn. */
9165 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
9167 if (get_attr_length (insn) == 0)
9169 /* Don't separate LTREL_BASE from the corresponding
9170 LTREL_OFFSET load. */
9177 next = NEXT_INSN (insn);
9181 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
9182 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
9186 gcc_assert (!pending_ltrel);
9188 /* The old pool has to end before the section switch
9189 note in order to make it part of the current
9191 insn = PREV_INSN (insn);
9194 label = gen_label_rtx ();
9196 if (prev && NOTE_P (prev))
9197 prev = prev_nonnote_insn (prev);
9199 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
9200 INSN_LOCATION (prev));
9202 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
9203 barrier = emit_barrier_after (jump);
9204 insn = emit_label_after (label, barrier);
9205 JUMP_LABEL (jump) = label;
9206 LABEL_NUSES (label) = 1;
9208 INSN_ADDRESSES_NEW (jump, -1);
9209 INSN_ADDRESSES_NEW (barrier, -1);
9210 INSN_ADDRESSES_NEW (insn, -1);
9212 s390_end_pool (curr_pool, barrier);
9220 s390_end_pool (curr_pool, NULL);
9221 gcc_assert (!pending_ltrel);
9223 /* Find all labels that are branched into
9224 from an insn belonging to a different chunk. */
9226 far_labels = BITMAP_ALLOC (NULL);
9228 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9230 rtx_jump_table_data *table;
9232 /* Labels marked with LABEL_PRESERVE_P can be the target
9233 of non-local jumps, so we have to mark them.
9234 The same holds for named labels.
9236 Don't do that, however, if it is the label before
9240 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9242 rtx_insn *vec_insn = NEXT_INSN (insn);
9243 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9244 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9246 /* Check potential targets in a table jump (casesi_jump). */
9247 else if (tablejump_p (insn, NULL, &table))
9249 rtx vec_pat = PATTERN (table);
9250 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9252 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9254 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9256 if (s390_find_pool (pool_list, label)
9257 != s390_find_pool (pool_list, insn))
9258 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9261 /* If we have a direct jump (conditional or unconditional),
9262 check all potential targets. */
9263 else if (JUMP_P (insn))
9265 rtx pat = PATTERN (insn);
9267 if (GET_CODE (pat) == PARALLEL)
9268 pat = XVECEXP (pat, 0, 0);
9270 if (GET_CODE (pat) == SET)
9272 rtx label = JUMP_LABEL (insn);
9273 if (label && !ANY_RETURN_P (label))
9275 if (s390_find_pool (pool_list, label)
9276 != s390_find_pool (pool_list, insn))
9277 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9283 /* Insert base register reload insns before every pool. */
9285 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9287 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9289 rtx_insn *insn = curr_pool->first_insn;
9290 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9293 /* Insert base register reload insns at every far label. */
9295 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9297 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9299 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9302 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9304 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9309 BITMAP_FREE (far_labels);
9312 /* Recompute insn addresses. */
9314 init_insn_lengths ();
9315 shorten_branches (get_insns ());
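  /* The addresses recomputed here are what our caller uses to judge
     whether this chunk layout actually works out; depending on that
     it will commit the layout via s390_chunkify_finish or undo it
     via s390_chunkify_cancel (both below).  */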
/* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
   After we have decided to use this list, finish implementing
   all changes to the current function as required.  */

static void
s390_chunkify_finish (struct constant_pool *pool_list)
{
  struct constant_pool *curr_pool = NULL;
  rtx_insn *insn;

  /* Replace all literal pool references.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	replace_ltrel_base (&PATTERN (insn));

      curr_pool = s390_find_pool (pool_list, insn);
      if (!curr_pool)
	continue;

      if (NONJUMP_INSN_P (insn) || CALL_P (insn))
	{
	  rtx addr, pool_ref = NULL_RTX;
	  find_constant_pool_ref (PATTERN (insn), &pool_ref);
	  if (pool_ref)
	    {
	      if (s390_execute_label (insn))
		addr = s390_find_execute (curr_pool, insn);
	      else
		addr = s390_find_constant (curr_pool,
					   get_pool_constant (pool_ref),
					   get_pool_mode (pool_ref));

	      replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
	      INSN_CODE (insn) = -1;
	    }
	}
    }

  /* Dump out all literal pools.  */
  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    s390_dump_pool (curr_pool, 0);

  /* Free pool list.  */
  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }
}
/* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
   We have decided we cannot use this list, so revert all changes
   to the current function that were done by s390_chunkify_start.  */

static void
s390_chunkify_cancel (struct constant_pool *pool_list)
{
  struct constant_pool *curr_pool = NULL;
  rtx_insn *insn;

  /* Remove all pool placeholder insns.  */
  for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
    {
      /* Did we insert an extra barrier?  Remove it.  */
      rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
      rtx_insn *jump = barrier ? PREV_INSN (barrier) : NULL;
      rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);

      if (jump && JUMP_P (jump)
	  && barrier && BARRIER_P (barrier)
	  && label && LABEL_P (label)
	  && GET_CODE (PATTERN (jump)) == SET
	  && SET_DEST (PATTERN (jump)) == pc_rtx
	  && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
	  && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
	{
	  remove_insn (jump);
	  remove_insn (barrier);
	  remove_insn (label);
	}

      remove_insn (curr_pool->pool_insn);
    }

  /* Remove all base register reload insns.  */
  for (insn = get_insns (); insn; )
    {
      rtx_insn *next_insn = NEXT_INSN (insn);

      if (NONJUMP_INSN_P (insn)
	  && GET_CODE (PATTERN (insn)) == SET
	  && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
	  && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
	remove_insn (insn);

      insn = next_insn;
    }

  /* Free pool list.  */
  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }
}
/* Output the constant pool entry EXP in mode MODE with alignment ALIGN.  */
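/* An illustrative sketch (not literal output of this routine): with
   -m64 a DFmode constant such as 1.5 lands in the pool as an 8-byte
   value, and an address constant as a relocated word:

       .align  8
   .LC0:
       .long   0x3ff80000, 0x00000000   # DFmode 1.5
   .LC1:
       .quad   some_symbol              # DImode address constant  */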
static void
s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
      gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
      assemble_real (*CONST_DOUBLE_REAL_VALUE (exp), mode, align);
      break;
    case MODE_INT:
      assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
      mark_symbol_refs_as_used (exp);
      break;
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
      {
	int i;
	machine_mode inner_mode;
	gcc_assert (GET_CODE (exp) == CONST_VECTOR);
	inner_mode = GET_MODE_INNER (GET_MODE (exp));
	for (i = 0; i < XVECLEN (exp, 0); i++)
	  s390_output_pool_entry (XVECEXP (exp, 0, i), inner_mode,
				  i == 0
				  ? align
				  : GET_MODE_BITSIZE (inner_mode));
      }
      break;
    default:
      gcc_unreachable ();
    }
}
/* Return an RTL expression representing the value of the return address
   for the frame COUNT steps up from the current frame.  FRAME is the
   frame pointer of that frame.  */

rtx
s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
{
  int offset;
  rtx addr;

  /* Without backchain, we fail for all but the current frame.  */
  if (!TARGET_BACKCHAIN && count > 0)
    return NULL_RTX;

  /* For the current frame, we need to make sure the initial
     value of RETURN_REGNUM is actually saved.  */
  if (count == 0)
    {
      /* On non-z architectures branch splitting could overwrite r14.  */
      if (TARGET_CPU_ZARCH)
	return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
      else
	{
	  cfun_frame_layout.save_return_addr_p = true;
	  return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
	}
    }

  if (TARGET_PACKED_STACK)
    offset = -2 * UNITS_PER_LONG;
  else
    offset = RETURN_REGNUM * UNITS_PER_LONG;

  addr = plus_constant (Pmode, frame, offset);
  addr = memory_address (Pmode, addr);
  return gen_rtx_MEM (Pmode, addr);
}
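/* This is what backs __builtin_return_address (COUNT): COUNT == 0
   reads the saved r14 of the current frame, while outer frames
   (COUNT > 0) are only reachable by walking the backchain and are
   therefore refused above unless -mbackchain is in effect.  */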
/* Return an RTL expression representing the back chain stored in
   the current stack frame.  */

rtx
s390_back_chain_rtx (void)
{
  rtx chain;

  gcc_assert (TARGET_BACKCHAIN);

  if (TARGET_PACKED_STACK)
    chain = plus_constant (Pmode, stack_pointer_rtx,
			   STACK_POINTER_OFFSET - UNITS_PER_LONG);
  else
    chain = stack_pointer_rtx;

  chain = gen_rtx_MEM (Pmode, chain);
  return chain;
}
/* Find first call clobbered register unused in a function.
   This could be used as base register in a leaf function
   or for holding the return address before epilogue.  */

static int
find_unused_clobbered_reg (void)
{
  int i;
  for (i = 0; i < 6; i++)
    if (!df_regs_ever_live_p (i))
      return i;
  return 0;
}
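/* Note that only r0..r5 are scanned: those are the call-clobbered
   GPRs without a fixed ABI role.  A return value of 0 doubles as
   "nothing found", which is harmless for the epilogue use below
   since r0 could not serve as a branch register anyway (BCR with
   R2 == 0 does not branch).  */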
/* Helper function for s390_regs_ever_clobbered.  Sets the fields in
   DATA for all clobbered hard regs in SETREG.  */

static void
s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
{
  char *regs_ever_clobbered = (char *)data;
  unsigned int i, regno;
  machine_mode mode = GET_MODE (setreg);

  if (GET_CODE (setreg) == SUBREG)
    {
      rtx inner = SUBREG_REG (setreg);
      if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
	return;
      regno = subreg_regno (setreg);
    }
  else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
    regno = REGNO (setreg);
  else
    return;

  for (i = regno;
       i < regno + HARD_REGNO_NREGS (regno, mode);
       i++)
    regs_ever_clobbered[i] = 1;
}
/* Walks through all basic blocks of the current function looking
   for clobbered hard regs using s390_reg_clobbered_rtx.  The fields
   of the passed char array REGS_EVER_CLOBBERED are set to one for
   each of those regs.  */

static void
s390_regs_ever_clobbered (char regs_ever_clobbered[])
{
  basic_block cur_bb;
  rtx_insn *cur_insn;
  unsigned int i;

  memset (regs_ever_clobbered, 0, 32);

  /* For non-leaf functions we have to consider all call clobbered regs to be
     clobbered.  */
  if (!crtl->is_leaf)
    for (i = 0; i < 32; i++)
      regs_ever_clobbered[i] = call_really_used_regs[i];

  /* Make the "magic" eh_return registers live if necessary.  For regs_ever_live
     this work is done by liveness analysis (mark_regs_live_at_end).
     Special care is needed for functions containing landing pads.  Landing pads
     may use the eh registers, but the code which sets these registers is not
     contained in that function.  Hence s390_regs_ever_clobbered is not able to
     deal with this automatically.  */
  if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
      if (crtl->calls_eh_return
	  || (cfun->machine->has_landing_pad_p
	      && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
	regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;

  /* For nonlocal gotos all call-saved registers have to be saved.
     This flag is also set for the unwinding code in libgcc.
     See expand_builtin_unwind_init.  For regs_ever_live this is done by
     reload.  */
  if (crtl->saves_all_registers)
    for (i = 0; i < 32; i++)
      if (!call_really_used_regs[i])
	regs_ever_clobbered[i] = 1;

  FOR_EACH_BB_FN (cur_bb, cfun)
    {
      FOR_BB_INSNS (cur_bb, cur_insn)
	{
	  rtx pat;

	  if (!INSN_P (cur_insn))
	    continue;

	  pat = PATTERN (cur_insn);

	  /* Ignore GPR restore insns.  */
	  if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
	    {
	      if (GET_CODE (pat) == SET
		  && GENERAL_REG_P (SET_DEST (pat)))
		{
		  /* lgdr  */
		  if (GET_MODE (SET_SRC (pat)) == DImode
		      && FP_REG_P (SET_SRC (pat)))
		    continue;
		  /* l / lg  */
		  if (GET_CODE (SET_SRC (pat)) == MEM)
		    continue;
		}
	      /* lm / lmg  */
	      if (GET_CODE (pat) == PARALLEL
		  && load_multiple_operation (pat, VOIDmode))
		continue;
	    }

	  note_stores (pat,
		       s390_reg_clobbered_rtx,
		       regs_ever_clobbered);
	}
    }
}
/* Determine the frame area which actually has to be accessed
   in the function epilogue.  The values are stored at the
   given pointers AREA_BOTTOM (address of the lowest used stack
   address) and AREA_TOP (address of the first item which does
   not belong to the stack frame).  */

static void
s390_frame_area (int *area_bottom, int *area_top)
{
  int b, t;

  b = INT_MAX;
  t = INT_MIN;

  if (cfun_frame_layout.first_restore_gpr != -1)
    {
      b = (cfun_frame_layout.gprs_offset
	   + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
      t = b + (cfun_frame_layout.last_restore_gpr
	       - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
    }

  if (TARGET_64BIT && cfun_save_high_fprs_p)
    {
      b = MIN (b, cfun_frame_layout.f8_offset);
      t = MAX (t, (cfun_frame_layout.f8_offset
		   + cfun_frame_layout.high_fprs * 8));
    }

  if (!TARGET_64BIT)
    {
      if (cfun_fpr_save_p (FPR4_REGNUM))
	{
	  b = MIN (b, cfun_frame_layout.f4_offset);
	  t = MAX (t, cfun_frame_layout.f4_offset + 8);
	}
      if (cfun_fpr_save_p (FPR6_REGNUM))
	{
	  b = MIN (b, cfun_frame_layout.f4_offset + 8);
	  t = MAX (t, cfun_frame_layout.f4_offset + 16);
	}
    }
  *area_bottom = b;
  *area_top = t;
}
/* Update gpr_save_slots in the frame layout trying to make use of
   FPRs as GPR save slots.
   This is a helper routine of s390_register_info.  */
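/* Rationale (sketch): z10 introduced LDGR/LGDR, which move a 64-bit
   GPR into/out of an FPR directly.  A leaf function can thus park
   call-saved GPRs in call-clobbered FPRs that are otherwise unused,
   avoiding the stack accesses of stm(g)/lm(g) entirely.  */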
static void
s390_register_info_gprtofpr ()
{
  int save_reg_slot = FPR0_REGNUM;
  int i, j;

  if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
    return;

  /* builtin_eh_return needs to be able to modify the return address
     on the stack.  It could also adjust the FPR save slot instead but
     is it worth the trouble?!  */
  if (crtl->calls_eh_return)
    return;

  for (i = 15; i >= 6; i--)
    {
      if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
	continue;

      /* Advance to the next FP register which can be used as a
	 GPR save slot.  */
      while ((!call_really_used_regs[save_reg_slot]
	      || df_regs_ever_live_p (save_reg_slot)
	      || cfun_fpr_save_p (save_reg_slot))
	     && FP_REGNO_P (save_reg_slot))
	save_reg_slot++;
      if (!FP_REGNO_P (save_reg_slot))
	{
	  /* We only want to use ldgr/lgdr if we can get rid of
	     stm/lm entirely.  So undo the gpr slot allocation in
	     case we ran out of FPR save slots.  */
	  for (j = 6; j <= 15; j++)
	    if (FP_REGNO_P (cfun_gpr_save_slot (j)))
	      cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
	  break;
	}
      cfun_gpr_save_slot (i) = save_reg_slot++;
    }
}
/* Set the bits in fpr_bitmap for FPRs which need to be saved due to
   stdarg.
   This is a helper routine for s390_register_info.  */

static void
s390_register_info_stdarg_fpr ()
{
  int i;
  int min_fpr;
  int max_fpr;

  /* Save the FP argument regs for stdarg.  f0, f2 for 31 bit and
     f0, f2, f4 and f6 for 64 bit.  */
  if (!cfun->stdarg
      || !TARGET_HARD_FLOAT
      || !cfun->va_list_fpr_size
      || crtl->args.info.fprs >= FP_ARG_NUM_REG)
    return;

  min_fpr = crtl->args.info.fprs;
  max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
  if (max_fpr >= FP_ARG_NUM_REG)
    max_fpr = FP_ARG_NUM_REG - 1;

  /* FPR argument regs start at f0.  */
  min_fpr += FPR0_REGNUM;
  max_fpr += FPR0_REGNUM;

  for (i = min_fpr; i <= max_fpr; i++)
    cfun_set_fpr_save (i);
}
/* Reserve the GPR save slots for GPRs which need to be saved due to
   stdarg.
   This is a helper routine for s390_register_info.  */

static void
s390_register_info_stdarg_gpr ()
{
  int i;
  int min_gpr;
  int max_gpr;

  if (!cfun->stdarg
      || !cfun->va_list_gpr_size
      || crtl->args.info.gprs >= GP_ARG_NUM_REG)
    return;

  min_gpr = crtl->args.info.gprs;
  max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
  if (max_gpr >= GP_ARG_NUM_REG)
    max_gpr = GP_ARG_NUM_REG - 1;

  /* GPR argument regs start at r2.  */
  min_gpr += GPR2_REGNUM;
  max_gpr += GPR2_REGNUM;

  /* If r6 was supposed to be saved into an FPR and now needs to go to
     the stack for vararg we have to adjust the restore range to make
     sure that the restore is done from stack as well.  */
  if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
      && min_gpr <= GPR6_REGNUM
      && max_gpr >= GPR6_REGNUM)
    {
      if (cfun_frame_layout.first_restore_gpr == -1
	  || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
	cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
      if (cfun_frame_layout.last_restore_gpr == -1
	  || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
	cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
    }

  if (cfun_frame_layout.first_save_gpr == -1
      || cfun_frame_layout.first_save_gpr > min_gpr)
    cfun_frame_layout.first_save_gpr = min_gpr;

  if (cfun_frame_layout.last_save_gpr == -1
      || cfun_frame_layout.last_save_gpr < max_gpr)
    cfun_frame_layout.last_save_gpr = max_gpr;

  for (i = min_gpr; i <= max_gpr; i++)
    cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
}
/* Calculate the save and restore ranges for stm(g) and lm(g) in the
   prologue and epilogue.  */

static void
s390_register_info_set_ranges ()
{
  int i, j;

  /* Find the first and the last save slot supposed to use the stack
     to set the restore range.
     Vararg regs might be marked as save to stack but only the
     call-saved regs really need restoring (i.e. r6).  This code
     assumes that the vararg regs have not yet been recorded in
     cfun_gpr_save_slot.  */
  for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
  for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
  cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
  cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
  cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
  cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
}
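/* Worked example (sketch): if only r9, r11 and r12 are marked
   SAVE_SLOT_STACK, the loops above yield i == 9 and j == 12, so
   stm(g)/lm(g) will cover the contiguous block r9..r12 even though
   r10 itself did not strictly need saving.  */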
/* The GPR and FPR save slots in cfun->machine->frame_layout are set
   for registers which need to be saved in function prologue.
   This function can be used until the insns emitted for save/restore
   of the regs are visible in the RTL stream.  */

static void
s390_register_info ()
{
  int i;
  char clobbered_regs[32];

  gcc_assert (!epilogue_completed);

  if (reload_completed)
    /* After reload we rely on our own routine to determine which
       registers need saving.  */
    s390_regs_ever_clobbered (clobbered_regs);
  else
    /* During reload we use regs_ever_live as a base since reload
       does changes in there which we otherwise would not be aware
       of.  */
    for (i = 0; i < 32; i++)
      clobbered_regs[i] = df_regs_ever_live_p (i);

  for (i = 0; i < 32; i++)
    clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];

  /* Mark the call-saved FPRs which need to be saved.
     This needs to be done before checking the special GPRs since the
     stack pointer usage depends on whether high FPRs have to be saved
     or not.  */
  cfun_frame_layout.fpr_bitmap = 0;
  cfun_frame_layout.high_fprs = 0;
  for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
    if (clobbered_regs[i] && !call_really_used_regs[i])
      {
	cfun_set_fpr_save (i);
	if (i >= FPR8_REGNUM)
	  cfun_frame_layout.high_fprs++;
      }

  /* Register 12 is used for GOT address, but also as temp in prologue
     for split-stack stdarg functions (unless r14 is available).  */
  clobbered_regs[12]
    |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
	|| (flag_split_stack && cfun->stdarg
	    && (crtl->is_leaf || TARGET_TPF_PROFILING
		|| has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));

  clobbered_regs[BASE_REGNUM]
    |= (cfun->machine->base_reg
	&& REGNO (cfun->machine->base_reg) == BASE_REGNUM);

  clobbered_regs[HARD_FRAME_POINTER_REGNUM]
    |= !!frame_pointer_needed;

  /* On pre z900 machines this might take until machine dependent
     reorg to decide.
     save_return_addr_p will only be set on non-zarch machines so
     there is no risk that r14 goes into an FPR instead of a stack
     slot.  */
  clobbered_regs[RETURN_REGNUM]
    |= (!crtl->is_leaf
	|| TARGET_TPF_PROFILING
	|| cfun->machine->split_branches_pending_p
	|| cfun_frame_layout.save_return_addr_p
	|| crtl->calls_eh_return);

  clobbered_regs[STACK_POINTER_REGNUM]
    |= (!crtl->is_leaf
	|| TARGET_TPF_PROFILING
	|| cfun_save_high_fprs_p
	|| get_frame_size () > 0
	|| (reload_completed && cfun_frame_layout.frame_size > 0)
	|| cfun->calls_alloca);

  memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);

  for (i = 6; i < 16; i++)
    if (clobbered_regs[i])
      cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;

  s390_register_info_stdarg_fpr ();
  s390_register_info_gprtofpr ();
  s390_register_info_set_ranges ();
  /* stdarg functions might need to save GPRs 2 to 6.  This might
     override the GPR->FPR save decision made by
     s390_register_info_gprtofpr for r6 since vararg regs must go to
     the stack.  */
  s390_register_info_stdarg_gpr ();
}
/* This function is called by s390_optimize_prologue in order to get
   rid of unnecessary GPR save/restore instructions.  The register info
   for the GPRs is re-computed and the ranges are re-calculated.  */

static void
s390_optimize_register_info ()
{
  char clobbered_regs[32];
  int i;

  gcc_assert (epilogue_completed);
  gcc_assert (!cfun->machine->split_branches_pending_p);

  s390_regs_ever_clobbered (clobbered_regs);

  for (i = 0; i < 32; i++)
    clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];

  /* There is still special treatment needed for cases invisible to
     s390_regs_ever_clobbered.  */
  clobbered_regs[RETURN_REGNUM]
    |= (TARGET_TPF_PROFILING
	/* When expanding builtin_return_addr in ESA mode we do not
	   know whether r14 will later be needed as scratch reg when
	   doing branch splitting.  So the builtin always accesses the
	   r14 save slot and we need to stick to the save/restore
	   decision for r14 even if it turns out that it didn't get
	   used after all.  */
	|| cfun_frame_layout.save_return_addr_p
	|| crtl->calls_eh_return);

  memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);

  for (i = 6; i < 16; i++)
    if (!clobbered_regs[i])
      cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;

  s390_register_info_set_ranges ();
  s390_register_info_stdarg_gpr ();
}
/* Fill cfun->machine with info about frame of current function.  */

static void
s390_frame_info (void)
{
  HOST_WIDE_INT lowest_offset;

  cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
  cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;

  /* The va_arg builtin uses a constant distance of 16 *
     UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
     pointer.  So even if we are going to save the stack pointer in an
     FPR we need the stack space in order to keep the offsets
     correct.  */
  if (cfun->stdarg && cfun_save_arg_fprs_p)
    {
      cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;

      if (cfun_frame_layout.first_save_gpr_slot == -1)
	cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
    }

  cfun_frame_layout.frame_size = get_frame_size ();
  if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
    fatal_error (input_location,
		 "total size of local variables exceeds architecture limit");

  if (!TARGET_PACKED_STACK)
    {
      /* Fixed stack layout.  */
      cfun_frame_layout.backchain_offset = 0;
      cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
      cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
      cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
      cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
				       * UNITS_PER_LONG);
    }
  else if (TARGET_BACKCHAIN)
    {
      /* Kernel stack layout - packed stack, backchain, no float.  */
      gcc_assert (TARGET_SOFT_FLOAT);
      cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
					    - UNITS_PER_LONG);

      /* The distance between the backchain and the return address
	 save slot must not change.  So we always need a slot for the
	 stack pointer which resides in between.  */
      cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;

      cfun_frame_layout.gprs_offset
	= cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;

      /* FPRs will not be saved.  Nevertheless pick sane values to
	 keep area calculations valid.  */
      cfun_frame_layout.f0_offset =
	cfun_frame_layout.f4_offset =
	cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
    }
  else
    {
      int num_fprs;

      /* Packed stack layout without backchain.  */

      /* With stdarg FPRs need their dedicated slots.  */
      num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
		  : (cfun_fpr_save_p (FPR4_REGNUM) +
		     cfun_fpr_save_p (FPR6_REGNUM)));
      cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;

      num_fprs = (cfun->stdarg ? 2
		  : (cfun_fpr_save_p (FPR0_REGNUM)
		     + cfun_fpr_save_p (FPR2_REGNUM)));
      cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;

      cfun_frame_layout.gprs_offset
	= cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;

      cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
				     - cfun_frame_layout.high_fprs * 8);
    }

  if (cfun_save_high_fprs_p)
    cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;

  if (!crtl->is_leaf)
    cfun_frame_layout.frame_size += crtl->outgoing_args_size;

  /* In the following cases we have to allocate a STACK_POINTER_OFFSET
     sized area at the bottom of the stack.  This is required also for
     leaf functions.  When GCC generates a local stack reference it
     will always add STACK_POINTER_OFFSET to all these references.  */
  if (crtl->is_leaf
      && !TARGET_TPF_PROFILING
      && cfun_frame_layout.frame_size == 0
      && !cfun->calls_alloca)
    return;

  /* Calculate the number of bytes we have used in our own register
     save area.  With the packed stack layout we can re-use the
     remaining bytes for normal stack elements.  */

  if (TARGET_PACKED_STACK)
    lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
			      cfun_frame_layout.f4_offset),
			 cfun_frame_layout.gprs_offset);
  else
    lowest_offset = 0;

  if (TARGET_BACKCHAIN)
    lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);

  cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;

  /* If under 31 bit an odd number of gprs has to be saved we have to
     adjust the frame size to sustain 8 byte alignment of stack
     frames.  */
  cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
				   STACK_BOUNDARY / BITS_PER_UNIT - 1)
				  & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
}
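/* Example of the final rounding: STACK_BOUNDARY is 64 bits (8 bytes)
   on s390, so a raw frame size of, say, 92 bytes is rounded up to 96
   to keep stack frames 8-byte aligned.  */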
/* Generate frame layout.  Fills in register and frame data for the current
   function in cfun->machine.  This routine can be called multiple times;
   it will re-do the complete frame layout every time.  */

static void
s390_init_frame_layout (void)
{
  HOST_WIDE_INT frame_size;
  int base_used;

  /* After LRA the frame layout is supposed to be read-only and should
     not be re-computed.  */
  if (reload_completed)
    return;

  /* On S/390 machines, we may need to perform branch splitting, which
     will require both base and return address register.  We have no
     choice but to assume we're going to need them until right at the
     end of the machine dependent reorg phase.  */
  if (!TARGET_CPU_ZARCH)
    cfun->machine->split_branches_pending_p = true;

  do
    {
      frame_size = cfun_frame_layout.frame_size;

      /* Try to predict whether we'll need the base register.  */
      base_used = cfun->machine->split_branches_pending_p
		  || crtl->uses_const_pool
		  || (!DISP_IN_RANGE (frame_size)
		      && !CONST_OK_FOR_K (frame_size));

      /* Decide which register to use as literal pool base.  In small
	 leaf functions, try to use an unused call-clobbered register
	 as base register to avoid save/restore overhead.  */
      if (!base_used)
	cfun->machine->base_reg = NULL_RTX;
      else
	{
	  int br = 0;

	  if (crtl->is_leaf)
	    /* Prefer r5 (most likely to be free).  */
	    for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
	      ;
	  cfun->machine->base_reg =
	    gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
	}

      s390_register_info ();
      s390_frame_info ();
    }
  while (frame_size != cfun_frame_layout.frame_size);
}
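/* The do/while above iterates because the two computations feed each
   other: picking a literal pool base register changes the register
   and frame info, which can change the frame size, which in turn can
   change whether a base register is needed at all.  The loop stops
   once frame_size reaches a fixed point.  */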
/* Remove the FPR clobbers from a tbegin insn if it can be proven that
   the TX is nonescaping.  A transaction is considered escaping if
   there is at least one path from tbegin returning CC0 to the
   function exit block without a tend.

   The check so far has some limitations:
   - only single tbegin/tend BBs are supported
   - the first cond jump after tbegin must separate the CC0 path from ~CC0
   - when CC is copied to a GPR and the CC0 check is done with the GPR
     this is not supported  */

static void
s390_optimize_nonescaping_tx (void)
{
  const unsigned int CC0 = 1 << 3;
  basic_block tbegin_bb = NULL;
  basic_block tend_bb = NULL;
  basic_block bb;
  rtx_insn *insn;
  bool result = true;
  int bb_index;
  rtx_insn *tbegin_insn = NULL;

  if (!cfun->machine->tbegin_p)
    return;

  for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);

      if (!bb)
	continue;

      FOR_BB_INSNS (bb, insn)
	{
	  rtx ite, cc, pat, target;
	  unsigned HOST_WIDE_INT mask;

	  if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
	    continue;

	  pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    pat = XVECEXP (pat, 0, 0);

	  if (GET_CODE (pat) != SET
	      || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
	    continue;

	  if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
	    {
	      rtx_insn *tmp;

	      tbegin_insn = insn;

	      /* Just return if the tbegin doesn't have clobbers.  */
	      if (GET_CODE (PATTERN (insn)) != PARALLEL)
		return;

	      if (tbegin_bb != NULL)
		return;

	      /* Find the next conditional jump.  */
	      for (tmp = NEXT_INSN (insn);
		   tmp != NULL;
		   tmp = NEXT_INSN (tmp))
		{
		  if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
		    return;
		  if (!JUMP_P (tmp))
		    continue;

		  ite = SET_SRC (PATTERN (tmp));
		  if (GET_CODE (ite) != IF_THEN_ELSE)
		    return;

		  cc = XEXP (XEXP (ite, 0), 0);
		  if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
		      || GET_MODE (cc) != CCRAWmode
		      || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
		    return;

		  if (bb->succs->length () != 2)
		    return;

		  mask = INTVAL (XEXP (XEXP (ite, 0), 1));
		  if (GET_CODE (XEXP (ite, 0)) == NE)
		    mask ^= 0xf;

		  if (mask == CC0)
		    target = XEXP (ite, 1);
		  else if (mask == (CC0 ^ 0xf))
		    target = XEXP (ite, 2);
		  else
		    return;

		  {
		    edge_iterator ei;
		    edge e1, e2;

		    ei = ei_start (bb->succs);
		    e1 = ei_safe_edge (ei);
		    ei_next (&ei);
		    e2 = ei_safe_edge (ei);

		    if (e2->flags & EDGE_FALLTHRU)
		      {
			e2 = e1;
			e1 = ei_safe_edge (ei);
		      }

		    if (!(e1->flags & EDGE_FALLTHRU))
		      return;

		    tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
		  }
		  break;
		}
	      if (tmp == BB_END (bb))
		break;
	    }

	  if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
	    {
	      if (tend_bb != NULL)
		return;
	      tend_bb = bb;
	    }
	}
    }

  /* Either we successfully remove the FPR clobbers here or we are not
     able to do anything for this TX.  Both cases don't qualify for
     another loop run.  */
  cfun->machine->tbegin_p = false;

  if (tbegin_bb == NULL || tend_bb == NULL)
    return;

  calculate_dominance_info (CDI_POST_DOMINATORS);
  result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
  free_dominance_info (CDI_POST_DOMINATORS);

  if (!result)
    return;

  PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
			    gen_rtvec (2,
				       XVECEXP (PATTERN (tbegin_insn), 0, 0),
				       XVECEXP (PATTERN (tbegin_insn), 0, 1)));
  INSN_CODE (tbegin_insn) = -1;
  df_insn_rescan (tbegin_insn);
}
/* Return true if it is legal to put a value with MODE into REGNO.  */

bool
s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
    return false;

  switch (REGNO_REG_CLASS (regno))
    {
    case VEC_REGS:
      return ((GET_MODE_CLASS (mode) == MODE_INT
	       && s390_class_max_nregs (VEC_REGS, mode) == 1)
	      || mode == DFmode
	      || (TARGET_VXE && mode == SFmode)
	      || s390_vector_mode_supported_p (mode));
    case FP_REGS:
      if (TARGET_VX
	  && ((GET_MODE_CLASS (mode) == MODE_INT
	       && s390_class_max_nregs (FP_REGS, mode) == 1)
	      || mode == DFmode
	      || s390_vector_mode_supported_p (mode)))
	return true;
      if (REGNO_PAIR_OK (regno, mode))
	{
	  if (mode == SImode || mode == DImode)
	    return true;
	  if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
	    return true;
	}
      break;
    case ADDR_REGS:
      if (FRAME_REGNO_P (regno) && mode == Pmode)
	return true;
      /* fallthrough */
    case GENERAL_REGS:
      if (REGNO_PAIR_OK (regno, mode))
	{
	  if (TARGET_ZARCH
	      || (mode != TFmode && mode != TCmode && mode != TDmode))
	    return true;
	}
      break;
    case CC_REGS:
      if (GET_MODE_CLASS (mode) == MODE_CC)
	return true;
      break;
    case ACCESS_REGS:
      if (REGNO_PAIR_OK (regno, mode))
	{
	  if (mode == SImode || mode == Pmode)
	    return true;
	}
      break;
    default:
      return false;
    }

  return false;
}
/* Return nonzero if register OLD_REG can be renamed to register NEW_REG.  */

bool
s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
{
  /* Once we've decided upon a register to use as base register, it must
     no longer be used for any other purpose.  */
  if (cfun->machine->base_reg)
    if (REGNO (cfun->machine->base_reg) == old_reg
	|| REGNO (cfun->machine->base_reg) == new_reg)
      return false;

  /* Prevent regrename from using call-saved regs which haven't
     actually been saved.  This is necessary since regrename assumes
     the backend save/restore decisions are based on
     df_regs_ever_live.  Since we have our own routine we have to tell
     regrename manually about it.  */
  if (GENERAL_REGNO_P (new_reg)
      && !call_really_used_regs[new_reg]
      && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
    return false;

  return true;
}

/* Return nonzero if register REGNO can be used as a scratch register
   in peephole2.  */

static bool
s390_hard_regno_scratch_ok (unsigned int regno)
{
  /* See s390_hard_regno_rename_ok.  */
  if (GENERAL_REGNO_P (regno)
      && !call_really_used_regs[regno]
      && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
    return false;

  return true;
}
/* Maximum number of registers to represent a value of mode MODE
   in a register of class RCLASS.  */

int
s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
{
  int reg_size;
  bool reg_pair_required_p = false;

  switch (rclass)
    {
    case FP_REGS:
    case VEC_REGS:
      reg_size = TARGET_VX ? 16 : 8;

      /* TF and TD modes would fit into a VR but we put them into a
	 register pair since we do not have 128bit FP instructions on
	 full VRs.  */
      if (TARGET_VX
	  && SCALAR_FLOAT_MODE_P (mode)
	  && GET_MODE_SIZE (mode) >= 16)
	reg_pair_required_p = true;

      /* Even if complex types would fit into a single FPR/VR we force
	 them into a register pair to deal with the parts more easily.
	 (FIXME: What about complex ints?)  */
      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	reg_pair_required_p = true;
      break;
    case ACCESS_REGS:
      reg_size = 4;
      break;
    default:
      reg_size = UNITS_PER_WORD;
      break;
    }

  if (reg_pair_required_p)
    return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
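/* Examples (64-bit, sketch): TFmode (16 bytes) in FP_REGS needs a
   register pair, i.e. 2; DImode in GENERAL_REGS needs 1; and with
   the vector facility a V16QImode value fits one 16-byte VR, so
   VEC_REGS yields 1.  */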
/* Return TRUE if changing mode from FROM to TO should not be allowed
   for register class CLASS.  */

bool
s390_cannot_change_mode_class (machine_mode from_mode,
			       machine_mode to_mode,
			       enum reg_class rclass)
{
  machine_mode small_mode;
  machine_mode big_mode;

  /* V1TF and TF have different representations in vector
     registers.  */
  if (reg_classes_intersect_p (VEC_REGS, rclass)
      && ((from_mode == V1TFmode && to_mode == TFmode)
	  || (from_mode == TFmode && to_mode == V1TFmode)))
    return true;

  if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
    return false;

  if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
    {
      small_mode = from_mode;
      big_mode = to_mode;
    }
  else
    {
      small_mode = to_mode;
      big_mode = from_mode;
    }

  /* Values residing in VRs are little-endian style.  All modes are
     placed left-aligned in a VR.  This means that we cannot allow
     switching between modes with differing sizes.  Also if the vector
     facility is available we still place TFmode values in VR register
     pairs, since the only instructions operating on TFmode deal with
     register pairs.  Therefore we have to allow DFmode subregs of
     TFmodes to enable the TFmode splitters.  */
  if (reg_classes_intersect_p (VEC_REGS, rclass)
      && (GET_MODE_SIZE (small_mode) < 8
	  || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
    return true;

  /* Likewise for access registers, since they have only half the
     word size on 64-bit.  */
  if (reg_classes_intersect_p (ACCESS_REGS, rclass))
    return true;

  return false;
}

/* Return true if we use LRA instead of reload pass.  */

static bool
s390_lra_p (void)
{
  return s390_lra_flag;
}
/* Return true if register FROM can be eliminated via register TO.  */

static bool
s390_can_eliminate (const int from, const int to)
{
  /* On zSeries machines, we have not marked the base register as fixed.
     Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
     If a function requires the base register, we say here that this
     elimination cannot be performed.  This will cause reload to free
     up the base register (as if it were fixed).  On the other hand,
     if the current function does *not* require the base register, we
     say here the elimination succeeds, which in turn allows reload
     to allocate the base register for any other purpose.  */
  if (from == BASE_REGNUM && to == BASE_REGNUM)
    {
      if (TARGET_CPU_ZARCH)
	{
	  s390_init_frame_layout ();
	  return cfun->machine->base_reg == NULL_RTX;
	}

      return false;
    }

  /* Everything else must point into the stack frame.  */
  gcc_assert (to == STACK_POINTER_REGNUM
	      || to == HARD_FRAME_POINTER_REGNUM);

  gcc_assert (from == FRAME_POINTER_REGNUM
	      || from == ARG_POINTER_REGNUM
	      || from == RETURN_ADDRESS_POINTER_REGNUM);

  /* Make sure we actually saved the return address.  */
  if (from == RETURN_ADDRESS_POINTER_REGNUM)
    if (!crtl->calls_eh_return
	&& !cfun->stdarg
	&& !cfun_frame_layout.save_return_addr_p)
      return false;

  return true;
}
/* Return offset between register FROM and TO initially after prolog.  */

HOST_WIDE_INT
s390_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  /* ??? Why are we called for non-eliminable pairs?  */
  if (!s390_can_eliminate (from, to))
    return 0;

  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      offset = (get_frame_size ()
		+ STACK_POINTER_OFFSET
		+ crtl->outgoing_args_size);
      break;
    case ARG_POINTER_REGNUM:
      s390_init_frame_layout ();
      offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
      break;
    case RETURN_ADDRESS_POINTER_REGNUM:
      s390_init_frame_layout ();

      if (cfun_frame_layout.first_save_gpr_slot == -1)
	{
	  /* If it turns out that for stdarg nothing went into the reg
	     save area we also do not need the return address
	     pointer.  */
	  if (cfun->stdarg && !cfun_save_arg_fprs_p)
	    return 0;
	  gcc_unreachable ();
	}

      /* In order to make the following work it is not necessary for
	 r14 to have a save slot.  It is sufficient if one other GPR
	 got one.  Since the GPRs are always stored without gaps we
	 are able to calculate where the r14 save slot would
	 reside.  */
      offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
		(RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
		UNITS_PER_LONG);
      break;
    case BASE_REGNUM:
      offset = 0;
      break;
    default:
      gcc_unreachable ();
    }

  return offset;
}
/* Emit insn to save fpr REGNUM at offset OFFSET relative
   to register BASE.  Return generated insn.  */

static rtx
save_fpr (rtx base, int offset, int regnum)
{
  rtx addr;
  addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));

  if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
    set_mem_alias_set (addr, get_varargs_alias_set ());
  else
    set_mem_alias_set (addr, get_frame_alias_set ());

  return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
}

/* Emit insn to restore fpr REGNUM from offset OFFSET relative
   to register BASE.  Return generated insn.  */

static rtx
restore_fpr (rtx base, int offset, int regnum)
{
  rtx addr;
  addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
  set_mem_alias_set (addr, get_frame_alias_set ());

  return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
}
/* Return true if REGNO is a global register, but not one
   of the special ones that need to be saved/restored anyway.  */

static inline bool
global_not_special_regno_p (int regno)
{
  return (global_regs[regno]
	  /* These registers are special and need to be
	     restored in any case.  */
	  && !(regno == STACK_POINTER_REGNUM
	       || regno == RETURN_REGNUM
	       || regno == BASE_REGNUM
	       || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
}
/* Generate insn to save registers FIRST to LAST into
   the register save area located at offset OFFSET
   relative to register BASE.  */

static rtx
save_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn, note;
  int i;

  addr = plus_constant (Pmode, base, offset);
  addr = gen_rtx_MEM (Pmode, addr);

  set_mem_alias_set (addr, get_frame_alias_set ());

  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
	insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
      else
	insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));

      if (!global_not_special_regno_p (first))
	RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  insn = gen_store_multiple (addr,
			     gen_rtx_REG (Pmode, first),
			     GEN_INT (last - first + 1));

  if (first <= 6 && cfun->stdarg)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
	rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);

	if (first + i <= 6)
	  set_mem_alias_set (mem, get_varargs_alias_set ());
      }

  /* We need to set the FRAME_RELATED flag on all SETs
     inside the store-multiple pattern.

     However, we must not emit DWARF records for registers 2..5
     if they are stored for use by variable arguments ...

     ??? Unfortunately, it is not enough to simply not set the
     FRAME_RELATED flags for those SETs, because the first SET
     of the PARALLEL is always treated as if it had the flag
     set, even if it does not.  Therefore we emit a new pattern
     without those registers as REG_FRAME_RELATED_EXPR note.  */

  if (first >= 6 && !global_not_special_regno_p (first))
    {
      rtx pat = PATTERN (insn);

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) == SET
	    && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
								     0, i)))))
	  RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;

      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (last >= 6)
    {
      int start;

      for (start = first >= 6 ? first : 6; start <= last; start++)
	if (!global_not_special_regno_p (start))
	  break;

      if (start > last)
	return insn;

      addr = plus_constant (Pmode, base,
			    offset + (start - first) * UNITS_PER_LONG);

      if (start == last)
	{
	  if (TARGET_64BIT)
	    note = gen_movdi (gen_rtx_MEM (Pmode, addr),
			      gen_rtx_REG (Pmode, start));
	  else
	    note = gen_movsi (gen_rtx_MEM (Pmode, addr),
			      gen_rtx_REG (Pmode, start));
	  note = PATTERN (note);

	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  return insn;
	}

      note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
				 gen_rtx_REG (Pmode, start),
				 GEN_INT (last - start + 1));
      note = PATTERN (note);

      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);

      for (i = 0; i < XVECLEN (note, 0); i++)
	if (GET_CODE (XVECEXP (note, 0, i)) == SET
	    && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
								     0, i)))))
	  RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;

      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return insn;
}
/* Generate insn to restore registers FIRST to LAST from
   the register save area located at offset OFFSET
   relative to register BASE.  */

static rtx
restore_gprs (rtx base, int offset, int first, int last)
{
  rtx addr, insn;

  addr = plus_constant (Pmode, base, offset);
  addr = gen_rtx_MEM (Pmode, addr);
  set_mem_alias_set (addr, get_frame_alias_set ());

  /* Special-case single register.  */
  if (first == last)
    {
      if (TARGET_64BIT)
	insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
      else
	insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);

      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
			    addr,
			    GEN_INT (last - first + 1));
  RTX_FRAME_RELATED_P (insn) = 1;
  return insn;
}
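/* With the default (non-packed) 64-bit layout this typically emits a
   single load-multiple, e.g.

       lmg     %r6,%r15,48(%r15)

   (illustrative: 48 == 6 * UNITS_PER_LONG is the r6 save slot when
   first_save_gpr_slot is 6).  */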
/* Return insn sequence to load the GOT register.  */

rtx_insn *
s390_load_got (void)
{
  rtx_insn *insns;

  /* We cannot use pic_offset_table_rtx here since we use this
     function also for non-pic if __tls_get_offset is called and in
     that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
     aren't usable.  */
  rtx got_rtx = gen_rtx_REG (Pmode, 12);

  start_sequence ();

  if (TARGET_CPU_ZARCH)
    emit_move_insn (got_rtx, s390_got_symbol ());
  else
    {
      rtx offset;

      offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, s390_got_symbol ()),
			       UNSPEC_LTREL_OFFSET);
      offset = gen_rtx_CONST (Pmode, offset);
      offset = force_const_mem (Pmode, offset);

      emit_move_insn (got_rtx, offset);

      offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
			       UNSPEC_LTREL_BASE);
      offset = gen_rtx_PLUS (Pmode, got_rtx, offset);

      emit_move_insn (got_rtx, offset);
    }

  insns = get_insns ();
  end_sequence ();
  return insns;
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
s390_emit_stack_tie (void)
{
  rtx mem = gen_frame_mem (BLKmode,
			   gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
  emit_insn (gen_stack_tie (mem));
}
/* Copy GPRS into FPR save slots.  */

static void
s390_save_gprs_to_fprs (void)
{
  int i;

  if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
    return;

  for (i = 6; i < 16; i++)
    {
      if (FP_REGNO_P (cfun_gpr_save_slot (i)))
	{
	  rtx_insn *insn =
	    emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
			    gen_rtx_REG (DImode, i));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  /* This prevents dwarf2cfi from interpreting the set.  Doing
	     so it might emit def_cfa_register infos setting an FPR as
	     new CFA.  */
	  add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
	}
    }
}
/* Restore GPRs from FPR save slots.  */

static void
s390_restore_gprs_from_fprs (void)
{
  int i;

  if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
    return;

  for (i = 6; i < 16; i++)
    {
      rtx_insn *insn;

      if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
	continue;

      rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));

      if (i == STACK_POINTER_REGNUM)
	insn = emit_insn (gen_stack_restore_from_fpr (fpr));
      else
	insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);

      df_set_regs_ever_live (i, true);
      add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
      if (i == STACK_POINTER_REGNUM)
	add_reg_note (insn, REG_CFA_DEF_CFA,
		      plus_constant (Pmode, stack_pointer_rtx,
				     STACK_POINTER_OFFSET));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* A pass run immediately before shrink-wrapping and prologue and epilogue
   generation.  */

namespace {

const pass_data pass_data_s390_early_mach =
{
  RTL_PASS, /* type */
  "early_mach", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
};

class pass_s390_early_mach : public rtl_opt_pass
{
public:
  pass_s390_early_mach (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_s390_early_mach

unsigned int
pass_s390_early_mach::execute (function *fun)
{
  rtx_insn *insn;

  /* Try to get rid of the FPR clobbers.  */
  s390_optimize_nonescaping_tx ();

  /* Re-compute register info.  */
  s390_register_info ();

  /* If we're using a base register, ensure that it is always valid for
     the first non-prologue instruction.  */
  if (fun->machine->base_reg)
    emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));

  /* Annotate all constant pool references to let the scheduler know
     they implicitly use the base register.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	annotate_constant_pool_refs (&PATTERN (insn));
	df_insn_rescan (insn);
      }
  return 0;
}

} // anon namespace
/* Expand the prologue into a bunch of separate insns.  */

void
s390_emit_prologue (void)
{
  rtx insn, addr;
  rtx temp_reg;
  int i;
  int offset;
  int next_fpr = 0;

  /* Choose best register to use for temp use within prologue.
     TPF with profiling must avoid register 14 - the tracing function
     needs the original contents of r14 to be preserved.  */

  if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
      && !crtl->is_leaf
      && !TARGET_TPF_PROFILING)
    temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
  else if (flag_split_stack && cfun->stdarg)
    temp_reg = gen_rtx_REG (Pmode, 12);
  else
    temp_reg = gen_rtx_REG (Pmode, 1);

  s390_save_gprs_to_fprs ();

  /* Save call saved gprs.  */
  if (cfun_frame_layout.first_save_gpr != -1)
    {
      insn = save_gprs (stack_pointer_rtx,
			cfun_frame_layout.gprs_offset +
			UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
					  - cfun_frame_layout.first_save_gpr_slot),
			cfun_frame_layout.first_save_gpr,
			cfun_frame_layout.last_save_gpr);
      emit_insn (insn);
    }

  /* Dummy insn to mark literal pool slot.  */

  if (cfun->machine->base_reg)
    emit_insn (gen_main_pool (cfun->machine->base_reg));

  offset = cfun_frame_layout.f0_offset;

  /* Save f0 and f2.  */
  for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
    {
      if (cfun_fpr_save_p (i))
	{
	  save_fpr (stack_pointer_rtx, offset, i);
	  offset += 8;
	}
      else if (!TARGET_PACKED_STACK || cfun->stdarg)
	offset += 8;
    }

  /* Save f4 and f6.  */
  offset = cfun_frame_layout.f4_offset;
  for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
    {
      if (cfun_fpr_save_p (i))
	{
	  insn = save_fpr (stack_pointer_rtx, offset, i);
	  offset += 8;

	  /* If f4 and f6 are call clobbered they are saved due to
	     stdarg and therefore are not frame related.  */
	  if (!call_really_used_regs[i])
	    RTX_FRAME_RELATED_P (insn) = 1;
	}
      else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
	offset += 8;
    }

  if (TARGET_PACKED_STACK
      && cfun_save_high_fprs_p
      && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
    {
      offset = (cfun_frame_layout.f8_offset
		+ (cfun_frame_layout.high_fprs - 1) * 8);

      for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
	if (cfun_fpr_save_p (i))
	  {
	    insn = save_fpr (stack_pointer_rtx, offset, i);
	    RTX_FRAME_RELATED_P (insn) = 1;
	    offset -= 8;
	  }
      if (offset >= cfun_frame_layout.f8_offset)
	next_fpr = i;
    }

  if (!TARGET_PACKED_STACK)
    next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;

  if (flag_stack_usage_info)
    current_function_static_stack_size = cfun_frame_layout.frame_size;
  /* Decrement stack pointer.  */

  if (cfun_frame_layout.frame_size > 0)
    {
      rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
      rtx real_frame_off;

      if (s390_stack_size)
	{
	  HOST_WIDE_INT stack_guard;

	  if (s390_stack_guard)
	    stack_guard = s390_stack_guard;
	  else
	    {
	      /* If no value for stack guard is provided the smallest power of 2
		 larger than the current frame size is chosen.  */
	      stack_guard = 1;
	      while (stack_guard < cfun_frame_layout.frame_size)
		stack_guard <<= 1;
	    }

	  if (cfun_frame_layout.frame_size >= s390_stack_size)
	    {
	      warning (0, "frame size of function %qs is %wd"
		       " bytes exceeding user provided stack limit of "
		       "%d bytes.  "
		       "An unconditional trap is added.",
		       current_function_name(), cfun_frame_layout.frame_size,
		       s390_stack_size);
	      emit_insn (gen_trap ());
	      emit_barrier ();
	    }
	  else
	    {
	      /* stack_guard has to be smaller than s390_stack_size.
		 Otherwise we would emit an AND with zero which would
		 not match the test under mask pattern.  */
	      if (stack_guard >= s390_stack_size)
		{
		  warning (0, "frame size of function %qs is %wd"
			   " bytes which is more than half the stack size. "
			   "The dynamic check would not be reliable. "
			   "No check emitted for this function.",
			   current_function_name(),
			   cfun_frame_layout.frame_size);
		}
	      else
		{
		  HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
						    & ~(stack_guard - 1));

		  rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
				       GEN_INT (stack_check_mask));
		  if (TARGET_64BIT)
		    emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
							 t, const0_rtx),
					     t, const0_rtx, const0_rtx));
		  else
		    emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
							 t, const0_rtx),
					     t, const0_rtx, const0_rtx));
		}
	    }
	}

      if (s390_warn_framesize > 0
	  && cfun_frame_layout.frame_size >= s390_warn_framesize)
	warning (0, "frame size of %qs is %wd bytes",
		 current_function_name (), cfun_frame_layout.frame_size);

      if (s390_warn_dynamicstack_p && cfun->calls_alloca)
	warning (0, "%qs uses dynamic stack allocation", current_function_name ());

      /* Save incoming stack pointer into temp reg.  */
      if (TARGET_BACKCHAIN || next_fpr)
	insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));

      /* Subtract frame size from stack pointer.  */

      if (DISP_IN_RANGE (INTVAL (frame_off)))
	{
	  insn = gen_rtx_SET (stack_pointer_rtx,
			      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					    frame_off));
	  insn = emit_insn (insn);
	}
      else
	{
	  if (!CONST_OK_FOR_K (INTVAL (frame_off)))
	    frame_off = force_const_mem (Pmode, frame_off);

	  insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
	  annotate_constant_pool_refs (&PATTERN (insn));
	}

      RTX_FRAME_RELATED_P (insn) = 1;
      real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		    gen_rtx_SET (stack_pointer_rtx,
				 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					       real_frame_off)));
      /* Set backchain.  */

      if (TARGET_BACKCHAIN)
	{
	  if (cfun_frame_layout.backchain_offset)
	    addr = gen_rtx_MEM (Pmode,
				plus_constant (Pmode, stack_pointer_rtx,
					       cfun_frame_layout.backchain_offset));
	  else
	    addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
	  set_mem_alias_set (addr, get_frame_alias_set ());
	  insn = emit_insn (gen_move_insn (addr, temp_reg));
	}

      /* If we support non-call exceptions (e.g. for Java),
	 we need to make sure the backchain pointer is set up
	 before any possibly trapping memory access.  */
      if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
	{
	  addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
	  emit_clobber (addr);
	}
    }

  /* Save fprs 8 - 15 (64 bit ABI).  */

  if (cfun_save_high_fprs_p && next_fpr)
    {
      /* If the stack might be accessed through a different register
	 we have to make sure that the stack pointer decrement is not
	 moved below the use of the stack slots.  */
      s390_emit_stack_tie ();

      insn = emit_insn (gen_add2_insn (temp_reg,
				       GEN_INT (cfun_frame_layout.f8_offset)));

      offset = 0;

      for (i = FPR8_REGNUM; i <= next_fpr; i++)
	if (cfun_fpr_save_p (i))
	  {
	    rtx addr = plus_constant (Pmode, stack_pointer_rtx,
				      cfun_frame_layout.frame_size
				      + cfun_frame_layout.f8_offset
				      + offset);

	    insn = save_fpr (temp_reg, offset, i);
	    offset += 8;
	    RTX_FRAME_RELATED_P (insn) = 1;
	    add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			  gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
				       gen_rtx_REG (DFmode, i)));
	  }
    }

  /* Set frame pointer, if needed.  */

  if (frame_pointer_needed)
    {
      insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Set up got pointer, if needed.  */

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    {
      rtx_insn *insns = s390_load_got ();

      for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
	annotate_constant_pool_refs (&PATTERN (insn));

      emit_insn (insns);
    }

  if (TARGET_TPF_PROFILING)
    {
      /* Generate a BAS instruction to serve as a function
	 entry intercept to facilitate the use of tracing
	 algorithms located at the branch target.  */
      emit_insn (gen_prologue_tpf ());

      /* Emit a blockage here so that all code
	 lies between the profiling mechanisms.  */
      emit_insn (gen_blockage ());
    }
}
/* Expand the epilogue into a bunch of separate insns.  */

void
s390_emit_epilogue (bool sibcall)
{
  rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
  int area_bottom, area_top, offset = 0;
  int next_offset;
  rtvec p;
  int i;

  if (TARGET_TPF_PROFILING)
    {
      /* Generate a BAS instruction to serve as a function
	 entry intercept to facilitate the use of tracing
	 algorithms located at the branch target.  */

      /* Emit a blockage here so that all code
	 lies between the profiling mechanisms.  */
      emit_insn (gen_blockage ());

      emit_insn (gen_epilogue_tpf ());
    }

  /* Check whether to use frame or stack pointer for restore.  */

  frame_pointer = (frame_pointer_needed
		   ? hard_frame_pointer_rtx : stack_pointer_rtx);

  s390_frame_area (&area_bottom, &area_top);

  /* Check whether we can access the register save area.
     If not, increment the frame pointer as required.  */

  if (area_top <= area_bottom)
    {
      /* Nothing to restore.  */
    }
  else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
	   && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
    {
      /* Area is in range.  */
      offset = cfun_frame_layout.frame_size;
    }
  else
    {
      rtx insn, frame_off, cfa;

      offset = area_bottom < 0 ? -area_bottom : 0;
      frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);

      cfa = gen_rtx_SET (frame_pointer,
			 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
      if (DISP_IN_RANGE (INTVAL (frame_off)))
	{
	  insn = gen_rtx_SET (frame_pointer,
			      gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
	  insn = emit_insn (insn);
	}
      else
	{
	  if (!CONST_OK_FOR_K (INTVAL (frame_off)))
	    frame_off = force_const_mem (Pmode, frame_off);

	  insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
	  annotate_constant_pool_refs (&PATTERN (insn));
	}
      add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Restore call saved fprs.  */

  if (TARGET_64BIT)
    {
      if (cfun_save_high_fprs_p)
	{
	  next_offset = cfun_frame_layout.f8_offset;
	  for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
	    {
	      if (cfun_fpr_save_p (i))
		{
		  restore_fpr (frame_pointer,
			       offset + next_offset, i);
		  cfa_restores
		    = alloc_reg_note (REG_CFA_RESTORE,
				      gen_rtx_REG (DFmode, i), cfa_restores);
		  next_offset += 8;
		}
	    }
	}
    }
  else
    {
      next_offset = cfun_frame_layout.f4_offset;
      /* f4, f6  */
      for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
	{
	  if (cfun_fpr_save_p (i))
	    {
	      restore_fpr (frame_pointer,
			   offset + next_offset, i);
	      cfa_restores
		= alloc_reg_note (REG_CFA_RESTORE,
				  gen_rtx_REG (DFmode, i), cfa_restores);
	      next_offset += 8;
	    }
	  else if (!TARGET_PACKED_STACK)
	    next_offset += 8;
	}
    }
  /* Return register.  */

  return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);

  /* Restore call saved gprs.  */

  if (cfun_frame_layout.first_restore_gpr != -1)
    {
      rtx insn, addr;

      /* Check for global register and save them
	 to stack location from where they get restored.  */

      for (i = cfun_frame_layout.first_restore_gpr;
	   i <= cfun_frame_layout.last_restore_gpr;
	   i++)
	{
	  if (global_not_special_regno_p (i))
	    {
	      addr = plus_constant (Pmode, frame_pointer,
				    offset + cfun_frame_layout.gprs_offset
				    + (i - cfun_frame_layout.first_save_gpr_slot)
				    * UNITS_PER_LONG);
	      addr = gen_rtx_MEM (Pmode, addr);
	      set_mem_alias_set (addr, get_frame_alias_set ());
	      emit_move_insn (addr, gen_rtx_REG (Pmode, i));
	    }
	  else
	    cfa_restores
	      = alloc_reg_note (REG_CFA_RESTORE,
				gen_rtx_REG (Pmode, i), cfa_restores);
	}

      /* Fetch return address from stack before load multiple;
	 this helps scheduling.

	 Only do this if we already decided that r14 needs to be
	 saved to a stack slot.  (And not just because r14 happens to
	 be in between two GPRs which need saving.)  Otherwise it
	 would be difficult to take that decision back in
	 s390_optimize_prologue.

	 This optimization is only helpful on in-order machines.  */
      if (! sibcall
	  && cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK
	  && s390_tune <= PROCESSOR_2097_Z10)
	{
	  int return_regnum = find_unused_clobbered_reg ();
	  if (!return_regnum)
	    return_regnum = 4;
	  return_reg = gen_rtx_REG (Pmode, return_regnum);

	  addr = plus_constant (Pmode, frame_pointer,
				offset + cfun_frame_layout.gprs_offset
				+ (RETURN_REGNUM
				   - cfun_frame_layout.first_save_gpr_slot)
				* UNITS_PER_LONG);
	  addr = gen_rtx_MEM (Pmode, addr);
	  set_mem_alias_set (addr, get_frame_alias_set ());
	  emit_move_insn (return_reg, addr);

	  /* Once we did that optimization we have to make sure
	     s390_optimize_prologue does not try to remove the store
	     of r14 since we will not be able to find the load issued
	     on the stack slot.  */
	  cfun_frame_layout.save_return_addr_p = true;
	}

      insn = restore_gprs (frame_pointer,
			   offset + cfun_frame_layout.gprs_offset
			   + (cfun_frame_layout.first_restore_gpr
			      - cfun_frame_layout.first_save_gpr_slot)
			   * UNITS_PER_LONG,
			   cfun_frame_layout.first_restore_gpr,
			   cfun_frame_layout.last_restore_gpr);
      insn = emit_insn (insn);
      REG_NOTES (insn) = cfa_restores;
      add_reg_note (insn, REG_CFA_DEF_CFA,
		    plus_constant (Pmode, stack_pointer_rtx,
				   STACK_POINTER_OFFSET));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  s390_restore_gprs_from_fprs ();

  if (! sibcall)
    {
      /* Return to caller.  */

      p = rtvec_alloc (2);

      RTVEC_ELT (p, 0) = ret_rtx;
      RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
}
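/* The (return, use r14) PARALLEL above typically assembles to a plain
   "br %r14" - or a branch via the scratch register chosen earlier
   when the return address was preloaded from its stack slot.  */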
/* Implement TARGET_SET_UP_BY_PROLOGUE.  */

static void
s300_set_up_by_prologue (hard_reg_set_container *regs)
{
  if (cfun->machine->base_reg
      && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
    SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
}
11518 /* -fsplit-stack support. */
11520 /* A SYMBOL_REF for __morestack. */
11521 static GTY(()) rtx morestack_ref;
/* When using -fsplit-stack, the allocation routines set a field in
   the TCB to the bottom of the stack plus this much space, measured
   in bytes.  */

#define SPLIT_STACK_AVAILABLE 1024
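/* Editor's illustration (not from the original sources): with
   SPLIT_STACK_AVAILABLE == 1024, a function whose frame fits into
   1 KiB can compare %r15 directly against the __private_ss guard in
   the code below, without first adding its frame size, because the
   guard already includes this cushion above the real stack bottom.  */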
/* Emit the -fsplit-stack prologue, which goes before the regular function
   prologue.  */

void
s390_expand_split_stack_prologue (void)
rtx r1, guard, cc = NULL;
rtx_insn *insn;
11537 /* Offset from thread pointer to __private_ss. */
11538 int psso = TARGET_64BIT ? 0x38 : 0x20;
11540 /* Frame size and argument size - the two parameters to __morestack. */
11541 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11542 /* Align argument size to 8 bytes - simplifies __morestack code. */
HOST_WIDE_INT args_size = crtl->args.size >= 0
			  ? ((crtl->args.size + 7) & ~7)
			  : 0;
11546 /* Label to be called by __morestack. */
11547 rtx_code_label *call_done = NULL;
rtx_code_label *parm_base = NULL;
rtx tmp;
11551 gcc_assert (flag_split_stack && reload_completed);
if (!TARGET_CPU_ZARCH)
  {
    sorry ("CPUs older than z900 are not supported for -fsplit-stack");
    return;
  }
11558 r1 = gen_rtx_REG (Pmode, 1);
/* If no stack frame will be allocated, don't do anything.  */
if (frame_size == 0)
  {
    if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
      /* If va_start is used, just use r15.  */
      emit_move_insn (r1,
		      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
				    GEN_INT (STACK_POINTER_OFFSET)));
    return;
  }
if (morestack_ref == NULL_RTX)
  {
    morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
    SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
					 | SYMBOL_FLAG_FUNCTION);
  }
11581 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11583 /* If frame_size will fit in an add instruction, do a stack space
11584 check, and only call __morestack if there's not enough space. */
11586 /* Get thread pointer. r1 is the only register we can always destroy - r0
11587 could contain a static chain (and cannot be used to address memory
11588 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11589 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11590 /* Aim at __private_ss. */
11591 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
/* If less than 1 KiB is used, skip the addition and compare the guard
   directly with the stack pointer.  */
if (frame_size > SPLIT_STACK_AVAILABLE)
  {
    emit_move_insn (r1, guard);
    if (TARGET_64BIT)
      emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
    else
      emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
    guard = r1;
  }
11605 /* Compare the (maybe adjusted) guard with the stack pointer. */
11606 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11609 call_done = gen_label_rtx ();
11610 parm_base = gen_label_rtx ();
11612 /* Emit the parameter block. */
11613 tmp = gen_split_stack_data (parm_base, call_done,
11614 GEN_INT (frame_size),
11615 GEN_INT (args_size));
11616 insn = emit_insn (tmp);
11617 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11618 LABEL_NUSES (call_done)++;
11619 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11620 LABEL_NUSES (parm_base)++;
11622 /* %r1 = litbase. */
11623 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11624 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11625 LABEL_NUSES (parm_base)++;
11627 /* Now, we need to call __morestack. It has very special calling
11628 conventions: it preserves param/return/static chain registers for
11629 calling main function body, and looks for its own parameters at %r1. */
if (cc != NULL)
  {
    tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11635 insn = emit_jump_insn (tmp);
11636 JUMP_LABEL (insn) = call_done;
11637 LABEL_NUSES (call_done)++;
11639 /* Mark the jump as very unlikely to be taken. */
11640 add_int_reg_note (insn, REG_BR_PROB, REG_BR_PROB_BASE / 100);
11642 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
/* If va_start is used, and __morestack was not called, just use
   r15.  */
11646 emit_move_insn (r1,
11647 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11648 GEN_INT (STACK_POINTER_OFFSET)));
  }
else
  {
    tmp = gen_split_stack_call (morestack_ref, call_done);
11654 insn = emit_jump_insn (tmp);
11655 JUMP_LABEL (insn) = call_done;
    LABEL_NUSES (call_done)++;
  }
11660 /* __morestack will call us here. */
11662 emit_label (call_done);
11665 /* We may have to tell the dataflow pass that the split stack prologue
11666 is initializing a register. */
static void
s390_live_on_entry (bitmap regs)
11671 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11673 gcc_assert (flag_split_stack);
11674 bitmap_set_bit (regs, 1);
/* Return true if the function can use simple_return to return outside
   of a shrink-wrapped region.  At present shrink-wrapping is supported
   in all cases.  */
bool
s390_can_use_simple_return_insn (void)
{
  return true;
}
11688 /* Return true if the epilogue is guaranteed to contain only a return
11689 instruction and if a direct return can therefore be used instead.
11690 One of the main advantages of using direct return instructions
11691 is that we can then use conditional returns. */
bool
s390_can_use_return_insn (void)
{
  int i;

  if (!reload_completed)
    return false;

  if (TARGET_TPF_PROFILING)
    return false;

  for (i = 0; i < 16; i++)
    if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
      return false;
11711 /* For 31 bit this is not covered by the frame_size check below
11712 since f4, f6 are saved in the register save area without needing
11713 additional stack space. */
if (!TARGET_64BIT
    && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
  return false;
11718 if (cfun->machine->base_reg
      && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
    return false;

  return cfun_frame_layout.frame_size == 0;
}
/* The VX ABI differs for vararg functions.  Therefore we need the
   prototype of the callee to be available when passing vector type
   values.  */
11728 static const char *
11729 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
return ((TARGET_VX_ABI
	 && typelist == 0
	 && VECTOR_TYPE_P (TREE_TYPE (val))
	 && (funcdecl == NULL_TREE
	     || (TREE_CODE (funcdecl) == FUNCTION_DECL
		 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
	? N_("vector argument passed to unprototyped function")
	: NULL);
11742 /* Return the size in bytes of a function argument of
11743 type TYPE and/or mode MODE. At least one of TYPE or
11744 MODE must be specified. */
static int
s390_function_arg_size (machine_mode mode, const_tree type)
{
  if (type)
    return int_size_in_bytes (type);
11752 /* No type info available for some library calls ... */
11753 if (mode != BLKmode)
11754 return GET_MODE_SIZE (mode);
/* If we have neither type nor mode, abort.  */
11757 gcc_unreachable ();
11760 /* Return true if a function argument of type TYPE and mode MODE
11761 is to be passed in a vector register, if available. */
static bool
s390_function_arg_vector (machine_mode mode, const_tree type)
if (!TARGET_VX_ABI)
  return false;

if (s390_function_arg_size (mode, type) > 16)
  return false;

/* No type info available for some library calls ...  */
if (!type)
  return VECTOR_MODE_P (mode);
11776 /* The ABI says that record types with a single member are treated
11777 just like that member would be. */
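/* For example (editor's illustration): a  struct { __vector int v; }
   argument is passed exactly like a plain  __vector int  would be.  */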
11778 while (TREE_CODE (type) == RECORD_TYPE)
11780 tree field, single = NULL_TREE;
11782 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11784 if (TREE_CODE (field) != FIELD_DECL)
11787 if (single == NULL_TREE)
  single = TREE_TYPE (field);
else
  return false;
if (single == NULL_TREE)
  return false;
/* If the field declaration adds extra bytes, e.g. due to
   padding, this is not accepted as a vector type.  */
11799 if (int_size_in_bytes (single) <= 0
    || int_size_in_bytes (single) != int_size_in_bytes (type))
  return false;

type = single;

return VECTOR_TYPE_P (type);
11809 /* Return true if a function argument of type TYPE and mode MODE
11810 is to be passed in a floating-point register, if available. */
static bool
s390_function_arg_float (machine_mode mode, const_tree type)
{
  if (s390_function_arg_size (mode, type) > 8)
    return false;
11818 /* Soft-float changes the ABI: no floating-point registers are used. */
if (TARGET_SOFT_FLOAT)
  return false;
/* No type info available for some library calls ...  */
if (!type)
  return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11826 /* The ABI says that record types with a single member are treated
11827 just like that member would be. */
11828 while (TREE_CODE (type) == RECORD_TYPE)
11830 tree field, single = NULL_TREE;
11832 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11834 if (TREE_CODE (field) != FIELD_DECL)
11837 if (single == NULL_TREE)
  single = TREE_TYPE (field);
else
  return false;
if (single == NULL_TREE)
  return false;
else
  type = single;

return TREE_CODE (type) == REAL_TYPE;
11852 /* Return true if a function argument of type TYPE and mode MODE
11853 is to be passed in an integer register, or a pair of integer
11854 registers, if available. */
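/* Editor's illustration: on a 31-bit target (UNITS_PER_LONG == 4) a
   64-bit integer argument therefore occupies two consecutive GPRs,
   e.g. %r2/%r3, as set up by s390_function_arg below.  */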
static bool
s390_function_arg_integer (machine_mode mode, const_tree type)
{
  int size = s390_function_arg_size (mode, type);
  if (size > 8)
    return false;

  /* No type info available for some library calls ...  */
  if (!type)
    return GET_MODE_CLASS (mode) == MODE_INT
	   || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11868 /* We accept small integral (and similar) types. */
11869 if (INTEGRAL_TYPE_P (type)
11870 || POINTER_TYPE_P (type)
11871 || TREE_CODE (type) == NULLPTR_TYPE
11872 || TREE_CODE (type) == OFFSET_TYPE
    || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
  return true;
11876 /* We also accept structs of size 1, 2, 4, 8 that are not
11877 passed in floating-point registers. */
11878 if (AGGREGATE_TYPE_P (type)
11879 && exact_log2 (size) >= 0
    && !s390_function_arg_float (mode, type))
  return true;

return false;
11886 /* Return 1 if a function argument of type TYPE and mode MODE
11887 is to be passed by reference. The ABI specifies that only
11888 structures of size 1, 2, 4, or 8 bytes are passed by value,
all other structures (and complex numbers) are passed by
reference.  */

static bool
11893 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11894 machine_mode mode, const_tree type,
11895 bool named ATTRIBUTE_UNUSED)
11897 int size = s390_function_arg_size (mode, type);
if (s390_function_arg_vector (mode, type))
  /* Vector arguments are always passed by value.  */
  return false;

if (size > 8)
  return true;
if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
  return true;

if (TREE_CODE (type) == COMPLEX_TYPE
    || TREE_CODE (type) == VECTOR_TYPE)
  return true;
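/* Editor's note (illustration): under these rules a
   struct { char c[3]; }  (size 3, not a power of two) is passed by
   reference, whereas  struct { char c[4]; }  is passed by value in a
   GPR.  */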
11918 /* Update the data in CUM to advance over an argument of mode MODE and
11919 data type TYPE. (TYPE is null for libcalls where that information
11920 may not be available.). The boolean NAMED specifies whether the
11921 argument is a named argument (as opposed to an unnamed argument
11922 matching an ellipsis). */
static void
s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11926 const_tree type, bool named)
11928 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11930 if (s390_function_arg_vector (mode, type))
/* We are called for unnamed vector stdarg arguments which are
   passed on the stack.  In this case this hook does not have to
   do anything since stack arguments are tracked by common
   code.  */
11940 else if (s390_function_arg_float (mode, type))
11944 else if (s390_function_arg_integer (mode, type))
11946 int size = s390_function_arg_size (mode, type);
11947 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
11950 gcc_unreachable ();
11953 /* Define where to put the arguments to a function.
11954 Value is zero to push the argument on the stack,
11955 or a hard register in which to store the argument.
11957 MODE is the argument's machine mode.
11958 TYPE is the data type of the argument (as a tree).
11959 This is null for libcalls where that information may
11961 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11962 the preceding args and about the function being called.
11963 NAMED is nonzero if this argument is a named parameter
11964 (otherwise it is an extra parameter matching an ellipsis).
11966 On S/390, we use general purpose registers 2 through 6 to
11967 pass integer, pointer, and certain structure arguments, and
11968 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
11969 to pass floating point arguments. All remaining arguments
11970 are pushed to the stack. */
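/* Editor's illustration (not part of the original sources): for
   void f (int a, double b, int c)  the ABI described above assigns
   a -> %r2, b -> %f0, c -> %r3; further doubles would go to %f2
   (and, on 64-bit only, to %f4 and %f6).  */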
static rtx
s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
11974 const_tree type, bool named)
11976 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11979 s390_check_type_for_vector_abi (type, true, false);
11981 if (s390_function_arg_vector (mode, type))
/* Vector arguments that are part of the ellipsis are passed on the
   stack.  */
if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
  return NULL_RTX;
11988 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
11990 else if (s390_function_arg_float (mode, type))
if (cum->fprs + 1 > FP_ARG_NUM_REG)
  return NULL_RTX;
else
  return gen_rtx_REG (mode, cum->fprs + 16);
11997 else if (s390_function_arg_integer (mode, type))
11999 int size = s390_function_arg_size (mode, type);
12000 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
  return NULL_RTX;
12004 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
12005 return gen_rtx_REG (mode, cum->gprs + 2);
12006 else if (n_gprs == 2)
rtvec p = rtvec_alloc (2);
RTVEC_ELT (p, 0)
  = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
		       const0_rtx);
RTVEC_ELT (p, 1)
  = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
		       GEN_INT (4));
return gen_rtx_PARALLEL (mode, p);
12021 /* After the real arguments, expand_call calls us once again
12022 with a void_type_node type. Whatever we return here is
12023 passed as operand 2 to the call expanders.
12025 We don't need this feature ... */
else if (type == void_type_node)
  return const0_rtx;
12029 gcc_unreachable ();
12032 /* Return true if return values of type TYPE should be returned
12033 in a memory buffer whose address is passed by the caller as
12034 hidden first argument. */
static bool
s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
12039 /* We accept small integral (and similar) types. */
12040 if (INTEGRAL_TYPE_P (type)
12041 || POINTER_TYPE_P (type)
12042 || TREE_CODE (type) == OFFSET_TYPE
12043 || TREE_CODE (type) == REAL_TYPE)
12044 return int_size_in_bytes (type) > 8;
/* Vector types which fit into a VR.  */
if (TARGET_VX_ABI
    && VECTOR_TYPE_P (type)
    && int_size_in_bytes (type) <= 16)
  return false;
/* Aggregates and similar constructs are always returned
   in memory.  */
if (AGGREGATE_TYPE_P (type)
    || TREE_CODE (type) == COMPLEX_TYPE
    || VECTOR_TYPE_P (type))
  return true;
12059 /* ??? We get called on all sorts of random stuff from
12060 aggregate_value_p. We can't abort, but it's not clear
what's safe to return.  Pretend it's a struct I guess.  */
return true;
12065 /* Function arguments and return values are promoted to word size. */
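/* Editor's illustration: with UNITS_PER_LONG == 8 a 32-bit  int
   argument is promoted to DImode here, and pointer types are
   extended according to POINTERS_EXTEND_UNSIGNED.  */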
12067 static machine_mode
12068 s390_promote_function_mode (const_tree type, machine_mode mode,
12070 const_tree fntype ATTRIBUTE_UNUSED,
12071 int for_return ATTRIBUTE_UNUSED)
if (INTEGRAL_MODE_P (mode)
    && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
  {
    if (type != NULL_TREE && POINTER_TYPE_P (type))
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
    return Pmode;
  }

return mode;
12084 /* Define where to return a (scalar) value of type RET_TYPE.
12085 If RET_TYPE is null, define where to return a (scalar)
12086 value of mode MODE from a libcall. */
static rtx
s390_function_and_libcall_value (machine_mode mode,
12090 const_tree ret_type,
12091 const_tree fntype_or_decl,
12092 bool outgoing ATTRIBUTE_UNUSED)
12094 /* For vector return types it is important to use the RET_TYPE
12095 argument whenever available since the middle-end might have
12096 changed the mode to a scalar mode. */
12097 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
12098 || (!ret_type && VECTOR_MODE_P (mode)));
12100 /* For normal functions perform the promotion as
12101 promote_function_mode would do. */
if (ret_type)
  {
    int unsignedp = TYPE_UNSIGNED (ret_type);
    mode = promote_function_mode (ret_type, mode, &unsignedp,
				  fntype_or_decl, 1);
  }
12109 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
12110 || SCALAR_FLOAT_MODE_P (mode)
12111 || (TARGET_VX_ABI && vector_ret_type_p));
12112 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
12114 if (TARGET_VX_ABI && vector_ret_type_p)
12115 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
12116 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
12117 return gen_rtx_REG (mode, 16);
12118 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
12119 || UNITS_PER_LONG == UNITS_PER_WORD)
12120 return gen_rtx_REG (mode, 2);
12121 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
12123 /* This case is triggered when returning a 64 bit value with
12124 -m31 -mzarch. Although the value would fit into a single
12125 register it has to be forced into a 32 bit register pair in
12126 order to match the ABI. */
rtvec p = rtvec_alloc (2);
RTVEC_ELT (p, 0)
  = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
RTVEC_ELT (p, 1)
  = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));

return gen_rtx_PARALLEL (mode, p);
12137 gcc_unreachable ();
12140 /* Define where to return a scalar return value of type RET_TYPE. */
static rtx
s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
		     bool outgoing)
{
12146 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
12147 fn_decl_or_type, outgoing);
12150 /* Define where to return a scalar libcall return value of mode
static rtx
s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return s390_function_and_libcall_value (mode, NULL_TREE,
					  NULL_TREE, true);
}
12161 /* Create and return the va_list datatype.
12163 On S/390, va_list is an array type equivalent to
typedef struct __va_list_tag
  {
    long __gpr;
    long __fpr;
    void *__overflow_arg_area;
    void *__reg_save_area;
  } va_list[1];
12173 where __gpr and __fpr hold the number of general purpose
12174 or floating point arguments used up to now, respectively,
12175 __overflow_arg_area points to the stack location of the
12176 next argument passed on the stack, and __reg_save_area
12177 always points to the start of the register area in the
12178 call frame of the current function. The function prologue
12179 saves all registers used for argument passing into this
12180 area if the function uses variable arguments. */
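/* Editor's sketch of the resulting 64-bit layout (assumed offsets,
   for illustration only): sizeof (__va_list_tag) == 32, with __gpr
   at offset 0, __fpr at 8, __overflow_arg_area at 16 and
   __reg_save_area at 24.  */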
static tree
s390_build_builtin_va_list (void)
12185 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12187 record = lang_hooks.types.make_type (RECORD_TYPE);
type_decl =
  build_decl (BUILTINS_LOCATION,
12191 TYPE_DECL, get_identifier ("__va_list_tag"), record);
12193 f_gpr = build_decl (BUILTINS_LOCATION,
12194 FIELD_DECL, get_identifier ("__gpr"),
12195 long_integer_type_node);
12196 f_fpr = build_decl (BUILTINS_LOCATION,
12197 FIELD_DECL, get_identifier ("__fpr"),
12198 long_integer_type_node);
f_ovf = build_decl (BUILTINS_LOCATION,
		    FIELD_DECL, get_identifier ("__overflow_arg_area"),
		    ptr_type_node);
f_sav = build_decl (BUILTINS_LOCATION,
		    FIELD_DECL, get_identifier ("__reg_save_area"),
		    ptr_type_node);
12206 va_list_gpr_counter_field = f_gpr;
12207 va_list_fpr_counter_field = f_fpr;
12209 DECL_FIELD_CONTEXT (f_gpr) = record;
12210 DECL_FIELD_CONTEXT (f_fpr) = record;
12211 DECL_FIELD_CONTEXT (f_ovf) = record;
12212 DECL_FIELD_CONTEXT (f_sav) = record;
12214 TYPE_STUB_DECL (record) = type_decl;
12215 TYPE_NAME (record) = type_decl;
12216 TYPE_FIELDS (record) = f_gpr;
12217 DECL_CHAIN (f_gpr) = f_fpr;
12218 DECL_CHAIN (f_fpr) = f_ovf;
12219 DECL_CHAIN (f_ovf) = f_sav;
12221 layout_type (record);
12223 /* The correct type is an array type of one element. */
12224 return build_array_type (record, build_index_type (size_zero_node));
12227 /* Implement va_start by filling the va_list structure VALIST.
12228 STDARG_P is always true, and ignored.
12229 NEXTARG points to the first anonymous stack argument.
12231 The following global variables are used to initialize
12232 the va_list structure:
12235 holds number of gprs and fprs used for named arguments.
12236 crtl->args.arg_offset_rtx:
12237 holds the offset of the first anonymous stack argument
12238 (relative to the virtual arg pointer). */
static void
s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
HOST_WIDE_INT n_gpr, n_fpr;
int off;
12245 tree f_gpr, f_fpr, f_ovf, f_sav;
12246 tree gpr, fpr, ovf, sav, t;
12248 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12249 f_fpr = DECL_CHAIN (f_gpr);
12250 f_ovf = DECL_CHAIN (f_fpr);
12251 f_sav = DECL_CHAIN (f_ovf);
12253 valist = build_simple_mem_ref (valist);
12254 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12255 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12256 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12257 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12259 /* Count number of gp and fp argument registers used. */
12261 n_gpr = crtl->args.info.gprs;
12262 n_fpr = crtl->args.info.fprs;
12264 if (cfun->va_list_gpr_size)
12266 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12267 build_int_cst (NULL_TREE, n_gpr));
12268 TREE_SIDE_EFFECTS (t) = 1;
12269 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12272 if (cfun->va_list_fpr_size)
12274 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12275 build_int_cst (NULL_TREE, n_fpr));
12276 TREE_SIDE_EFFECTS (t) = 1;
12277 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12280 if (flag_split_stack
12281 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12283 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12288 reg = gen_reg_rtx (Pmode);
cfun->machine->split_stack_varargs_pointer = reg;

start_sequence ();
emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
seq = get_insns ();
end_sequence ();

push_topmost_sequence ();
12297 emit_insn_after (seq, entry_of_function ());
12298 pop_topmost_sequence ();
/* Find the overflow area.
   FIXME: This currently is too pessimistic when the vector ABI is
   enabled.  In that case we *always* set up the overflow area
   pointer.  */
if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
    || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
    || TARGET_VX_ABI)
12309 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12310 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12312 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12314 off = INTVAL (crtl->args.arg_offset_rtx);
12315 off = off < 0 ? 0 : off;
12316 if (TARGET_DEBUG_ARG)
12317 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12318 (int)n_gpr, (int)n_fpr, off);
12320 t = fold_build_pointer_plus_hwi (t, off);
12322 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12323 TREE_SIDE_EFFECTS (t) = 1;
12324 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12327 /* Find the register save area. */
12328 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12329 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12331 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12332 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12334 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12335 TREE_SIDE_EFFECTS (t) = 1;
12336 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12340 /* Implement va_arg by updating the va_list structure
12341 VALIST as required to retrieve an argument of type
12342 TYPE, and returning that argument.
   Generates code equivalent to:

   if (integral value) {
     if (size <= 4 && args.gpr < 5 ||
	 size > 4 && args.gpr < 4)
       ret = args.reg_save_area[args.gpr+8]
     else
       ret = *args.overflow_arg_area++;
   } else if (vector value) {
     ret = *args.overflow_arg_area;
     args.overflow_arg_area += size / 8;
   } else if (float value) {
     if (args.fpr < 2)
       ret = args.reg_save_area[args.fpr+64]
     else
       ret = *args.overflow_arg_area++;
   } else if (aggregate value) {
     if (args.gpr < 5)
       ret = *args.reg_save_area[args.gpr]
     else
       ret = **args.overflow_arg_area++;
   } */
static tree
s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12369 gimple_seq *post_p ATTRIBUTE_UNUSED)
12371 tree f_gpr, f_fpr, f_ovf, f_sav;
12372 tree gpr, fpr, ovf, sav, reg, t, u;
12373 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12374 tree lab_false, lab_over = NULL_TREE;
12375 tree addr = create_tmp_var (ptr_type_node, "addr");
bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
		      the stack slot.  */
12379 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12380 f_fpr = DECL_CHAIN (f_gpr);
12381 f_ovf = DECL_CHAIN (f_fpr);
12382 f_sav = DECL_CHAIN (f_ovf);
12384 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12385 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12386 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12388 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12389 both appear on a lhs. */
12390 valist = unshare_expr (valist);
12391 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12393 size = int_size_in_bytes (type);
12395 s390_check_type_for_vector_abi (type, true, false);
12397 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12399 if (TARGET_DEBUG_ARG)
12401 fprintf (stderr, "va_arg: aggregate type");
12405 /* Aggregates are passed by reference. */
12410 /* kernel stack layout on 31 bit: It is assumed here that no padding
12411 will be added by s390_frame_info because for va_args always an even
12412 number of gprs has to be saved r15-r2 = 14 regs. */
12413 sav_ofs = 2 * UNITS_PER_LONG;
12414 sav_scale = UNITS_PER_LONG;
12415 size = UNITS_PER_LONG;
12416 max_reg = GP_ARG_NUM_REG - n_reg;
12417 left_align_p = false;
12419 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12421 if (TARGET_DEBUG_ARG)
12423 fprintf (stderr, "va_arg: vector type");
12433 left_align_p = true;
12435 else if (s390_function_arg_float (TYPE_MODE (type), type))
12437 if (TARGET_DEBUG_ARG)
12439 fprintf (stderr, "va_arg: float type");
12443 /* FP args go in FP registers, if present. */
sav_ofs = 16 * UNITS_PER_LONG;
sav_scale = 8;
max_reg = FP_ARG_NUM_REG - n_reg;
12450 left_align_p = false;
12454 if (TARGET_DEBUG_ARG)
12456 fprintf (stderr, "va_arg: other type");
12460 /* Otherwise into GP registers. */
12463 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12465 /* kernel stack layout on 31 bit: It is assumed here that no padding
12466 will be added by s390_frame_info because for va_args always an even
12467 number of gprs has to be saved r15-r2 = 14 regs. */
12468 sav_ofs = 2 * UNITS_PER_LONG;
12470 if (size < UNITS_PER_LONG)
12471 sav_ofs += UNITS_PER_LONG - size;
12473 sav_scale = UNITS_PER_LONG;
12474 max_reg = GP_ARG_NUM_REG - n_reg;
12475 left_align_p = false;
12478 /* Pull the value out of the saved registers ... */
12480 if (reg != NULL_TREE)
/*
  if (reg > ((typeof (reg)) max_reg))
    goto lab_false;

  addr = sav + sav_ofs + reg * sav_scale;

  goto lab_over;

  lab_false:
*/
12493 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12494 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12496 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12497 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12498 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12499 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12500 gimplify_and_add (t, pre_p);
12502 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12503 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12504 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12505 t = fold_build_pointer_plus (t, u);
12507 gimplify_assign (addr, t, pre_p);
12509 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12511 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12514 /* ... Otherwise out of the overflow area. */
t = ovf;
if (size < UNITS_PER_LONG && !left_align_p)
12518 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12520 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12522 gimplify_assign (addr, t, pre_p);
12524 if (size < UNITS_PER_LONG && left_align_p)
  t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
else
  t = fold_build_pointer_plus_hwi (t, size);
12529 gimplify_assign (ovf, t, pre_p);
12531 if (reg != NULL_TREE)
12532 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12535 /* Increment register save count. */
12539 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12540 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12541 gimplify_and_add (u, pre_p);
t = build_pointer_type_for_mode (build_pointer_type (type),
				 ptr_mode, true);
12548 addr = fold_convert (t, addr);
12549 addr = build_va_arg_indirect_ref (addr);
12553 t = build_pointer_type_for_mode (type, ptr_mode, true);
12554 addr = fold_convert (t, addr);
12557 return build_va_arg_indirect_ref (addr);
/* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
   expanders.
   DEST  - Register location where CC will be stored.
   TDB   - Pointer to a 256 byte area where to store the transaction
	   diagnostic block.  NULL if TDB is not needed.
   RETRY - Retry count value.  If non-NULL a retry loop for CC2
	   is generated.
12567 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12568 of the tbegin instruction pattern. */
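/* Editor's note (illustration): __builtin_tbegin (&tdb) reaches this
   expander with RETRY == NULL_RTX, while __builtin_tbegin_retry
   (&tdb, n) additionally requests the CC2 retry loop emitted
   below.  */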
void
s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12573 rtx retry_plus_two = gen_reg_rtx (SImode);
12574 rtx retry_reg = gen_reg_rtx (SImode);
12575 rtx_code_label *retry_label = NULL;
12577 if (retry != NULL_RTX)
12579 emit_move_insn (retry_reg, retry);
12580 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12581 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12582 retry_label = gen_label_rtx ();
12583 emit_label (retry_label);
if (clobber_fprs_p)
  {
    if (TARGET_VX)
      emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
				   tdb));
    else
      emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
			       tdb));
  }
else
  emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
				   tdb));
12599 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
				      gen_rtvec (1, gen_rtx_REG (CCRAWmode,
								 CC_REGNUM)),
				      UNSPEC_CC_TO_INT));
12603 if (retry != NULL_RTX)
12605 const int CC0 = 1 << 3;
12606 const int CC1 = 1 << 2;
12607 const int CC3 = 1 << 0;
12609 rtx count = gen_reg_rtx (SImode);
12610 rtx_code_label *leave_label = gen_label_rtx ();
12612 /* Exit for success and permanent failures. */
12613 jump = s390_emit_jump (leave_label,
12614 gen_rtx_EQ (VOIDmode,
12615 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12616 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12617 LABEL_NUSES (leave_label) = 1;
12619 /* CC2 - transient failure. Perform retry with ppa. */
12620 emit_move_insn (count, retry_plus_two);
12621 emit_insn (gen_subsi3 (count, count, retry_reg));
12622 emit_insn (gen_tx_assist (count));
12623 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12626 JUMP_LABEL (jump) = retry_label;
12627 LABEL_NUSES (retry_label) = 1;
12628 emit_label (leave_label);
/* Return the decl for the target specific builtin with the function
   code FCODE.  */

static tree
12637 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12639 if (fcode >= S390_BUILTIN_MAX)
12640 return error_mark_node;
12642 return s390_builtin_decls[fcode];
12645 /* We call mcount before the function prologue. So a profiled leaf
12646 function should stay a leaf function. */
static bool
s390_keep_leaf_when_profiled ()
{
  return true;
}
/* Output assembly code for the trampoline template to
   stdio stream FILE.
12657 On S/390, we use gpr 1 internally in the trampoline code;
12658 gpr 0 is used to hold the static chain. */
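/* Editor's sketch of the resulting 64-bit trampoline (derived from
   the code below, shown for illustration only):

       basr  %r1,0            # %r1 = address of the next insn
       lmg   %r0,%r1,14(%r1)  # load static chain and target address
       br    %r1              # jump to target
       <padding>
       .quad <static chain>   # filled in by s390_trampoline_init
       .quad <target address>
*/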
static void
s390_asm_trampoline_template (FILE *file)
{
  rtx op[2];

12664 op[0] = gen_rtx_REG (Pmode, 0);
op[1] = gen_rtx_REG (Pmode, 1);

if (TARGET_64BIT)
  {
    output_asm_insn ("basr\t%1,0", op);		/* 2 byte */
    output_asm_insn ("lmg\t%0,%1,14(%1)", op);	/* 6 byte */
    output_asm_insn ("br\t%1", op);		/* 2 byte */
    ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT) (TRAMPOLINE_SIZE - 10));
  }
else
  {
    output_asm_insn ("basr\t%1,0", op);		/* 2 byte */
    output_asm_insn ("lm\t%0,%1,6(%1)", op);	/* 4 byte */
    output_asm_insn ("br\t%1", op);		/* 2 byte */
    ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT) (TRAMPOLINE_SIZE - 8));
  }
12683 /* Emit RTL insns to initialize the variable parts of a trampoline.
12684 FNADDR is an RTX for the address of the function's pure code.
12685 CXT is an RTX for the static chain value for the function. */
static void
s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  rtx mem;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12693 emit_block_move (m_tramp, assemble_trampoline_template (),
12694 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12696 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12697 emit_move_insn (mem, cxt);
12698 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12699 emit_move_insn (mem, fnaddr);
12702 /* Output assembler code to FILE to increment profiler label # LABELNO
12703 for profiling a function entry. */
void
s390_function_profiler (FILE *file, int labelno)
{
  rtx op[7];
  char label[128];

  ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12713 fprintf (file, "# function profiler \n");
12715 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12716 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12717 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12719 op[2] = gen_rtx_REG (Pmode, 1);
12720 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12721 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12723 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
if (flag_pic)
  {
    op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
    op[4] = gen_rtx_CONST (Pmode, op[4]);
  }
12732 output_asm_insn ("stg\t%0,%1", op);
12733 output_asm_insn ("larl\t%2,%3", op);
12734 output_asm_insn ("brasl\t%0,%4", op);
12735 output_asm_insn ("lg\t%0,%1", op);
12737 else if (TARGET_CPU_ZARCH)
12739 output_asm_insn ("st\t%0,%1", op);
12740 output_asm_insn ("larl\t%2,%3", op);
12741 output_asm_insn ("brasl\t%0,%4", op);
12742 output_asm_insn ("l\t%0,%1", op);
12744 else if (!flag_pic)
12746 op[6] = gen_label_rtx ();
12748 output_asm_insn ("st\t%0,%1", op);
12749 output_asm_insn ("bras\t%2,%l6", op);
12750 output_asm_insn (".long\t%4", op);
12751 output_asm_insn (".long\t%3", op);
12752 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12753 output_asm_insn ("l\t%0,0(%2)", op);
12754 output_asm_insn ("l\t%2,4(%2)", op);
12755 output_asm_insn ("basr\t%0,%0", op);
12756 output_asm_insn ("l\t%0,%1", op);
12760 op[5] = gen_label_rtx ();
12761 op[6] = gen_label_rtx ();
12763 output_asm_insn ("st\t%0,%1", op);
12764 output_asm_insn ("bras\t%2,%l6", op);
12765 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12766 output_asm_insn (".long\t%4-%l5", op);
12767 output_asm_insn (".long\t%3-%l5", op);
12768 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12769 output_asm_insn ("lr\t%0,%2", op);
12770 output_asm_insn ("a\t%0,0(%2)", op);
12771 output_asm_insn ("a\t%2,4(%2)", op);
12772 output_asm_insn ("basr\t%0,%0", op);
12773 output_asm_insn ("l\t%0,%1", op);
12777 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12778 into its SYMBOL_REF_FLAGS. */
static void
s390_encode_section_info (tree decl, rtx rtl, int first)
12783 default_encode_section_info (decl, rtl, first);
12785 if (TREE_CODE (decl) == VAR_DECL)
12787 /* Store the alignment to be able to check if we can use
12788 a larl/load-relative instruction. We only handle the cases
12789 that can go wrong (i.e. no FUNC_DECLs). */
12790 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12791 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12792 else if (DECL_ALIGN (decl) % 32)
12793 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12794 else if (DECL_ALIGN (decl) % 64)
12795 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12798 /* Literal pool references don't have a decl so they are handled
12799 differently here. We rely on the information in the MEM_ALIGN
12800 entry to decide upon the alignment. */
if (MEM_P (rtl)
    && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12803 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
12805 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12806 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12807 else if (MEM_ALIGN (rtl) % 32)
12808 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12809 else if (MEM_ALIGN (rtl) % 64)
12810 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12814 /* Output thunk to FILE that implements a C++ virtual function call (with
12815 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12816 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12817 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12818 relative to the resulting this pointer. */
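/* Editor's illustration (assumed values): for DELTA == 8 and
   VCALL_OFFSET == 0 the 64-bit thunk reduces to

       la  %r2,8(%r2)   # adjust the this pointer
       jg  <function>   # tail-call the target method
*/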
static void
s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx op[10];
  int nonlocal = 0;
12828 /* Make sure unwind info is emitted for the thunk if needed. */
12829 final_start_function (emit_barrier (), file, 1);
12831 /* Operand 0 is the target function. */
12832 op[0] = XEXP (DECL_RTL (function), 0);
if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
  {
    nonlocal = 1;
    op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
			    TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
    op[0] = gen_rtx_CONST (Pmode, op[0]);
  }
12841 /* Operand 1 is the 'this' pointer. */
12842 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12843 op[1] = gen_rtx_REG (Pmode, 3);
12845 op[1] = gen_rtx_REG (Pmode, 2);
12847 /* Operand 2 is the delta. */
12848 op[2] = GEN_INT (delta);
12850 /* Operand 3 is the vcall_offset. */
12851 op[3] = GEN_INT (vcall_offset);
12853 /* Operand 4 is the temporary register. */
12854 op[4] = gen_rtx_REG (Pmode, 1);
12856 /* Operands 5 to 8 can be used as labels. */
12862 /* Operand 9 can be used for temporary register. */
12865 /* Generate code. */
12868 /* Setup literal pool pointer if required. */
12869 if ((!DISP_IN_RANGE (delta)
12870 && !CONST_OK_FOR_K (delta)
12871 && !CONST_OK_FOR_Os (delta))
12872 || (!DISP_IN_RANGE (vcall_offset)
12873 && !CONST_OK_FOR_K (vcall_offset)
12874 && !CONST_OK_FOR_Os (vcall_offset)))
12876 op[5] = gen_label_rtx ();
12877 output_asm_insn ("larl\t%4,%5", op);
12880 /* Add DELTA to this pointer. */
12883 if (CONST_OK_FOR_J (delta))
12884 output_asm_insn ("la\t%1,%2(%1)", op);
12885 else if (DISP_IN_RANGE (delta))
12886 output_asm_insn ("lay\t%1,%2(%1)", op);
12887 else if (CONST_OK_FOR_K (delta))
12888 output_asm_insn ("aghi\t%1,%2", op);
12889 else if (CONST_OK_FOR_Os (delta))
12890 output_asm_insn ("agfi\t%1,%2", op);
12893 op[6] = gen_label_rtx ();
12894 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12898 /* Perform vcall adjustment. */
12901 if (DISP_IN_RANGE (vcall_offset))
12903 output_asm_insn ("lg\t%4,0(%1)", op);
12904 output_asm_insn ("ag\t%1,%3(%4)", op);
12906 else if (CONST_OK_FOR_K (vcall_offset))
12908 output_asm_insn ("lghi\t%4,%3", op);
12909 output_asm_insn ("ag\t%4,0(%1)", op);
12910 output_asm_insn ("ag\t%1,0(%4)", op);
12912 else if (CONST_OK_FOR_Os (vcall_offset))
12914 output_asm_insn ("lgfi\t%4,%3", op);
12915 output_asm_insn ("ag\t%4,0(%1)", op);
12916 output_asm_insn ("ag\t%1,0(%4)", op);
12920 op[7] = gen_label_rtx ();
12921 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12922 output_asm_insn ("ag\t%4,0(%1)", op);
12923 output_asm_insn ("ag\t%1,0(%4)", op);
12927 /* Jump to target. */
12928 output_asm_insn ("jg\t%0", op);
12930 /* Output literal pool if required. */
12933 output_asm_insn (".align\t4", op);
12934 targetm.asm_out.internal_label (file, "L",
12935 CODE_LABEL_NUMBER (op[5]));
12939 targetm.asm_out.internal_label (file, "L",
12940 CODE_LABEL_NUMBER (op[6]));
12941 output_asm_insn (".long\t%2", op);
12945 targetm.asm_out.internal_label (file, "L",
12946 CODE_LABEL_NUMBER (op[7]));
12947 output_asm_insn (".long\t%3", op);
12952 /* Setup base pointer if required. */
if (!vcall_offset
    || (!DISP_IN_RANGE (delta)
12955 && !CONST_OK_FOR_K (delta)
12956 && !CONST_OK_FOR_Os (delta))
12957 || (!DISP_IN_RANGE (delta)
12958 && !CONST_OK_FOR_K (vcall_offset)
12959 && !CONST_OK_FOR_Os (vcall_offset)))
12961 op[5] = gen_label_rtx ();
12962 output_asm_insn ("basr\t%4,0", op);
12963 targetm.asm_out.internal_label (file, "L",
12964 CODE_LABEL_NUMBER (op[5]));
12967 /* Add DELTA to this pointer. */
12970 if (CONST_OK_FOR_J (delta))
12971 output_asm_insn ("la\t%1,%2(%1)", op);
12972 else if (DISP_IN_RANGE (delta))
12973 output_asm_insn ("lay\t%1,%2(%1)", op);
12974 else if (CONST_OK_FOR_K (delta))
12975 output_asm_insn ("ahi\t%1,%2", op);
12976 else if (CONST_OK_FOR_Os (delta))
12977 output_asm_insn ("afi\t%1,%2", op);
12980 op[6] = gen_label_rtx ();
12981 output_asm_insn ("a\t%1,%6-%5(%4)", op);
12985 /* Perform vcall adjustment. */
12988 if (CONST_OK_FOR_J (vcall_offset))
12990 output_asm_insn ("l\t%4,0(%1)", op);
12991 output_asm_insn ("a\t%1,%3(%4)", op);
12993 else if (DISP_IN_RANGE (vcall_offset))
12995 output_asm_insn ("l\t%4,0(%1)", op);
12996 output_asm_insn ("ay\t%1,%3(%4)", op);
12998 else if (CONST_OK_FOR_K (vcall_offset))
13000 output_asm_insn ("lhi\t%4,%3", op);
13001 output_asm_insn ("a\t%4,0(%1)", op);
13002 output_asm_insn ("a\t%1,0(%4)", op);
13004 else if (CONST_OK_FOR_Os (vcall_offset))
13006 output_asm_insn ("iilf\t%4,%3", op);
13007 output_asm_insn ("a\t%4,0(%1)", op);
13008 output_asm_insn ("a\t%1,0(%4)", op);
13012 op[7] = gen_label_rtx ();
13013 output_asm_insn ("l\t%4,%7-%5(%4)", op);
13014 output_asm_insn ("a\t%4,0(%1)", op);
13015 output_asm_insn ("a\t%1,0(%4)", op);
13018 /* We had to clobber the base pointer register.
13019 Re-setup the base pointer (with a different base). */
13020 op[5] = gen_label_rtx ();
13021 output_asm_insn ("basr\t%4,0", op);
13022 targetm.asm_out.internal_label (file, "L",
13023 CODE_LABEL_NUMBER (op[5]));
13026 /* Jump to target. */
13027 op[8] = gen_label_rtx ();
13030 output_asm_insn ("l\t%4,%8-%5(%4)", op);
13031 else if (!nonlocal)
13032 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13033 /* We cannot call through .plt, since .plt requires %r12 loaded. */
13034 else if (flag_pic == 1)
13036 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13037 output_asm_insn ("l\t%4,%0(%4)", op);
13039 else if (flag_pic == 2)
13041 op[9] = gen_rtx_REG (Pmode, 0);
13042 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
13043 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13044 output_asm_insn ("ar\t%4,%9", op);
13045 output_asm_insn ("l\t%4,0(%4)", op);
13048 output_asm_insn ("br\t%4", op);
13050 /* Output literal pool. */
13051 output_asm_insn (".align\t4", op);
13053 if (nonlocal && flag_pic == 2)
13054 output_asm_insn (".long\t%0", op);
13057 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
13058 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
13061 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
13063 output_asm_insn (".long\t%0", op);
13065 output_asm_insn (".long\t%0-%5", op);
13069 targetm.asm_out.internal_label (file, "L",
13070 CODE_LABEL_NUMBER (op[6]));
13071 output_asm_insn (".long\t%2", op);
13075 targetm.asm_out.internal_label (file, "L",
13076 CODE_LABEL_NUMBER (op[7]));
13077 output_asm_insn (".long\t%3", op);
13080 final_end_function ();
static bool
s390_valid_pointer_mode (machine_mode mode)
13086 return (mode == SImode || (TARGET_64BIT && mode == DImode));
13089 /* Checks whether the given CALL_EXPR would use a caller
13090 saved register. This is used to decide whether sibling call
   optimization could be performed on the respective function
   call.  */

static bool
s390_call_saved_register_used (tree call_expr)
13097 CUMULATIVE_ARGS cum_v;
cumulative_args_t cum;
tree parameter, type;
machine_mode mode;
rtx parm_rtx;
int reg, i;
13105 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
13106 cum = pack_cumulative_args (&cum_v);
13108 for (i = 0; i < call_expr_nargs (call_expr); i++)
13110 parameter = CALL_EXPR_ARG (call_expr, i);
13111 gcc_assert (parameter);
13113 /* For an undeclared variable passed as parameter we will get
13114 an ERROR_MARK node here. */
if (TREE_CODE (parameter) == ERROR_MARK)
  return true;
13118 type = TREE_TYPE (parameter);
13121 mode = TYPE_MODE (type);
13124 /* We assume that in the target function all parameters are
13125 named. This only has an impact on vector argument register
13126 usage none of which is call-saved. */
13127 if (pass_by_reference (&cum_v, mode, type, true))
13130 type = build_pointer_type (type);
13133 parm_rtx = s390_function_arg (cum, mode, type, true);
13135 s390_function_arg_advance (cum, mode, type, true);
if (REG_P (parm_rtx))
  for (reg = 0;
       reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
       reg++)
    if (!call_used_regs[reg + REGNO (parm_rtx)])
      return true;
if (GET_CODE (parm_rtx) == PARALLEL)
  for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
    {
      rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);

      gcc_assert (REG_P (r));

      for (reg = 0;
	   reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
	   reg++)
	if (!call_used_regs[reg + REGNO (r)])
	  return true;
    }

return false;
13171 /* Return true if the given call expression can be
13172 turned into a sibling call.
13173 DECL holds the declaration of the function to be called whereas
13174 EXP is the call expression itself. */
static bool
s390_function_ok_for_sibcall (tree decl, tree exp)
13179 /* The TPF epilogue uses register 1. */
if (TARGET_TPF_PROFILING)
  return false;
13183 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
13184 which would have to be restored before the sibcall. */
if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
  return false;
13188 /* Register 6 on s390 is available as an argument register but unfortunately
13189 "caller saved". This makes functions needing this register for arguments
13190 not suitable for sibcalls. */
13191 return !s390_call_saved_register_used (exp);
13194 /* Return the fixed registers used for condition codes. */
static bool
s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = CC_REGNUM;
  *p2 = INVALID_REGNUM;
  return true;
}
13205 /* This function is used by the call expanders of the machine description.
13206 It emits the call insn itself together with the necessary operations
13207 to adjust the target address and returns the emitted insn.
13208 ADDR_LOCATION is the target address rtx
13209 TLS_CALL the location of the thread-local symbol
13210 RESULT_REG the register where the result of the call should be stored
13211 RETADDR_REG the register where the return address should be stored
13212 If this parameter is NULL_RTX the call is considered
13213 to be a sibling call. */
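/* Editor's note (illustration): for an ordinary call the expanders
   pass e.g. RESULT_REG == %r2 and RETADDR_REG == %r14, whereas a
   sibling call is requested with RETADDR_REG == NULL_RTX.  */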
rtx_insn *
s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
		rtx retaddr_reg)
{
  bool plt_call = false;
  rtx_insn *insn;
  rtx call;
  rtx clobber;
  rtvec vec;
13225 /* Direct function calls need special treatment. */
13226 if (GET_CODE (addr_location) == SYMBOL_REF)
13228 /* When calling a global routine in PIC mode, we must
13229 replace the symbol itself with the PLT stub. */
13230 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13232 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
addr_location = gen_rtx_UNSPEC (Pmode,
				gen_rtvec (1, addr_location),
				UNSPEC_PLT);
addr_location = gen_rtx_CONST (Pmode, addr_location);
plt_call = true;
13241 /* For -fpic code the PLT entries might use r12 which is
13242 call-saved. Therefore we cannot do a sibcall when
13243 calling directly using a symbol ref. When reaching
13244 this point we decided (in s390_function_ok_for_sibcall)
13245 to do a sibcall for a function pointer but one of the
13246 optimizers was able to get rid of the function pointer
13247 by propagating the symbol ref into the call. This
optimization is illegal for S/390 so we turn the direct
call into an indirect call again.  */
13250 addr_location = force_reg (Pmode, addr_location);
13253 /* Unless we can use the bras(l) insn, force the
13254 routine address into a register. */
13255 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13258 addr_location = legitimize_pic_address (addr_location, 0);
13260 addr_location = force_reg (Pmode, addr_location);
13264 /* If it is already an indirect call or the code above moved the
13265 SYMBOL_REF to somewhere else make sure the address can be found in
13267 if (retaddr_reg == NULL_RTX
&& GET_CODE (addr_location) != SYMBOL_REF
&& !plt_call)
13271 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13272 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13275 addr_location = gen_rtx_MEM (QImode, addr_location);
13276 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13278 if (result_reg != NULL_RTX)
13279 call = gen_rtx_SET (result_reg, call);
13281 if (retaddr_reg != NULL_RTX)
13283 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13285 if (tls_call != NULL_RTX)
13286 vec = gen_rtvec (3, call, clobber,
13287 gen_rtx_USE (VOIDmode, tls_call));
13289 vec = gen_rtvec (2, call, clobber);
13291 call = gen_rtx_PARALLEL (VOIDmode, vec);
13294 insn = emit_call_insn (call);
13296 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13297 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13299 /* s390_function_ok_for_sibcall should
13300 have denied sibcalls in this case. */
13301 gcc_assert (retaddr_reg != NULL_RTX);
13302 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
13307 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
static void
s390_conditional_register_usage (void)
{
  int i;

  if (flag_pic)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
13319 if (TARGET_CPU_ZARCH)
13321 fixed_regs[BASE_REGNUM] = 0;
13322 call_used_regs[BASE_REGNUM] = 0;
13323 fixed_regs[RETURN_REGNUM] = 0;
13324 call_used_regs[RETURN_REGNUM] = 0;
if (TARGET_64BIT)
  {
    for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
      call_used_regs[i] = call_really_used_regs[i] = 0;
  }
else
  {
    call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
    call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
  }
13337 if (TARGET_SOFT_FLOAT)
13339 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13340 call_used_regs[i] = fixed_regs[i] = 1;
/* Disable v16 - v31 for non-vector targets.  */
if (!TARGET_VX)
  for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
    fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13351 /* Corresponding function to eh_return expander. */
13353 static GTY(()) rtx s390_tpf_eh_return_symbol;
void
s390_emit_tpf_eh_return (rtx target)
{
  rtx_insn *insn;
  rtx reg, orig_ra;

13360 if (!s390_tpf_eh_return_symbol)
13361 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13363 reg = gen_rtx_REG (Pmode, 2);
13364 orig_ra = gen_rtx_REG (Pmode, 3);
13366 emit_move_insn (reg, target);
13367 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13368 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13369 gen_rtx_REG (Pmode, RETURN_REGNUM));
13370 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13371 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13373 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13376 /* Rework the prologue/epilogue to avoid saving/restoring
13377 registers unnecessarily. */
static void
s390_optimize_prologue (void)
13382 rtx_insn *insn, *new_insn, *next_insn;
13384 /* Do a final recompute of the frame-related data. */
13385 s390_optimize_register_info ();
13387 /* If all special registers are in fact used, there's nothing we
13388 can do, so no point in walking the insn list. */
13390 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13391 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13392 && (TARGET_CPU_ZARCH
13393 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
	    && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
    return;
13397 /* Search for prologue/epilogue insns and replace them. */
13399 for (insn = get_insns (); insn; insn = next_insn)
13401 int first, last, off;
rtx set, base, offset;
rtx pat;
13405 next_insn = NEXT_INSN (insn);
13407 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13410 pat = PATTERN (insn);
/* Remove ldgr/lgdr instructions used for saving and restoring
   GPRs if possible.  */
rtx tmp_pat = pat;

if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
  tmp_pat = XVECEXP (pat, 0, 0);
13421 if (GET_CODE (tmp_pat) == SET
13422 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13423 && REG_P (SET_SRC (tmp_pat))
13424 && REG_P (SET_DEST (tmp_pat)))
13426 int src_regno = REGNO (SET_SRC (tmp_pat));
13427 int dest_regno = REGNO (SET_DEST (tmp_pat));
13431 if (!((GENERAL_REGNO_P (src_regno)
13432 && FP_REGNO_P (dest_regno))
13433 || (FP_REGNO_P (src_regno)
    || (FP_REGNO_P (src_regno)
	&& GENERAL_REGNO_P (dest_regno))))
  continue;
int gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
int fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13440 /* GPR must be call-saved, FPR must be call-clobbered. */
13441 if (!call_really_used_regs[fpr_regno]
    || call_really_used_regs[gpr_regno])
  continue;
13445 /* It must not happen that what we once saved in an FPR now
13446 needs a stack slot. */
13447 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13449 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
  remove_insn (insn);

continue;
13457 if (GET_CODE (pat) == PARALLEL
13458 && store_multiple_operation (pat, VOIDmode))
13460 set = XVECEXP (pat, 0, 0);
13461 first = REGNO (SET_SRC (set));
13462 last = first + XVECLEN (pat, 0) - 1;
13463 offset = const0_rtx;
13464 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13465 off = INTVAL (offset);
if (GET_CODE (base) != REG || off < 0)
  continue;
13469 if (cfun_frame_layout.first_save_gpr != -1
13470 && (cfun_frame_layout.first_save_gpr < first
    || cfun_frame_layout.last_save_gpr > last))
  continue;
13473 if (REGNO (base) != STACK_POINTER_REGNUM
    && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
  continue;

if (first > BASE_REGNUM || last < BASE_REGNUM)
  continue;
13479 if (cfun_frame_layout.first_save_gpr != -1)
13481 rtx s_pat = save_gprs (base,
13482 off + (cfun_frame_layout.first_save_gpr
13483 - first) * UNITS_PER_LONG,
13484 cfun_frame_layout.first_save_gpr,
13485 cfun_frame_layout.last_save_gpr);
13486 new_insn = emit_insn_before (s_pat, insn);
13487 INSN_ADDRESSES_NEW (new_insn, -1);
13490 remove_insn (insn);
13494 if (cfun_frame_layout.first_save_gpr == -1
13495 && GET_CODE (pat) == SET
13496 && GENERAL_REG_P (SET_SRC (pat))
13497 && GET_CODE (SET_DEST (pat)) == MEM)
set = pat;
first = REGNO (SET_SRC (set));
13501 offset = const0_rtx;
13502 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13503 off = INTVAL (offset);
if (GET_CODE (base) != REG || off < 0)
  continue;

if (REGNO (base) != STACK_POINTER_REGNUM
    && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
  continue;
13511 remove_insn (insn);
13515 if (GET_CODE (pat) == PARALLEL
13516 && load_multiple_operation (pat, VOIDmode))
13518 set = XVECEXP (pat, 0, 0);
13519 first = REGNO (SET_DEST (set));
13520 last = first + XVECLEN (pat, 0) - 1;
13521 offset = const0_rtx;
13522 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13523 off = INTVAL (offset);
if (GET_CODE (base) != REG || off < 0)
  continue;
13528 if (cfun_frame_layout.first_restore_gpr != -1
13529 && (cfun_frame_layout.first_restore_gpr < first
    || cfun_frame_layout.last_restore_gpr > last))
  continue;
13532 if (REGNO (base) != STACK_POINTER_REGNUM
    && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
  continue;

if (first > BASE_REGNUM || last < BASE_REGNUM)
  continue;
13538 if (cfun_frame_layout.first_restore_gpr != -1)
13540 rtx rpat = restore_gprs (base,
13541 off + (cfun_frame_layout.first_restore_gpr
13542 - first) * UNITS_PER_LONG,
13543 cfun_frame_layout.first_restore_gpr,
13544 cfun_frame_layout.last_restore_gpr);
13546 /* Remove REG_CFA_RESTOREs for registers that we no
13547 longer need to save. */
13548 REG_NOTES (rpat) = REG_NOTES (insn);
for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13550 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13551 && ((int) REGNO (XEXP (*ptr, 0))
13552 < cfun_frame_layout.first_restore_gpr))
13553 *ptr = XEXP (*ptr, 1);
13555 ptr = &XEXP (*ptr, 1);
13556 new_insn = emit_insn_before (rpat, insn);
13557 RTX_FRAME_RELATED_P (new_insn) = 1;
13558 INSN_ADDRESSES_NEW (new_insn, -1);
13561 remove_insn (insn);
13565 if (cfun_frame_layout.first_restore_gpr == -1
13566 && GET_CODE (pat) == SET
13567 && GENERAL_REG_P (SET_DEST (pat))
13568 && GET_CODE (SET_SRC (pat)) == MEM)
set = pat;
first = REGNO (SET_DEST (set));
13572 offset = const0_rtx;
13573 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13574 off = INTVAL (offset);
if (GET_CODE (base) != REG || off < 0)
  continue;

if (REGNO (base) != STACK_POINTER_REGNUM
    && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
  continue;
13583 remove_insn (insn);
/* On z10 and later the dynamic branch prediction must see the
   backward jump within a certain window.  If not, it falls back to
   the static prediction.  This function rearranges the loop backward
   branch in a way which makes the static prediction always correct.
   The function returns true if it added an instruction.  */
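/* Editor's sketch of the rewrite (illustration only): a too-distant
   backward  jne .Lloop  becomes

       je   .Lnew    # inverted condition, short forward jump
       j    .Lloop   # unconditional backward jump to the loop head
   .Lnew:

   so the static rule "backward taken, forward not taken" predicts
   the loop branch correctly.  */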
static bool
s390_fix_long_loop_prediction (rtx_insn *insn)
13597 rtx set = single_set (insn);
13598 rtx code_label, label_ref;
13599 rtx_insn *uncond_jump;
13600 rtx_insn *cur_insn;
13604 /* This will exclude branch on count and branch on index patterns
13605 since these are correctly statically predicted. */
if (!set
    || SET_DEST (set) != pc_rtx
    || GET_CODE (SET_SRC (set)) != IF_THEN_ELSE)
  return false;
13611 /* Skip conditional returns. */
13612 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13613 && XEXP (SET_SRC (set), 2) == pc_rtx)
13616 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13617 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13619 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13621 code_label = XEXP (label_ref, 0);
13623 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13624 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13625 || (INSN_ADDRESSES (INSN_UID (insn))
13626 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13629 for (distance = 0, cur_insn = PREV_INSN (insn);
13630 distance < PREDICT_DISTANCE - 6;
13631 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13632 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13635 rtx_code_label *new_label = gen_label_rtx ();
13636 uncond_jump = emit_jump_insn_after (
13637 gen_rtx_SET (pc_rtx,
13638 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13640 emit_label_after (new_label, uncond_jump);
13642 tmp = XEXP (SET_SRC (set), 1);
13643 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13644 XEXP (SET_SRC (set), 2) = tmp;
13645 INSN_CODE (insn) = -1;
13647 XEXP (label_ref, 0) = new_label;
13648 JUMP_LABEL (insn) = new_label;
13649 JUMP_LABEL (uncond_jump) = code_label;
13654 /* Returns 1 if INSN reads the value of REG for purposes not related
13655 to addressing of memory, and 0 otherwise. */
13657 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13659 return reg_referenced_p (reg, PATTERN (insn))
13660 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
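/* Example (a sketch, not from the original source): for the insn
   "lr %r1,%r2" the value of %r2 is read, so this returns true for
   %r2; for "l %r1,0(%r2)" the register only feeds address
   generation, reg_used_in_mem_p filters it out, and the result is
   false.  */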
13663 /* Starting from INSN find_cond_jump looks downwards in the insn
13664 stream for a single jump insn which is the last user of the
13665 condition code set in INSN. */
13667 find_cond_jump (rtx_insn *insn)
13669 for (; insn; insn = NEXT_INSN (insn))
13673 if (LABEL_P (insn))
13676 if (!JUMP_P (insn))
13678 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13683 /* This will be triggered by a return. */
13684 if (GET_CODE (PATTERN (insn)) != SET)
13687 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13688 ite = SET_SRC (PATTERN (insn));
13690 if (GET_CODE (ite) != IF_THEN_ELSE)
13693 cc = XEXP (XEXP (ite, 0), 0);
13694 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13697 if (find_reg_note (insn, REG_DEAD, cc))
13705 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13706 the semantics do not change. If NULL_RTX is passed as COND, the
13707 function tries to find the conditional jump starting with INSN. */
13709 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13713 if (cond == NULL_RTX)
13715 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13716 rtx set = jump ? single_set (jump) : NULL_RTX;
13718 if (set == NULL_RTX)
13721 cond = XEXP (SET_SRC (set), 0);
13726 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13729 /* On z10, instructions of the compare-and-branch family have the
13730 property to access the register occurring as second operand with
13731 its bits complemented. If such a compare is grouped with a second
13732 instruction that accesses the same register non-complemented, and
13733 if that register's value is delivered via a bypass, then the
13734 pipeline recycles, thereby causing significant performance decline.
13735 This function locates such situations and exchanges the two
13736 operands of the compare. The function returns true whenever it added an insn. */
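/* Hypothetical illustration of the hazard described above:

        cgrj %r1,%r2,8,label    # compare and branch; accesses %r2
                                # with its bits complemented
        lgr  %r4,%r2            # accesses %r2 non-complemented

   If both insns end up in the same group and %r2 arrives via a
   bypass, the pipeline recycles.  Swapping the compare operands (and
   the condition mask) or inserting a NOP avoids this.  */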
13739 s390_z10_optimize_cmp (rtx_insn *insn)
13741 rtx_insn *prev_insn, *next_insn;
13742 bool insn_added_p = false;
13743 rtx cond, *op0, *op1;
13745 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13747 /* Handle compare and branch and branch on count patterns. */
13749 rtx pattern = single_set (insn);
13752 || SET_DEST (pattern) != pc_rtx
13753 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13756 cond = XEXP (SET_SRC (pattern), 0);
13757 op0 = &XEXP (cond, 0);
13758 op1 = &XEXP (cond, 1);
13760 else if (GET_CODE (PATTERN (insn)) == SET)
13764 /* Handle normal compare instructions. */
13765 src = SET_SRC (PATTERN (insn));
13766 dest = SET_DEST (PATTERN (insn));
13769 || !CC_REGNO_P (REGNO (dest))
13770 || GET_CODE (src) != COMPARE)
13773 /* s390_swap_cmp will try to find the conditional
13774 jump when passing NULL_RTX as condition. */
13776 op0 = &XEXP (src, 0);
13777 op1 = &XEXP (src, 1);
13782 if (!REG_P (*op0) || !REG_P (*op1))
13785 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13788 /* Swap the COMPARE arguments and its mask if there is a
13789 conflicting access in the previous insn. */
13790 prev_insn = prev_active_insn (insn);
13791 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13792 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13793 s390_swap_cmp (cond, op0, op1, insn);
13795 /* Check if there is a conflict with the next insn. If there
13796 was no conflict with the previous insn, then swap the
13797 COMPARE arguments and its mask. If we already swapped
13798 the operands, or if swapping them would cause a conflict
13799 with the previous insn, issue a NOP after the COMPARE in
13800 order to separate the two instructions. */
13801 next_insn = next_active_insn (insn);
13802 if (next_insn != NULL_RTX && INSN_P (next_insn)
13803 && s390_non_addr_reg_read_p (*op1, next_insn))
13805 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13806 && s390_non_addr_reg_read_p (*op0, prev_insn))
13808 if (REGNO (*op1) == 0)
13809 emit_insn_after (gen_nop1 (), insn);
13811 emit_insn_after (gen_nop (), insn);
13812 insn_added_p = true;
13815 s390_swap_cmp (cond, op0, op1, insn);
13817 return insn_added_p;
13820 /* Number of INSNs to be scanned backward in the last BB of the loop
13821 and forward in the first BB of the loop. This usually should be a
13822 bit more than the number of INSNs which could go into one group. */
13824 #define S390_OSC_SCAN_INSN_NUM 5
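/* Background sketch (hedged illustration, not from the original
   source): a static operand-store-compare (OSC) collision is a load
   in the loop header that reads an address stored to in the latch of
   the previous iteration, e.g.

        L1:  l   %r4,0(%r2)    # header: load from 0(%r2)
             ...
             st  %r3,0(%r2)    # latch: store to the same address
             j   L1            # next iteration reloads the value

   When s390_adjust_loop_scan_osc below detects such a collision,
   s390_adjust_loops emits an osc_break insn into the latch to break
   the hazard.  */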
13826 /* Scan LOOP for static OSC collisions and return true if an osc_break
13827 should be issued for this loop. */
13829 s390_adjust_loop_scan_osc (struct loop* loop)
13832 HARD_REG_SET modregs, newregs;
13833 rtx_insn *insn, *store_insn = NULL;
13835 struct s390_address addr_store, addr_load;
13836 subrtx_iterator::array_type array;
13839 CLEAR_HARD_REG_SET (modregs);
13842 FOR_BB_INSNS_REVERSE (loop->latch, insn)
13844 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13848 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13851 find_all_hard_reg_sets (insn, &newregs, true);
13852 IOR_HARD_REG_SET (modregs, newregs);
13854 set = single_set (insn);
13858 if (MEM_P (SET_DEST (set))
13859 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
13866 if (store_insn == NULL_RTX)
13870 FOR_BB_INSNS (loop->header, insn)
13872 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13875 if (insn == store_insn)
13879 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13882 find_all_hard_reg_sets (insn, &newregs, true);
13883 IOR_HARD_REG_SET (modregs, newregs);
13885 set = single_set (insn);
13889 /* An intermediate store disrupts static OSC checking anyway. */
13891 if (MEM_P (SET_DEST (set))
13892 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
13895 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
13897 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
13898 && rtx_equal_p (addr_load.base, addr_store.base)
13899 && rtx_equal_p (addr_load.indx, addr_store.indx)
13900 && rtx_equal_p (addr_load.disp, addr_store.disp))
13902 if ((addr_load.base != NULL_RTX
13903 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
13904 || (addr_load.indx != NULL_RTX
13905 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
13912 /* Look for adjustments which can be done on simple innermost loops. */
13915 s390_adjust_loops ()
13917 struct loop *loop = NULL;
13920 compute_bb_for_insn ();
13922 /* Find the loops. */
13923 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
13925 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
13929 flow_loop_dump (loop, dump_file, NULL, 0);
13930 fprintf (dump_file, ";; OSC loop scan Loop: ");
13932 if (loop->latch == NULL
13933 || pc_set (BB_END (loop->latch)) == NULL_RTX
13934 || !s390_adjust_loop_scan_osc (loop))
13938 if (loop->latch == NULL)
13939 fprintf (dump_file, " multiple backward jumps\n");
13942 fprintf (dump_file, " header insn: %d latch insn: %d ",
13943 INSN_UID (BB_HEAD (loop->header)),
13944 INSN_UID (BB_END (loop->latch)));
13945 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
13946 fprintf (dump_file, " loop does not end with jump\n");
13948 fprintf (dump_file, " not instrumented\n");
13954 rtx_insn *new_insn;
13957 fprintf (dump_file, " adding OSC break insn: ");
13958 new_insn = emit_insn_before (gen_osc_break (),
13959 BB_END (loop->latch));
13960 INSN_ADDRESSES_NEW (new_insn, -1);
13964 loop_optimizer_finalize ();
13966 df_finish_pass (false);
13969 /* Perform machine-dependent processing. */
13974 bool pool_overflow = false;
13975 int hw_before, hw_after;
13977 if (s390_tune == PROCESSOR_2964_Z13)
13978 s390_adjust_loops ();
13980 /* Make sure all splits have been performed; splits after
13981 machine_dependent_reorg might confuse insn length counts. */
13982 split_all_insns_noflow ();
13984 /* Install the main literal pool and the associated base
13985 register load insns.
13987 In addition, there are two problematic situations we need to correct:
13990 - the literal pool might be > 4096 bytes in size, so that
13991 some of its elements cannot be directly accessed
13993 - a branch target might be > 64K away from the branch, so that
13994 it is not possible to use a PC-relative instruction.
13996 To fix those, we split the single literal pool into multiple
13997 pool chunks, reloading the pool base register at various
13998 points throughout the function to ensure it always points to
13999 the pool chunk the following code expects, and / or replace
14000 PC-relative branches by absolute branches.
14002 However, the two problems are interdependent: splitting the
14003 literal pool can move a branch further away from its target,
14004 causing the 64K limit to overflow, and on the other hand,
14005 replacing a PC-relative branch by an absolute branch means
14006 we need to put the branch target address into the literal
14007 pool, possibly causing it to overflow.
14009 So, we loop trying to fix up both problems until we manage
14010 to satisfy both conditions at the same time. Note that the
14011 loop is guaranteed to terminate as every pass of the loop
14012 strictly decreases the total number of PC-relative branches
14013 in the function. (This is not completely true as there
14014 might be branch-over-pool insns introduced by chunkify_start.
14015 Those never need to be split however.) */
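/* Worked illustration (hedged, not from the original source): suppose
   the pool holds 5000 bytes of constants and one branch target is 70K
   away.  Chunkifying the pool fixes the first problem but can push
   the branch even further away; splitting that branch in turn adds
   its target address to the pool.  Since every iteration of the loop
   below removes at least one PC-relative branch, the process still
   terminates.  */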
14019 struct constant_pool *pool = NULL;
14021 /* Collect the literal pool. */
14022 if (!pool_overflow)
14024 pool = s390_mainpool_start ();
14026 pool_overflow = true;
14029 /* If literal pool overflowed, start to chunkify it. */
14031 pool = s390_chunkify_start ();
14033 /* Split out-of-range branches. If this has created new
14034 literal pool entries, cancel current chunk list and
14035 recompute it. zSeries machines have large branch
14036 instructions, so we never need to split a branch. */
14037 if (!TARGET_CPU_ZARCH && s390_split_branches ())
14040 s390_chunkify_cancel (pool);
14042 s390_mainpool_cancel (pool);
14047 /* If we made it up to here, both conditions are satisfied.
14048 Finish up literal pool related changes. */
14050 s390_chunkify_finish (pool);
14052 s390_mainpool_finish (pool);
14054 /* We're done splitting branches. */
14055 cfun->machine->split_branches_pending_p = false;
14059 /* Generate out-of-pool execute target insns. */
14060 if (TARGET_CPU_ZARCH)
14062 rtx_insn *insn, *target;
14065 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14067 label = s390_execute_label (insn);
14071 gcc_assert (label != const0_rtx);
14073 target = emit_label (XEXP (label, 0));
14074 INSN_ADDRESSES_NEW (target, -1);
14076 target = emit_insn (s390_execute_target (insn));
14077 INSN_ADDRESSES_NEW (target, -1);
14081 /* Try to optimize prologue and epilogue further. */
14082 s390_optimize_prologue ();
14084 /* Walk over the insns and do some >=z10 specific changes. */
14085 if (s390_tune >= PROCESSOR_2097_Z10)
14088 bool insn_added_p = false;
14090 /* The insn lengths and addresses have to be up to date for the
14091 following manipulations. */
14092 shorten_branches (get_insns ());
14094 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14096 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14100 insn_added_p |= s390_fix_long_loop_prediction (insn);
14102 if ((GET_CODE (PATTERN (insn)) == PARALLEL
14103 || GET_CODE (PATTERN (insn)) == SET)
14104 && s390_tune == PROCESSOR_2097_Z10)
14105 insn_added_p |= s390_z10_optimize_cmp (insn);
14108 /* Adjust branches if we added new instructions. */
14110 shorten_branches (get_insns ());
14113 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
14118 /* Insert NOPs for hotpatching. */
14119 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14120 /* Emit NOPs
14121 1. inside the area covered by debug information to allow setting
14122 breakpoints at the NOPs,
14123 2. before any insn which results in an asm instruction,
14124 3. before in-function labels to avoid jumping to the NOPs, for
14125 example as part of a loop,
14126 4. before any barrier in case the function is completely empty
14127 (__builtin_unreachable ()) and has neither internal labels nor active insns. */
14130 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
14132 /* Output a series of NOPs before the first active insn. */
14133 while (insn && hw_after > 0)
14135 if (hw_after >= 3 && TARGET_CPU_ZARCH)
14137 emit_insn_before (gen_nop_6_byte (), insn);
14140 else if (hw_after >= 2)
14142 emit_insn_before (gen_nop_4_byte (), insn);
14147 emit_insn_before (gen_nop_2_byte (), insn);
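/* Worked example (a sketch, assuming hw_after counts halfwords):
   hw_after == 5 on a zarch target emits one 6-byte NOP (3 halfwords)
   followed by one 4-byte NOP (2 halfwords); hw_after == 3 without
   TARGET_CPU_ZARCH emits a 4-byte NOP plus a 2-byte NOP, since the
   6-byte form is unavailable there.  */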
14154 /* Return true if INSN is an fp load insn writing register REGNO. */
14156 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
14159 enum attr_type flag = s390_safe_attr_type (insn);
14161 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
14164 set = single_set (insn);
14166 if (set == NULL_RTX)
14169 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
14172 if (REGNO (SET_DEST (set)) != regno)
14178 /* This value describes the distance to be avoided between an
14179 arithmetic fp instruction and an fp load writing the same register.
14180 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
14181 fine, but the exact value has to be avoided. Otherwise the FP
14182 pipeline will throw an exception causing a major penalty. */
14183 #define Z10_EARLYLOAD_DISTANCE 7
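/* Worked sketch (an illustrative reading of the code below): the
   reorder hook looks Z10_EARLYLOAD_DISTANCE - 1 active insns back; if
   that insn is an arithmetic FP insn writing some %fN, an FP load of
   %fN issued right now would hit exactly the forbidden distance of 7,
   so such a load is demoted within the ready list and issues later
   instead.  */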
14185 /* Rearrange the ready list in order to avoid the situation described
14186 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14187 moved to the very end of the ready list. */
14189 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
14191 unsigned int regno;
14192 int nready = *nready_p;
14197 enum attr_type flag;
14200 /* Skip DISTANCE - 1 active insns. */
14201 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14202 distance > 0 && insn != NULL_RTX;
14203 distance--, insn = prev_active_insn (insn))
14204 if (CALL_P (insn) || JUMP_P (insn))
14207 if (insn == NULL_RTX)
14210 set = single_set (insn);
14212 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14213 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14216 flag = s390_safe_attr_type (insn);
14218 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14221 regno = REGNO (SET_DEST (set));
14224 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14231 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
14236 /* The s390_sched_state variable tracks the state of the current or
14237 the last instruction group.
14239 0,1,2 number of instructions scheduled in the current group
14240 3 the last group is complete - normal insns
14241 4 the last group was a cracked/expanded insn */
14243 static int s390_sched_state;
14245 #define S390_SCHED_STATE_NORMAL 3
14246 #define S390_SCHED_STATE_CRACKED 4
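/* Transition sketch for s390_sched_state, derived from
   s390_sched_variable_issue below:

        0/1/2    --normal insn-->              state + 1 (3 == complete)
        NORMAL   --normal insn-->              1 (a new group begins)
        CRACKED  --normal insn-->              NORMAL
        any      --cracked/expanded insn-->    CRACKED
        any      --endgroup/groupalone insn--> NORMAL  */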
14248 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14249 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14250 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14251 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
14253 static unsigned int
14254 s390_get_sched_attrmask (rtx_insn *insn)
14256 unsigned int mask = 0;
14260 case PROCESSOR_2827_ZEC12:
14261 if (get_attr_zEC12_cracked (insn))
14262 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14263 if (get_attr_zEC12_expanded (insn))
14264 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14265 if (get_attr_zEC12_endgroup (insn))
14266 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14267 if (get_attr_zEC12_groupalone (insn))
14268 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14270 case PROCESSOR_2964_Z13:
14271 case PROCESSOR_ARCH12:
14272 if (get_attr_z13_cracked (insn))
14273 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14274 if (get_attr_z13_expanded (insn))
14275 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14276 if (get_attr_z13_endgroup (insn))
14277 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14278 if (get_attr_z13_groupalone (insn))
14279 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14282 gcc_unreachable ();
14287 static unsigned int
14288 s390_get_unit_mask (rtx_insn *insn, int *units)
14290 unsigned int mask = 0;
14294 case PROCESSOR_2964_Z13:
14295 case PROCESSOR_ARCH12:
14297 if (get_attr_z13_unit_lsu (insn))
14299 if (get_attr_z13_unit_fxu (insn))
14301 if (get_attr_z13_unit_vfu (insn))
14305 gcc_unreachable ();
14310 /* Return the scheduling score for INSN. The higher the score the
14311 better. The score is calculated from the OOO scheduling attributes
14312 of INSN and the scheduling state s390_sched_state. */
14314 s390_sched_score (rtx_insn *insn)
14316 unsigned int mask = s390_get_sched_attrmask (insn);
14319 switch (s390_sched_state)
14322 /* Try to put insns into the first slot which would otherwise break a group. */
14324 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14325 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14327 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14331 /* Prefer not cracked insns while trying to put together a group. */
14333 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14334 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14335 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14337 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14341 /* Prefer not cracked insns while trying to put together a group. */
14343 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14344 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14345 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14347 /* Prefer endgroup insns in the last slot. */
14348 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14351 case S390_SCHED_STATE_NORMAL:
14352 /* Prefer not cracked insns if the last was not cracked. */
14353 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14354 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14356 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14359 case S390_SCHED_STATE_CRACKED:
14360 /* Try to keep cracked insns together to prevent them from
14361 interrupting groups. */
14362 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14363 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14368 if (s390_tune >= PROCESSOR_2964_Z13)
14371 unsigned unit_mask, m = 1;
14373 unit_mask = s390_get_unit_mask (insn, &units);
14374 gcc_assert (units <= MAX_SCHED_UNITS);
14376 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14377 ago the last insn of this unit type got scheduled. This is
14378 supposed to help provide a proper instruction mix to the CPU. */
14380 for (i = 0; i < units; i++, m <<= 1)
14382 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14383 MAX_SCHED_MIX_DISTANCE);
14388 /* This function is called via hook TARGET_SCHED_REORDER before
14389 issuing one insn from list READY which contains *NREADYP entries.
14390 For target z10 it reorders load instructions to avoid early load
14391 conflicts in the floating point pipeline. */
14393 s390_sched_reorder (FILE *file, int verbose,
14394 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
14396 if (s390_tune == PROCESSOR_2097_Z10
14397 && reload_completed
14399 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
14401 if (s390_tune >= PROCESSOR_2827_ZEC12
14402 && reload_completed
14406 int last_index = *nreadyp - 1;
14407 int max_index = -1;
14408 int max_score = -1;
14411 /* Just move the insn with the highest score to the top (the
14412 end) of the list. A full sort is not needed since a conflict
14413 in the hazard recognition cannot happen. So the top insn in
14414 the ready list will always be taken. */
14415 for (i = last_index; i >= 0; i--)
14419 if (recog_memoized (ready[i]) < 0)
14422 score = s390_sched_score (ready[i]);
14423 if (score > max_score)
14430 if (max_index != -1)
14432 if (max_index != last_index)
14434 tmp = ready[max_index];
14435 ready[max_index] = ready[last_index];
14436 ready[last_index] = tmp;
14440 ";;\t\tBACKEND: move insn %d to the top of list\n",
14441 INSN_UID (ready[last_index]));
14443 else if (verbose > 5)
14445 ";;\t\tBACKEND: best insn %d already on top\n",
14446 INSN_UID (ready[last_index]));
14451 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14454 for (i = last_index; i >= 0; i--)
14456 unsigned int sched_mask;
14457 rtx_insn *insn = ready[i];
14459 if (recog_memoized (insn) < 0)
14462 sched_mask = s390_get_sched_attrmask (insn);
14463 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14465 s390_sched_score (insn));
14466 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14467 ((M) & sched_mask) ? #ATTR : "");
14468 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14469 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14470 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14471 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14472 #undef PRINT_SCHED_ATTR
14473 if (s390_tune >= PROCESSOR_2964_Z13)
14475 unsigned int unit_mask, m = 1;
14478 unit_mask = s390_get_unit_mask (insn, &units);
14479 fprintf (file, "(units:");
14480 for (j = 0; j < units; j++, m <<= 1)
14482 fprintf (file, " u%d", j);
14483 fprintf (file, ")");
14485 fprintf (file, "\n");
14490 return s390_issue_rate ();
14494 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14495 the scheduler has issued INSN. It stores the last issued insn into
14496 last_scheduled_insn in order to make it available for
14497 s390_sched_reorder. */
14499 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14501 last_scheduled_insn = insn;
14503 if (s390_tune >= PROCESSOR_2827_ZEC12
14504 && reload_completed
14505 && recog_memoized (insn) >= 0)
14507 unsigned int mask = s390_get_sched_attrmask (insn);
14509 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14510 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14511 s390_sched_state = S390_SCHED_STATE_CRACKED;
14512 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14513 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14514 s390_sched_state = S390_SCHED_STATE_NORMAL;
14517 /* Only normal insns are left (mask == 0). */
14518 switch (s390_sched_state)
14523 case S390_SCHED_STATE_NORMAL:
14524 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
14525 s390_sched_state = 1;
14527 s390_sched_state++;
14530 case S390_SCHED_STATE_CRACKED:
14531 s390_sched_state = S390_SCHED_STATE_NORMAL;
14536 if (s390_tune >= PROCESSOR_2964_Z13)
14539 unsigned unit_mask, m = 1;
14541 unit_mask = s390_get_unit_mask (insn, &units);
14542 gcc_assert (units <= MAX_SCHED_UNITS);
14544 for (i = 0; i < units; i++, m <<= 1)
14546 last_scheduled_unit_distance[i] = 0;
14547 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14548 last_scheduled_unit_distance[i]++;
14553 unsigned int sched_mask;
14555 sched_mask = s390_get_sched_attrmask (insn);
14557 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14558 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14559 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14560 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14561 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14562 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14563 #undef PRINT_SCHED_ATTR
14565 if (s390_tune >= PROCESSOR_2964_Z13)
14567 unsigned int unit_mask, m = 1;
14570 unit_mask = s390_get_unit_mask (insn, &units);
14571 fprintf (file, "(units:");
14572 for (j = 0; j < units; j++, m <<= 1)
14574 fprintf (file, " %d", j);
14575 fprintf (file, ")");
14577 fprintf (file, " sched state: %d\n", s390_sched_state);
14579 if (s390_tune >= PROCESSOR_2964_Z13)
14583 s390_get_unit_mask (insn, &units);
14585 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14586 for (j = 0; j < units; j++)
14587 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14588 fprintf (file, "\n");
14593 if (GET_CODE (PATTERN (insn)) != USE
14594 && GET_CODE (PATTERN (insn)) != CLOBBER)
14601 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14602 int verbose ATTRIBUTE_UNUSED,
14603 int max_ready ATTRIBUTE_UNUSED)
14605 last_scheduled_insn = NULL;
14606 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14607 s390_sched_state = 0;
14610 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14611 a new unroll count for LOOP when tuning for CPUs with a built-in
14612 stride prefetcher.
14613 The loop is analyzed for memory accesses by calling check_dpu for
14614 each rtx of the loop. Depending on the loop_depth and the amount of
14615 memory accesses, a new number <= nunroll is returned to improve the
14615 memory accesses a new number <=nunroll is returned to improve the
14616 behavior of the hardware prefetch unit. */
14618 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14623 unsigned mem_count = 0;
14625 if (s390_tune < PROCESSOR_2097_Z10)
14628 /* Count the number of memory references within the loop body. */
14629 bbs = get_loop_body (loop);
14630 subrtx_iterator::array_type array;
14631 for (i = 0; i < loop->num_nodes; i++)
14632 FOR_BB_INSNS (bbs[i], insn)
14633 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14634 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14639 /* Prevent division by zero, and we do not need to adjust nunroll in this case. */
14640 if (mem_count == 0)
14643 switch (loop_depth (loop))
14646 return MIN (nunroll, 28 / mem_count);
14648 return MIN (nunroll, 22 / mem_count);
14650 return MIN (nunroll, 16 / mem_count);
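/* Worked example (illustrative): an innermost loop at depth 1 with 4
   memory references is unrolled at most MIN (nunroll, 28 / 4) = 7
   times, so the unrolled body does not overwhelm the stride
   prefetcher with too many concurrent memory streams.  */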
14654 /* Restore the current options. This is a hook function and also called internally. */
14658 s390_function_specific_restore (struct gcc_options *opts,
14659 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14661 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14665 s390_option_override_internal (bool main_args_p,
14666 struct gcc_options *opts,
14667 const struct gcc_options *opts_set)
14669 const char *prefix;
14670 const char *suffix;
14672 /* Set up prefix/suffix so the error messages refer to either the command
14673 line argument, or the attribute(target). */
14681 prefix = "option(\"";
14686 /* Architecture mode defaults according to ABI. */
14687 if (!(opts_set->x_target_flags & MASK_ZARCH))
14690 opts->x_target_flags |= MASK_ZARCH;
14692 opts->x_target_flags &= ~MASK_ZARCH;
14695 /* Set the march default in case it hasn't been specified on cmdline. */
14696 if (!opts_set->x_s390_arch)
14697 opts->x_s390_arch = PROCESSOR_2064_Z900;
14698 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14699 || opts->x_s390_arch == PROCESSOR_9672_G6)
14700 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14701 "in future releases; use at least %sarch=z900%s",
14702 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14703 suffix, prefix, suffix);
14705 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14707 /* Determine processor to tune for. */
14708 if (!opts_set->x_s390_tune)
14709 opts->x_s390_tune = opts->x_s390_arch;
14710 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14711 || opts->x_s390_tune == PROCESSOR_9672_G6)
14712 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14713 "in future releases; use at least %stune=z900%s",
14714 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14715 suffix, prefix, suffix);
14717 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14719 /* Sanity checks. */
14720 if (opts->x_s390_arch == PROCESSOR_NATIVE
14721 || opts->x_s390_tune == PROCESSOR_NATIVE)
14722 gcc_unreachable ();
14723 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14724 error ("z/Architecture mode not supported on %s",
14725 processor_table[(int)opts->x_s390_arch].name);
14726 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14727 error ("64-bit ABI not supported in ESA/390 mode");
14729 /* Enable hardware transactions if available and not explicitly
14730 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14731 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14733 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14734 opts->x_target_flags |= MASK_OPT_HTM;
14736 opts->x_target_flags &= ~MASK_OPT_HTM;
14739 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14741 if (TARGET_OPT_VX_P (opts->x_target_flags))
14743 if (!TARGET_CPU_VX_P (opts))
14744 error ("hardware vector support not available on %s",
14745 processor_table[(int)opts->x_s390_arch].name);
14746 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14747 error ("hardware vector support not available with -msoft-float");
14752 if (TARGET_CPU_VX_P (opts))
14753 /* Enable vector support if available and not explicitly disabled
14754 by user. E.g. with -m31 -march=z13 -mzarch */
14755 opts->x_target_flags |= MASK_OPT_VX;
14757 opts->x_target_flags &= ~MASK_OPT_VX;
14760 /* Use hardware DFP if available and not explicitly disabled by
14761 user. E.g. with -m31 -march=z10 -mzarch */
14762 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14764 if (TARGET_DFP_P (opts))
14765 opts->x_target_flags |= MASK_HARD_DFP;
14767 opts->x_target_flags &= ~MASK_HARD_DFP;
14770 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14772 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14774 if (!TARGET_CPU_DFP_P (opts))
14775 error ("hardware decimal floating point instructions"
14776 " not available on %s",
14777 processor_table[(int)opts->x_s390_arch].name);
14778 if (!TARGET_ZARCH_P (opts->x_target_flags))
14779 error ("hardware decimal floating point instructions"
14780 " not available in ESA/390 mode");
14783 opts->x_target_flags &= ~MASK_HARD_DFP;
14786 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14787 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14789 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14790 && TARGET_HARD_DFP_P (opts->x_target_flags))
14791 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14793 opts->x_target_flags &= ~MASK_HARD_DFP;
14796 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14797 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14798 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14799 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14802 if (opts->x_s390_stack_size)
14804 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14805 error ("stack size must be greater than the stack guard value");
14806 else if (opts->x_s390_stack_size > 1 << 16)
14807 error ("stack size must not be greater than 64k");
14809 else if (opts->x_s390_stack_guard)
14810 error ("-mstack-guard implies use of -mstack-size");
14812 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14813 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14814 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14817 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14819 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14820 opts->x_param_values,
14821 opts_set->x_param_values);
14822 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14823 opts->x_param_values,
14824 opts_set->x_param_values);
14825 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14826 opts->x_param_values,
14827 opts_set->x_param_values);
14828 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14829 opts->x_param_values,
14830 opts_set->x_param_values);
14833 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14834 opts->x_param_values,
14835 opts_set->x_param_values);
14836 /* Values for loop prefetching. */
14837 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14838 opts->x_param_values,
14839 opts_set->x_param_values);
14840 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14841 opts->x_param_values,
14842 opts_set->x_param_values);
14843 /* s390 has more than 2 levels and the size is much larger. Since
14844 we are always running virtualized, assume that we only get a small
14845 part of the caches above l1. */
14846 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14847 opts->x_param_values,
14848 opts_set->x_param_values);
14849 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14850 opts->x_param_values,
14851 opts_set->x_param_values);
14852 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14853 opts->x_param_values,
14854 opts_set->x_param_values);
14856 /* Use the alternative scheduling-pressure algorithm by default. */
14857 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14858 opts->x_param_values,
14859 opts_set->x_param_values);
14861 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
14862 opts->x_param_values,
14863 opts_set->x_param_values);
14865 /* Call target specific restore function to do post-init work. At the moment,
14866 this just sets opts->x_s390_cost_pointer. */
14867 s390_function_specific_restore (opts, NULL);
14871 s390_option_override (void)
14874 cl_deferred_option *opt;
14875 vec<cl_deferred_option> *v =
14876 (vec<cl_deferred_option> *) s390_deferred_options;
14879 FOR_EACH_VEC_ELT (*v, i, opt)
14881 switch (opt->opt_index)
14883 case OPT_mhotpatch_:
14890 strncpy (s, opt->arg, 256);
14892 t = strchr (s, ',');
14897 val1 = integral_argument (s);
14898 val2 = integral_argument (t);
14905 if (val1 == -1 || val2 == -1)
14907 /* Argument is not a plain number. */
14908 error ("arguments to %qs should be non-negative integers",
14912 else if (val1 > s390_hotpatch_hw_max
14913 || val2 > s390_hotpatch_hw_max)
14915 error ("argument to %qs is too large (max. %d)",
14916 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14919 s390_hotpatch_hw_before_label = val1;
14920 s390_hotpatch_hw_after_label = val2;
14924 gcc_unreachable ();
14928 /* Set up function hooks. */
14929 init_machine_status = s390_init_machine_status;
14931 s390_option_override_internal (true, &global_options, &global_options_set);
14933 /* Save the initial options in case the user does function specific options. */
14935 target_option_default_node = build_target_option_node (&global_options);
14936 target_option_current_node = target_option_default_node;
14938 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14939 requires the arch flags to be evaluated already. Since prefetching
14940 is beneficial on s390, we enable it if available. */
14941 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14942 flag_prefetch_loop_arrays = 1;
14944 if (!s390_pic_data_is_text_relative && !flag_pic)
14945 error ("-mno-pic-data-is-text-relative cannot be used without -fpic/-fPIC");
14949 /* Don't emit DWARF3/4 unless specifically selected. The TPF
14950 debuggers do not yet support DWARF 3/4. */
14951 if (!global_options_set.x_dwarf_strict)
14953 if (!global_options_set.x_dwarf_version)
14957 /* Register a target-specific optimization-and-lowering pass
14958 to run immediately before prologue and epilogue generation.
14960 Registering the pass must be done at start up. It's
14961 convenient to do it here. */
14962 opt_pass *new_pass = new pass_s390_early_mach (g);
14963 struct register_pass_info insert_pass_s390_early_mach =
14965 new_pass, /* pass */
14966 "pro_and_epilogue", /* reference_pass_name */
14967 1, /* ref_pass_instance_number */
14968 PASS_POS_INSERT_BEFORE /* po_op */
14970 register_pass (&insert_pass_s390_early_mach);
14973 #if S390_USE_TARGET_ATTRIBUTE
14974 /* Inner function to process the attribute((target(...))), take an argument and
14975 set the current options from the argument. If we have a list, recursively go over the arguments. */
14979 s390_valid_target_attribute_inner_p (tree args,
14980 struct gcc_options *opts,
14981 struct gcc_options *new_opts_set,
14987 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
14988 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
14989 static const struct
14991 const char *string;
14995 int only_as_pragma;
14998 S390_ATTRIB ("arch=", OPT_march_, 1),
14999 S390_ATTRIB ("tune=", OPT_mtune_, 1),
15000 /* uinteger options */
15001 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
15002 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
15003 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
15004 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
15006 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
15007 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
15008 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
15009 S390_ATTRIB ("htm", OPT_mhtm, 0),
15010 S390_ATTRIB ("vx", OPT_mvx, 0),
15011 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
15012 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
15013 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
15014 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
15015 S390_PRAGMA ("zvector", OPT_mzvector, 0),
15016 /* boolean options */
15017 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
15022 /* If this is a list, recurse to get the options. */
15023 if (TREE_CODE (args) == TREE_LIST)
15026 int num_pragma_values;
15029 /* Note: attribs.c:decl_attributes prepends the values from
15030 current_target_pragma to the list of target attributes. To determine
15031 whether we're looking at a value of the attribute or the pragma we
15032 assume that the first [list_length (current_target_pragma)] values in
15033 the list are the values from the pragma. */
15034 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
15035 ? list_length (current_target_pragma) : 0;
15036 for (i = 0; args; args = TREE_CHAIN (args), i++)
15040 is_pragma = (force_pragma || i < num_pragma_values);
15041 if (TREE_VALUE (args)
15042 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
15043 opts, new_opts_set,
15052 else if (TREE_CODE (args) != STRING_CST)
15054 error ("attribute %<target%> argument not a string");
15058 /* Handle multiple arguments separated by commas. */
15059 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
15061 while (next_optstr && *next_optstr != '\0')
15063 char *p = next_optstr;
15065 char *comma = strchr (next_optstr, ',');
15066 size_t len, opt_len;
15072 enum cl_var_type var_type;
15078 len = comma - next_optstr;
15079 next_optstr = comma + 1;
15084 next_optstr = NULL;
15087 /* Recognize no-xxx. */
15088 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
15097 /* Find the option. */
15100 for (i = 0; i < ARRAY_SIZE (attrs); i++)
15102 opt_len = attrs[i].len;
15103 if (ch == attrs[i].string[0]
15104 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
15105 && memcmp (p, attrs[i].string, opt_len) == 0)
15107 opt = attrs[i].opt;
15108 if (!opt_set_p && cl_options[opt].cl_reject_negative)
15110 mask = cl_options[opt].var_value;
15111 var_type = cl_options[opt].var_type;
15117 /* Process the option. */
15120 error ("attribute(target(\"%s\")) is unknown", orig_p);
15123 else if (attrs[i].only_as_pragma && !force_pragma)
15125 /* Value is not allowed for the target attribute. */
15126 error ("value %qs is not supported by attribute %<target%>",
15131 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
15133 if (var_type == CLVC_BIT_CLEAR)
15134 opt_set_p = !opt_set_p;
15137 opts->x_target_flags |= mask;
15139 opts->x_target_flags &= ~mask;
15140 new_opts_set->x_target_flags |= mask;
15143 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
15147 if (cl_options[opt].cl_uinteger)
15149 /* Unsigned integer argument. Code based on the function
15150 decode_cmdline_option () in opts-common.c. */
15151 value = integral_argument (p + opt_len);
15154 value = (opt_set_p) ? 1 : 0;
15158 struct cl_decoded_option decoded;
15160 /* Value range check; only implemented for numeric and boolean
15161 options at the moment. */
15162 generate_option (opt, NULL, value, CL_TARGET, &decoded);
15163 s390_handle_option (opts, new_opts_set, &decoded, input_location);
15164 set_option (opts, new_opts_set, opt, value,
15165 p + opt_len, DK_UNSPECIFIED, input_location,
15170 error ("attribute(target(\"%s\")) is unknown", orig_p);
15175 else if (cl_options[opt].var_type == CLVC_ENUM)
15180 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
15182 set_option (opts, new_opts_set, opt, value,
15183 p + opt_len, DK_UNSPECIFIED, input_location,
15187 error ("attribute(target(\"%s\")) is unknown", orig_p);
15193 gcc_unreachable ();
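/* Usage sketch (an illustration, not from the original source): the
   strings parsed above come from attributes and pragmas such as

       void foo (void) __attribute__ ((target ("arch=z13,no-vx")));
       #pragma GCC target ("zvector")

   where a "no-" prefix inverts a boolean option and "zvector" is
   accepted only in its pragma form (S390_PRAGMA above).  */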
15198 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15201 s390_valid_target_attribute_tree (tree args,
15202 struct gcc_options *opts,
15203 const struct gcc_options *opts_set,
15206 tree t = NULL_TREE;
15207 struct gcc_options new_opts_set;
15209 memset (&new_opts_set, 0, sizeof (new_opts_set));
15211 /* Process each of the options on the chain. */
15212 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
15214 return error_mark_node;
15216 /* If some option was set (even if it has not changed), rerun
15217 s390_option_override_internal, and then save the options away. */
15218 if (new_opts_set.x_target_flags
15219 || new_opts_set.x_s390_arch
15220 || new_opts_set.x_s390_tune
15221 || new_opts_set.x_s390_stack_guard
15222 || new_opts_set.x_s390_stack_size
15223 || new_opts_set.x_s390_branch_cost
15224 || new_opts_set.x_s390_warn_framesize
15225 || new_opts_set.x_s390_warn_dynamicstack_p)
15227 const unsigned char *src = (const unsigned char *)opts_set;
15228 unsigned char *dest = (unsigned char *)&new_opts_set;
15231 /* Merge the original option flags into the new ones. */
15232 for (i = 0; i < sizeof(*opts_set); i++)
15235 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
15236 s390_option_override_internal (false, opts, &new_opts_set);
15237 /* Save the current options unless we are validating options for #pragma. */
15239 t = build_target_option_node (opts);
15244 /* Hook to validate attribute((target("string"))). */
15247 s390_valid_target_attribute_p (tree fndecl,
15248 tree ARG_UNUSED (name),
15250 int ARG_UNUSED (flags))
15252 struct gcc_options func_options;
15253 tree new_target, new_optimize;
15256 /* attribute((target("default"))) does nothing, beyond
15257 affecting multi-versioning. */
15258 if (TREE_VALUE (args)
15259 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
15260 && TREE_CHAIN (args) == NULL_TREE
15261 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
15264 tree old_optimize = build_optimization_node (&global_options);
15266 /* Get the optimization options of the current function. */
15267 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
15269 if (!func_optimize)
15270 func_optimize = old_optimize;
15272 /* Init func_options. */
15273 memset (&func_options, 0, sizeof (func_options));
15274 init_options_struct (&func_options, NULL);
15275 lang_hooks.init_options_struct (&func_options);
15277 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
15279 /* Initialize func_options to the default before its target options can be set. */
15281 cl_target_option_restore (&func_options,
15282 TREE_TARGET_OPTION (target_option_default_node));
15284 new_target = s390_valid_target_attribute_tree (args, &func_options,
15285 &global_options_set,
15287 current_target_pragma));
15288 new_optimize = build_optimization_node (&func_options);
15289 if (new_target == error_mark_node)
15291 else if (fndecl && new_target)
15293 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
15294 if (old_optimize != new_optimize)
15295 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
15300 /* Restore targets globals from NEW_TREE and invalidate the s390_previous_fndecl cache. */
15304 s390_activate_target_options (tree new_tree)
15306 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
15307 if (TREE_TARGET_GLOBALS (new_tree))
15308 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
15309 else if (new_tree == target_option_default_node)
15310 restore_target_globals (&default_target_globals);
15312 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
15313 s390_previous_fndecl = NULL_TREE;
15316 /* Establish appropriate back-end context for processing the function
15317 FNDECL. The argument might be NULL to indicate processing at top
15318 level, outside of any function scope. */
15320 s390_set_current_function (tree fndecl)
15322 /* Only change the context if the function changes. This hook is called
15323 several times in the course of compiling a function, and we don't want to
15324 slow things down too much or call target_reinit when it isn't safe. */
15325 if (fndecl == s390_previous_fndecl)
15329 if (s390_previous_fndecl == NULL_TREE)
15330 old_tree = target_option_current_node;
15331 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
15332 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
15334 old_tree = target_option_default_node;
15336 if (fndecl == NULL_TREE)
15338 if (old_tree != target_option_current_node)
15339 s390_activate_target_options (target_option_current_node);
15343 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
15344 if (new_tree == NULL_TREE)
15345 new_tree = target_option_default_node;
15347 if (old_tree != new_tree)
15348 s390_activate_target_options (new_tree);
15349 s390_previous_fndecl = fndecl;
15353 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
15356 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
15357 unsigned int align ATTRIBUTE_UNUSED,
15358 enum by_pieces_operation op ATTRIBUTE_UNUSED,
15359 bool speed_p ATTRIBUTE_UNUSED)
15361 return (size == 1 || size == 2
15362 || size == 4 || (TARGET_ZARCH && size == 8));
15365 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
15368 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
15370 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
15371 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
15372 tree call_efpc = build_call_expr (efpc, 0);
15373 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
15375 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
15376 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
15377 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
15378 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
15379 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
15380 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
15382 /* Generates the equivalent of feholdexcept (&fenv_var)
15384 fenv_var = __builtin_s390_efpc ();
15385 __builtin_s390_sfpc (fenv_var & mask) */
15386 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
15388 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
15389 build_int_cst (unsigned_type_node,
15390 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
15391 FPC_EXCEPTION_MASK)));
15392 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
15393 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
15395 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
15397 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
15398 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
15399 build_int_cst (unsigned_type_node,
15400 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
15401 *clear = build_call_expr (sfpc, 1, new_fpc);
15403 /* Generates the equivalent of feupdateenv (fenv_var)
15405 old_fpc = __builtin_s390_efpc ();
15406 __builtin_s390_sfpc (fenv_var);
15407 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
15409 old_fpc = create_tmp_var_raw (unsigned_type_node);
15410 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
15411 old_fpc, call_efpc);
15413 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
15415 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
15416 build_int_cst (unsigned_type_node,
15418 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
15419 build_int_cst (unsigned_type_node,
15421 tree atomic_feraiseexcept
15422 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
15423 raise_old_except = build_call_expr (atomic_feraiseexcept,
15424 1, raise_old_except);
15426 *update = build2 (COMPOUND_EXPR, void_type_node,
15427 build2 (COMPOUND_EXPR, void_type_node,
15428 store_old_fpc, set_new_fpc),
15431 #undef FPC_EXCEPTION_MASK
15432 #undef FPC_FLAGS_MASK
15433 #undef FPC_DXC_MASK
15434 #undef FPC_EXCEPTION_MASK_SHIFT
15435 #undef FPC_FLAGS_SHIFT
15436 #undef FPC_DXC_SHIFT
15439 /* Return the vector mode to be used for inner mode MODE when doing vectorization. */
15441 static machine_mode
15442 s390_preferred_simd_mode (machine_mode mode)
15462 /* Our hardware does not require vectors to be strictly aligned. */
15464 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
15465 const_tree type ATTRIBUTE_UNUSED,
15466 int misalignment ATTRIBUTE_UNUSED,
15467 bool is_packed ATTRIBUTE_UNUSED)
15472 return default_builtin_support_vector_misalignment (mode, type, misalignment,
15476 /* The vector ABI requires vector types to be aligned on an 8 byte
15477 boundary (our stack alignment). However, we allow this to be
15478 overridden by the user, although this definitely breaks the ABI. */
15479 static HOST_WIDE_INT
15480 s390_vector_alignment (const_tree type)
15482 if (!TARGET_VX_ABI)
15483 return default_vector_alignment (type);
15485 if (TYPE_USER_ALIGN (type))
15486 return TYPE_ALIGN (type);
15488 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
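/* Example (illustrative): under the vector ABI a 16-byte "vector int"
   reports an alignment of MIN (64, 128) = 64 bits (8 bytes, the stack
   alignment) instead of its natural 128 bits, while a 4-byte vector
   keeps its natural 32-bit alignment since MIN picks the smaller
   value.  */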
15491 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15492 /* Implement TARGET_ASM_FILE_START. */
15494 s390_asm_file_start (void)
15496 default_file_start ();
15497 s390_asm_output_machine_for_arch (asm_out_file);
15501 /* Implement TARGET_ASM_FILE_END. */
15503 s390_asm_file_end (void)
15505 #ifdef HAVE_AS_GNU_ATTRIBUTE
15506 varpool_node *vnode;
15507 cgraph_node *cnode;
15509 FOR_EACH_VARIABLE (vnode)
15510 if (TREE_PUBLIC (vnode->decl))
15511 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
15513 FOR_EACH_FUNCTION (cnode)
15514 if (TREE_PUBLIC (cnode->decl))
15515 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
15518 if (s390_vector_abi != 0)
15519 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
15522 file_end_indicate_exec_stack ();
15524 if (flag_split_stack)
15525 file_end_indicate_split_stack ();
15528 /* Return true if TYPE is a vector bool type. */
15530 s390_vector_bool_type_p (const_tree type)
15532 return TYPE_VECTOR_OPAQUE (type);
15535 /* Return the diagnostic message string if the binary operation OP is
15536 not permitted on TYPE1 and TYPE2, NULL otherwise. */
15538 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15540 bool bool1_p, bool2_p;
15544 machine_mode mode1, mode2;
15546 if (!TARGET_ZVECTOR)
15549 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15552 bool1_p = s390_vector_bool_type_p (type1);
15553 bool2_p = s390_vector_bool_type_p (type2);
15555 /* Mixing signed and unsigned types is forbidden for all operators. */
15557 if (!bool1_p && !bool2_p
15558 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15559 return N_("types differ in signedness");
15561 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15562 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15563 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15564 || op == ROUND_DIV_EXPR);
15565 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15566 || op == EQ_EXPR || op == NE_EXPR);
15568 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15569 return N_("binary operator does not support two vector bool operands");
15571 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15572 return N_("binary operator does not support vector bool operand");
15574 mode1 = TYPE_MODE (type1);
15575 mode2 = TYPE_MODE (type2);
15577 if (bool1_p != bool2_p && plusminus_p
15578 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15579 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15580 return N_("binary operator does not support mixing vector "
15581 "bool with floating point vector operands");
15586 /* Implement TARGET_C_EXCESS_PRECISION.
15588 FIXME: For historical reasons, float_t and double_t are typedef'ed to
15589 double on s390, causing operations on float_t to operate in a higher
15590 precision than is necessary. However, it is not the case that SFmode
15591 operations have implicit excess precision, and we generate more optimal
15592 code if we let the compiler know no implicit extra precision is added.
15594 That means when we are compiling with -fexcess-precision=fast, the value
15595 we set for FLT_EVAL_METHOD will be out of line with the actual precision of
15596 float_t (though they would be correct for -fexcess-precision=standard).
15598 A complete fix would modify glibc to remove the unnecessary typedef
15599 of float_t to double. */
15601 static enum flt_eval_method
15602 s390_excess_precision (enum excess_precision_type type)
15606 case EXCESS_PRECISION_TYPE_IMPLICIT:
15607 case EXCESS_PRECISION_TYPE_FAST:
15608 /* The fastest type to promote to will always be the native type,
15609 whether that occurs with implicit excess precision or otherwise. */
15611 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
15612 case EXCESS_PRECISION_TYPE_STANDARD:
15613 /* Otherwise, when we are in a standards compliant mode, to
15614 ensure consistency with the implementation in glibc, report that
15615 float is evaluated to the range and precision of double. */
15616 return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
15618 gcc_unreachable ();
15620 return FLT_EVAL_METHOD_UNPREDICTABLE;
15623 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
15625 static unsigned HOST_WIDE_INT
15626 s390_asan_shadow_offset (void)
15628 return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
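/* Example (illustrative): in 64-bit mode ASan computes the shadow
   address of ADDR as (ADDR >> 3) + (1UL << 52); in 31-bit mode the
   offset 0x20000000 is used instead.  Both follow the standard
   shadow = (addr >> 3) + offset scheme.  */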
15631 /* Initialize GCC target structure. */
15633 #undef TARGET_ASM_ALIGNED_HI_OP
15634 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15635 #undef TARGET_ASM_ALIGNED_DI_OP
15636 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15637 #undef TARGET_ASM_INTEGER
15638 #define TARGET_ASM_INTEGER s390_assemble_integer
15640 #undef TARGET_ASM_OPEN_PAREN
15641 #define TARGET_ASM_OPEN_PAREN ""
15643 #undef TARGET_ASM_CLOSE_PAREN
15644 #define TARGET_ASM_CLOSE_PAREN ""
15646 #undef TARGET_OPTION_OVERRIDE
15647 #define TARGET_OPTION_OVERRIDE s390_option_override
15649 #ifdef TARGET_THREAD_SSP_OFFSET
15650 #undef TARGET_STACK_PROTECT_GUARD
15651 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
15654 #undef TARGET_ENCODE_SECTION_INFO
15655 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15657 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15658 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
15661 #undef TARGET_HAVE_TLS
15662 #define TARGET_HAVE_TLS true
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL s390_builtin_decl

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_C_EXCESS_PRECISION
#define TARGET_C_EXCESS_PRECISION s390_excess_precision

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  s390_builtin_vectorization_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_ASAN_SHADOW_OFFSET
#define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true

#undef TARGET_KEEP_LEAF_WHEN_PROFILED
#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null

#ifdef ASM_OUTPUT_DWARF_DTPREL
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif

#undef TARGET_DWARF_FRAME_REG_MODE
#define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE s390_attribute_table

#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  s390_use_by_pieces_infrastructure_p

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode

#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment

#undef TARGET_VECTOR_ALIGNMENT
#define TARGET_VECTOR_ALIGNMENT s390_vector_alignment

#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP s390_invalid_binary_op

#ifdef HAVE_AS_MACHINE_MACHINEMODE
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START s390_asm_file_start
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END s390_asm_file_end

#if S390_USE_TARGET_ATTRIBUTE
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION s390_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
#endif

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE s390_function_specific_restore

struct gcc_target targetm = TARGET_INITIALIZER;
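
/* Illustrative sketch: TARGET_INITIALIZER gathers the overrides above
   into the targetm structure, and target-independent code reaches them
   through its function pointers, e.g.

     unsigned HOST_WIDE_INT off = targetm.asan_shadow_offset ();

   rather than calling s390_asan_shadow_offset directly.  */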

#include "gt-s390.h"