/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2017 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "target-globals.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "conditions.h"
#include "insn-attr.h"
#include "cfgcleanup.h"
#include "langhooks.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-pass.h"
#include "tm-constrs.h"
#include "symbol-summary.h"
#include "ipa-fnsummary.h"

/* This file should be included last.  */
#include "target-def.h"
/* Remember the last target of s390_set_current_function.  */
static GTY(()) tree s390_previous_fndecl;

/* Define the specific costs for a given cpu.  */

struct processor_costs
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */

#define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
struct processor_costs z900_cost =
  COSTS_N_INSNS (5),    /* M     */
  COSTS_N_INSNS (10),   /* MGHI  */
  COSTS_N_INSNS (5),    /* MH    */
  COSTS_N_INSNS (4),    /* MHI   */
  COSTS_N_INSNS (5),    /* ML    */
  COSTS_N_INSNS (5),    /* MR    */
  COSTS_N_INSNS (4),    /* MS    */
  COSTS_N_INSNS (15),   /* MSG   */
  COSTS_N_INSNS (7),    /* MSGF  */
  COSTS_N_INSNS (7),    /* MSGFR */
  COSTS_N_INSNS (10),   /* MSGR  */
  COSTS_N_INSNS (4),    /* MSR   */
  COSTS_N_INSNS (7),    /* multiplication in DFmode */
  COSTS_N_INSNS (13),   /* MXBR  */
  COSTS_N_INSNS (136),  /* SQXBR */
  COSTS_N_INSNS (44),   /* SQDBR */
  COSTS_N_INSNS (35),   /* SQEBR */
  COSTS_N_INSNS (18),   /* MADBR */
  COSTS_N_INSNS (13),   /* MAEBR */
  COSTS_N_INSNS (134),  /* DXBR  */
  COSTS_N_INSNS (30),   /* DDBR  */
  COSTS_N_INSNS (27),   /* DEBR  */
  COSTS_N_INSNS (220),  /* DLGR  */
  COSTS_N_INSNS (34),   /* DLR   */
  COSTS_N_INSNS (34),   /* DR    */
  COSTS_N_INSNS (32),   /* DSGFR */
  COSTS_N_INSNS (32),   /* DSGR  */
struct processor_costs z990_cost =
  COSTS_N_INSNS (4),    /* M     */
  COSTS_N_INSNS (2),    /* MGHI  */
  COSTS_N_INSNS (2),    /* MH    */
  COSTS_N_INSNS (2),    /* MHI   */
  COSTS_N_INSNS (4),    /* ML    */
  COSTS_N_INSNS (4),    /* MR    */
  COSTS_N_INSNS (5),    /* MS    */
  COSTS_N_INSNS (6),    /* MSG   */
  COSTS_N_INSNS (4),    /* MSGF  */
  COSTS_N_INSNS (4),    /* MSGFR */
  COSTS_N_INSNS (4),    /* MSGR  */
  COSTS_N_INSNS (4),    /* MSR   */
  COSTS_N_INSNS (1),    /* multiplication in DFmode */
  COSTS_N_INSNS (28),   /* MXBR  */
  COSTS_N_INSNS (130),  /* SQXBR */
  COSTS_N_INSNS (66),   /* SQDBR */
  COSTS_N_INSNS (38),   /* SQEBR */
  COSTS_N_INSNS (1),    /* MADBR */
  COSTS_N_INSNS (1),    /* MAEBR */
  COSTS_N_INSNS (60),   /* DXBR  */
  COSTS_N_INSNS (40),   /* DDBR  */
  COSTS_N_INSNS (26),   /* DEBR  */
  COSTS_N_INSNS (176),  /* DLGR  */
  COSTS_N_INSNS (31),   /* DLR   */
  COSTS_N_INSNS (31),   /* DR    */
  COSTS_N_INSNS (31),   /* DSGFR */
  COSTS_N_INSNS (31),   /* DSGR  */
struct processor_costs z9_109_cost =
  COSTS_N_INSNS (4),    /* M     */
  COSTS_N_INSNS (2),    /* MGHI  */
  COSTS_N_INSNS (2),    /* MH    */
  COSTS_N_INSNS (2),    /* MHI   */
  COSTS_N_INSNS (4),    /* ML    */
  COSTS_N_INSNS (4),    /* MR    */
  COSTS_N_INSNS (5),    /* MS    */
  COSTS_N_INSNS (6),    /* MSG   */
  COSTS_N_INSNS (4),    /* MSGF  */
  COSTS_N_INSNS (4),    /* MSGFR */
  COSTS_N_INSNS (4),    /* MSGR  */
  COSTS_N_INSNS (4),    /* MSR   */
  COSTS_N_INSNS (1),    /* multiplication in DFmode */
  COSTS_N_INSNS (28),   /* MXBR  */
  COSTS_N_INSNS (130),  /* SQXBR */
  COSTS_N_INSNS (66),   /* SQDBR */
  COSTS_N_INSNS (38),   /* SQEBR */
  COSTS_N_INSNS (1),    /* MADBR */
  COSTS_N_INSNS (1),    /* MAEBR */
  COSTS_N_INSNS (60),   /* DXBR  */
  COSTS_N_INSNS (40),   /* DDBR  */
  COSTS_N_INSNS (26),   /* DEBR  */
  COSTS_N_INSNS (30),   /* DLGR  */
  COSTS_N_INSNS (23),   /* DLR   */
  COSTS_N_INSNS (23),   /* DR    */
  COSTS_N_INSNS (24),   /* DSGFR */
  COSTS_N_INSNS (24),   /* DSGR  */
struct processor_costs z10_cost =
  COSTS_N_INSNS (10),   /* M     */
  COSTS_N_INSNS (10),   /* MGHI  */
  COSTS_N_INSNS (10),   /* MH    */
  COSTS_N_INSNS (10),   /* MHI   */
  COSTS_N_INSNS (10),   /* ML    */
  COSTS_N_INSNS (10),   /* MR    */
  COSTS_N_INSNS (10),   /* MS    */
  COSTS_N_INSNS (10),   /* MSG   */
  COSTS_N_INSNS (10),   /* MSGF  */
  COSTS_N_INSNS (10),   /* MSGFR */
  COSTS_N_INSNS (10),   /* MSGR  */
  COSTS_N_INSNS (10),   /* MSR   */
  COSTS_N_INSNS (1),    /* multiplication in DFmode */
  COSTS_N_INSNS (50),   /* MXBR  */
  COSTS_N_INSNS (120),  /* SQXBR */
  COSTS_N_INSNS (52),   /* SQDBR */
  COSTS_N_INSNS (38),   /* SQEBR */
  COSTS_N_INSNS (1),    /* MADBR */
  COSTS_N_INSNS (1),    /* MAEBR */
  COSTS_N_INSNS (111),  /* DXBR  */
  COSTS_N_INSNS (39),   /* DDBR  */
  COSTS_N_INSNS (32),   /* DEBR  */
  COSTS_N_INSNS (160),  /* DLGR  */
  COSTS_N_INSNS (71),   /* DLR   */
  COSTS_N_INSNS (71),   /* DR    */
  COSTS_N_INSNS (71),   /* DSGFR */
  COSTS_N_INSNS (71),   /* DSGR  */
struct processor_costs z196_cost =
  COSTS_N_INSNS (7),    /* M     */
  COSTS_N_INSNS (5),    /* MGHI  */
  COSTS_N_INSNS (5),    /* MH    */
  COSTS_N_INSNS (5),    /* MHI   */
  COSTS_N_INSNS (7),    /* ML    */
  COSTS_N_INSNS (7),    /* MR    */
  COSTS_N_INSNS (6),    /* MS    */
  COSTS_N_INSNS (8),    /* MSG   */
  COSTS_N_INSNS (6),    /* MSGF  */
  COSTS_N_INSNS (6),    /* MSGFR */
  COSTS_N_INSNS (8),    /* MSGR  */
  COSTS_N_INSNS (6),    /* MSR   */
  COSTS_N_INSNS (1),    /* multiplication in DFmode */
  COSTS_N_INSNS (40),   /* MXBR B+40  */
  COSTS_N_INSNS (100),  /* SQXBR B+100 */
  COSTS_N_INSNS (42),   /* SQDBR B+42 */
  COSTS_N_INSNS (28),   /* SQEBR B+28 */
  COSTS_N_INSNS (1),    /* MADBR B */
  COSTS_N_INSNS (1),    /* MAEBR B */
  COSTS_N_INSNS (101),  /* DXBR B+101 */
  COSTS_N_INSNS (29),   /* DDBR  */
  COSTS_N_INSNS (22),   /* DEBR  */
  COSTS_N_INSNS (160),  /* DLGR cracked  */
  COSTS_N_INSNS (160),  /* DLR cracked   */
  COSTS_N_INSNS (160),  /* DR expanded   */
  COSTS_N_INSNS (160),  /* DSGFR cracked */
  COSTS_N_INSNS (160),  /* DSGR cracked  */
struct processor_costs zEC12_cost =
  COSTS_N_INSNS (7),    /* M     */
  COSTS_N_INSNS (5),    /* MGHI  */
  COSTS_N_INSNS (5),    /* MH    */
  COSTS_N_INSNS (5),    /* MHI   */
  COSTS_N_INSNS (7),    /* ML    */
  COSTS_N_INSNS (7),    /* MR    */
  COSTS_N_INSNS (6),    /* MS    */
  COSTS_N_INSNS (8),    /* MSG   */
  COSTS_N_INSNS (6),    /* MSGF  */
  COSTS_N_INSNS (6),    /* MSGFR */
  COSTS_N_INSNS (8),    /* MSGR  */
  COSTS_N_INSNS (6),    /* MSR   */
  COSTS_N_INSNS (1),    /* multiplication in DFmode */
  COSTS_N_INSNS (40),   /* MXBR B+40  */
  COSTS_N_INSNS (100),  /* SQXBR B+100 */
  COSTS_N_INSNS (42),   /* SQDBR B+42 */
  COSTS_N_INSNS (28),   /* SQEBR B+28 */
  COSTS_N_INSNS (1),    /* MADBR B */
  COSTS_N_INSNS (1),    /* MAEBR B */
  COSTS_N_INSNS (131),  /* DXBR B+131 */
  COSTS_N_INSNS (29),   /* DDBR  */
  COSTS_N_INSNS (22),   /* DEBR  */
  COSTS_N_INSNS (160),  /* DLGR cracked  */
  COSTS_N_INSNS (160),  /* DLR cracked   */
  COSTS_N_INSNS (160),  /* DR expanded   */
  COSTS_N_INSNS (160),  /* DSGFR cracked */
  COSTS_N_INSNS (160),  /* DSGR cracked  */
  /* The preferred name to be used in user visible output.  */
  const char *const name;
  /* CPU name as it should be passed to Binutils via .machine  */
  const char *const binutils_name;
  const enum processor_type processor;
  const struct processor_costs *cost;
const processor_table[] =
  { "g5",     "g5",     PROCESSOR_9672_G5,     &z900_cost },
  { "g6",     "g6",     PROCESSOR_9672_G6,     &z900_cost },
  { "z900",   "z900",   PROCESSOR_2064_Z900,   &z900_cost },
  { "z990",   "z990",   PROCESSOR_2084_Z990,   &z990_cost },
  { "z9-109", "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
  { "z9-ec",  "z9-ec",  PROCESSOR_2094_Z9_EC,  &z9_109_cost },
  { "z10",    "z10",    PROCESSOR_2097_Z10,    &z10_cost },
  { "z196",   "z196",   PROCESSOR_2817_Z196,   &z196_cost },
  { "zEC12",  "zEC12",  PROCESSOR_2827_ZEC12,  &zEC12_cost },
  { "z13",    "z13",    PROCESSOR_2964_Z13,    &zEC12_cost },
  { "z14",    "arch12", PROCESSOR_3906_Z14,    &zEC12_cost },
  { "native", "",       PROCESSOR_NATIVE,      NULL }
extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx_insn *last_scheduled_insn;
#define MAX_SCHED_UNITS 3
static int last_scheduled_unit_distance[MAX_SCHED_UNITS];

/* The maximum score added for an instruction whose unit hasn't been
   in use for MAX_SCHED_MIX_DISTANCE steps.  Increase this value to
   give instruction mix scheduling more priority over instruction
   grouping.  */
#define MAX_SCHED_MIX_SCORE 8

/* The maximum distance up to which individual scores will be
   calculated.  Everything beyond this gives MAX_SCHED_MIX_SCORE.
   Increase this with the OOO window size of the machine.  */
#define MAX_SCHED_MIX_DISTANCE 100
/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general form
     base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */
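/* For instance, in the RX-format instruction "l %r1,100(%r2,%r3)"
   the memory operand uses base %r3, index %r2 and displacement 100,
   so the effective address is r3 + r2 + 100.  */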
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Numbers of the first and last gpr for which slots in the
     register save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
      0 - does not need to be saved at all  */
#define SAVE_SLOT_NONE   0
#define SAVE_SLOT_STACK  -1
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored.  */
  int first_restore_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;
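  /* E.g., per the table above, marking f0 (hard reg 16) for saving
     sets bit 0 of fpr_bitmap, and marking f8 (hard reg 24) sets
     bit 8.  */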
  /* Number of floating point registers f8-f15 which must be saved.  */

  /* Set if the return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 being
     saved to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;

/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     FPRs.  */

  /* For -fsplit-stack support: A stack local which holds a pointer to
     the stack arguments for a function with a variable number of
     arguments.  This is set at the start of the function and is used
     to initialize the overflow_arg_area field of the va_list
     structure.  */
  rtx split_stack_varargs_pointer;
/* A few accessor macros for struct cfun->machine->s390_frame_layout.  */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_save_arg_fprs_p (!!(TARGET_64BIT                           \
                                 ? cfun_frame_layout.fpr_bitmap & 0x0f  \
                                 : cfun_frame_layout.fpr_bitmap & 0x03))
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
#define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (REGNO - FPR0_REGNUM)))
#define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (REGNO - FPR0_REGNUM))))
#define cfun_gpr_save_slot(REGNO) \
  cfun->machine->frame_layout.gpr_save_slots[REGNO]

/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
#define VEC_ARG_NUM_REG 8

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE) \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))

/* That's the read ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
/* Indicate which ABI has been used for passing vector args.
   0 - no vector type arguments have been passed where the ABI is relevant
   1 - the old ABI has been used
   2 - a vector type argument has been passed either in a vector register
       or on the stack by value  */
static int s390_vector_abi = 0;

/* Set the vector ABI marker if TYPE is subject to the vector ABI
   switch.  The vector ABI affects only vector data types.  There are
   two aspects of the vector ABI relevant here:

   1. vectors >= 16 bytes have an alignment of 8 bytes with the new
      ABI and natural alignment with the old.

   2. vectors <= 16 bytes are passed in VRs or by value on the stack
      with the new ABI but by reference on the stack with the old.

   If ARG_P is true TYPE is used for a function argument or return
   value.  The ABI marker then is set for all vector data types.  If
   ARG_P is false only type 1 vectors are being checked.  */
s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
  static hash_set<const_tree> visited_types_hash;

  if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)

  if (visited_types_hash.contains (type))

  visited_types_hash.add (type);

  if (VECTOR_TYPE_P (type))
      int type_size = int_size_in_bytes (type);

      /* Outside of arguments only the alignment changes, and this
         happens only for vector types >= 16 bytes.  */
      if (!arg_p && type_size < 16)

      /* In arguments vector types > 16 bytes are passed as before
         (GCC never enforced the bigger alignment for arguments which
         was required by the old vector ABI).  However, it might still
         be ABI relevant due to the changed alignment if it is a
         struct member.  */
      if (arg_p && type_size > 16 && !in_struct_p)

      s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
  else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
      /* ARRAY_TYPE: Since with neither of the ABIs we have more than
         natural alignment there will never be ABI dependent padding
         in an array type.  That's why we do not set in_struct_p to
         true here.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
  else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
      /* Check the return type.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);

      for (arg_chain = TYPE_ARG_TYPES (type);
           arg_chain = TREE_CHAIN (arg_chain))
        s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
  else if (RECORD_OR_UNION_TYPE_P (type))
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
          if (TREE_CODE (field) != FIELD_DECL)

          s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
/* System z builtins.  */

#include "s390-builtins.h"

const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"

const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"

const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"

bflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) FLAGS,
#include "s390-builtins.def"

opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) OPFLAGS,
#include "s390-builtins.def"

tree s390_builtin_types[BT_MAX];
tree s390_builtin_fn_types[BT_FN_MAX];
tree s390_builtin_decls[S390_BUILTIN_MAX +
                        S390_OVERLOADED_BUILTIN_MAX +
                        S390_OVERLOADED_BUILTIN_VAR_MAX];

static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
#define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
s390_init_builtins (void)
  /* These definitions are being used in s390-builtins.def.  */
  tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
  tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
  tree c_uint64_type_node;

  /* The uint64_type_node from tree.c is not compatible with the C99
     uint64_t data type.  What we want is c_uint64_type_node from
     c-common.c.  But since backend code is not supposed to interface
     with the frontend we recreate it here.  */
    c_uint64_type_node = long_unsigned_type_node;
    c_uint64_type_node = long_long_unsigned_type_node;

#define DEF_TYPE(INDEX, NODE, CONST_P)                  \
  if (s390_builtin_types[INDEX] == NULL)                \
    s390_builtin_types[INDEX] = (!CONST_P) ?            \
      (NODE) : build_type_variant ((NODE), 1, 0);

#undef DEF_POINTER_TYPE
#define DEF_POINTER_TYPE(INDEX, INDEX_BASE)                     \
  if (s390_builtin_types[INDEX] == NULL)                        \
    s390_builtin_types[INDEX] =                                 \
      build_pointer_type (s390_builtin_types[INDEX_BASE]);

#undef DEF_DISTINCT_TYPE
#define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE)                    \
  if (s390_builtin_types[INDEX] == NULL)                        \
    s390_builtin_types[INDEX] =                                 \
      build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);

#undef DEF_VECTOR_TYPE
#define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS)            \
  if (s390_builtin_types[INDEX] == NULL)                        \
    s390_builtin_types[INDEX] =                                 \
      build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_OPAQUE_VECTOR_TYPE
#define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS)     \
  if (s390_builtin_types[INDEX] == NULL)                        \
    s390_builtin_types[INDEX] =                                 \
      build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#define DEF_FN_TYPE(INDEX, args...)                     \
  if (s390_builtin_fn_types[INDEX] == NULL)             \
    s390_builtin_fn_types[INDEX] =                      \
      build_function_type_list (args, NULL_TREE);
#define DEF_OV_TYPE(...)
#include "s390-builtin-types.def"

#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE)    \
  if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL)          \
    s390_builtin_decls[S390_BUILTIN_##NAME] =                   \
      add_builtin_function ("__builtin_" #NAME,                 \
                            s390_builtin_fn_types[FNTYPE],      \
                            S390_BUILTIN_##NAME,                \
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE)         \
  if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
    s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
      add_builtin_function ("__builtin_" #NAME,                             \
                            s390_builtin_fn_types[FNTYPE],                  \
                            S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
/* Return true if ARG is appropriate as argument number ARGNUM of
   builtin DECL.  The operand flags from s390-builtins.def have to be
   passed as OP_FLAGS.  */
s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
  if (O_UIMM_P (op_flags))
      int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_U1];

      if (!tree_fits_uhwi_p (arg)
          || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
          error ("constant argument %d for builtin %qF is out of range (0.."
                 HOST_WIDE_INT_PRINT_UNSIGNED ")",
                 (HOST_WIDE_INT_1U << bitwidth) - 1);

  if (O_SIMM_P (op_flags))
      int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_S2];

      if (!tree_fits_shwi_p (arg)
          || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
          || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
          error ("constant argument %d for builtin %qF is out of range ("
                 HOST_WIDE_INT_PRINT_DEC ".."
                 HOST_WIDE_INT_PRINT_DEC ")",
                 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
                 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
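  /* For instance, a 12-bit signed operand must lie in the range
     -(1 << 11) .. (1 << 11) - 1, i.e. -2048..2047, while a 12-bit
     unsigned operand must lie in 0..4095.  */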
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  call_expr_arg_iterator iter;
  unsigned int all_op_flags = opflags_for_builtin (fcode);
  machine_mode last_vec_mode = VOIDmode;

  if (TARGET_DEBUG_ARG)
             "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
             (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
             bflags_for_builtin (fcode));

  if (S390_USE_TARGET_ATTRIBUTE)
      bflags = bflags_for_builtin (fcode);
      if ((bflags & B_HTM) && !TARGET_HTM)
          error ("builtin %qF is not supported without -mhtm "
                 "(default with -march=zEC12 and higher).", fndecl);
      if (((bflags & B_VX) || (bflags & B_VXE)) && !TARGET_VX)
          error ("builtin %qF requires -mvx "
                 "(default with -march=z13 and higher).", fndecl);

      if ((bflags & B_VXE) && !TARGET_VXE)
          error ("builtin %qF requires z14 or higher.", fndecl);

  if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
      && fcode < S390_ALL_BUILTIN_MAX)
  else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
      icode = code_for_builtin[fcode];
      /* Set a flag in the machine specific cfun part in order to support
         saving/restoring of FPRs.  */
      if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
        cfun->machine->tbegin_p = true;
  else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
      error ("unresolved overloaded builtin");
    internal_error ("bad builtin fcode");
    internal_error ("bad builtin icode");
  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

      machine_mode tmode = insn_data[icode].operand[0].mode;
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);

      /* There are builtins (e.g. vec_promote) with no vector
         arguments but an element selector.  So we have to also look
         at the vector return type when emitting the modulo
         operation.  */
      if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
        last_vec_mode = insn_data[icode].operand[0].mode;

  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
      const struct insn_operand_data *insn_op;
      unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);

      all_op_flags = all_op_flags >> O_SHIFT;

      if (arg == error_mark_node)
      if (arity >= MAX_ARGS)

      if (O_IMM_P (op_flags)
          && TREE_CODE (arg) != INTEGER_CST)
          error ("constant value required for builtin %qF argument %d",

      if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))

      insn_op = &insn_data[icode].operand[arity + nonvoid];
      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      /* expand_expr truncates constants to the target mode only if it
         is "convenient".  However, our checks below rely on this
         being done.  */
      if (CONST_INT_P (op[arity])
          && SCALAR_INT_MODE_P (insn_op->mode)
          && GET_MODE (op[arity]) != insn_op->mode)
        op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),

      /* Wrap the expanded RTX for pointer types into a MEM expr with
         the proper mode.  This allows us to use e.g. (match_operand
         "memory_operand"..) in the insn patterns instead of (mem
         (match_operand "address_operand)).  This is helpful for
         patterns not just accepting MEMs.  */
      if (POINTER_TYPE_P (TREE_TYPE (arg))
          && insn_op->predicate != address_operand)
        op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);

      /* Expand the modulo operation required on element selectors.  */
      if (op_flags == O_ELEM)
          gcc_assert (last_vec_mode != VOIDmode);
          op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
                        GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
                        NULL_RTX, 1, OPTAB_DIRECT);

      /* Record the vector mode used for an element selector.  This assumes:
         1. There is no builtin with two different vector modes and an
            element selector.
         2. The element selector comes after the vector type it is
            referring to.
         This is currently true for all the builtins, but FIXME: we
         should better check for that.  */
      if (VECTOR_MODE_P (insn_op->mode))
        last_vec_mode = insn_op->mode;

      if (insn_op->predicate (op[arity], insn_op->mode))

      if (MEM_P (op[arity])
          && insn_op->predicate == memory_operand
          && (GET_MODE (XEXP (op[arity], 0)) == Pmode
              || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
          op[arity] = replace_equiv_address (op[arity],
                                             copy_to_mode_reg (Pmode,
                                               XEXP (op[arity], 0)));
      /* Some of the builtins require different modes/types than the
         pattern in order to implement a specific API.  Instead of
         adding many expanders which do the mode change we do it here.
         E.g. s390_vec_add_u128, which is required to have vector
         unsigned char arguments, is mapped to addti3.  */
      else if (insn_op->mode != VOIDmode
               && GET_MODE (op[arity]) != VOIDmode
               && GET_MODE (op[arity]) != insn_op->mode
               && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
                                                   GET_MODE (op[arity]), 0))
      else if (GET_MODE (op[arity]) == insn_op->mode
               || GET_MODE (op[arity]) == VOIDmode
               || (insn_op->predicate == address_operand
                   && GET_MODE (op[arity]) == Pmode))
          /* An address_operand usually has VOIDmode in the expander
             so we cannot use this.  */
          machine_mode target_mode =
            (insn_op->predicate == address_operand
             ? (machine_mode) Pmode : insn_op->mode);
          op[arity] = copy_to_mode_reg (target_mode, op[arity]);

      if (!insn_op->predicate (op[arity], insn_op->mode))
          error ("invalid argument %d for builtin %qF", arity + 1, fndecl);

      pat = GEN_FCN (icode) (target);
      pat = GEN_FCN (icode) (target, op[0]);
      pat = GEN_FCN (icode) (op[0]);
      pat = GEN_FCN (icode) (target, op[0], op[1]);
      pat = GEN_FCN (icode) (op[0], op[1]);
      pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
static const int s390_hotpatch_hw_max = 1000000;
static int s390_hotpatch_hw_before_label = 0;
static int s390_hotpatch_hw_after_label = 0;

/* Check whether the hotpatch attribute is applied to a function and,
   if it has an argument, whether the argument is valid.  */

s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
  if (TREE_CODE (*node) != FUNCTION_DECL)
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
      *no_add_attrs = true;
  if (args != NULL && TREE_CHAIN (args) != NULL)
      expr = TREE_VALUE (args);
      expr2 = TREE_VALUE (TREE_CHAIN (args));
  if (args == NULL || TREE_CHAIN (args) == NULL)
  else if (TREE_CODE (expr) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
           || wi::gtu_p (expr, s390_hotpatch_hw_max))
  else if (TREE_CODE (expr2) != INTEGER_CST
           || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
           || wi::gtu_p (expr2, s390_hotpatch_hw_max))
      error ("requested %qE attribute is not a comma separated pair of"
             " non-negative integer constants or too large (max. %d)", name,
             s390_hotpatch_hw_max);
      *no_add_attrs = true;
/* Expand the s390_vector_bool type attribute.  */

s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
  tree type = *node, result = NULL_TREE;

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);
    case E_DImode: case E_V2DImode:
      result = s390_builtin_types[BT_BV2DI];
    case E_SImode: case E_V4SImode:
      result = s390_builtin_types[BT_BV4SI];
    case E_HImode: case E_V8HImode:
      result = s390_builtin_types[BT_BV8HI];
    case E_QImode: case E_V16QImode:
      result = s390_builtin_types[BT_BV16QI];

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  *node = lang_hooks.types.reconstruct_complex_type (*node, result);

static const struct attribute_spec s390_attribute_table[] = {
  { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
  { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
  { NULL, 0, 0, false, false, false, NULL, false }
/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label.  */
s390_label_align (rtx_insn *label)
  rtx_insn *prev_insn = prev_active_insn (label);

  if (prev_insn == NULL_RTX)

  set = single_set (prev_insn);

  if (set == NULL_RTX)

  src = SET_SRC (set);

  /* Don't align literal pool base labels.  */
  if (GET_CODE (src) == UNSPEC
      && XINT (src, 1) == UNSPEC_MAIN_BASE)

  return align_labels_log;

static GTY(()) rtx got_symbol;

/* Return the GOT table symbol.  The symbol will be created when the
   function is invoked for the first time.  */

s390_got_symbol (void)
      got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;

static scalar_int_mode
s390_libgcc_cmp_return_mode (void)
  return TARGET_64BIT ? DImode : SImode;

static scalar_int_mode
s390_libgcc_shift_count_mode (void)
  return TARGET_64BIT ? DImode : SImode;

static scalar_int_mode
s390_unwind_word_mode (void)
  return TARGET_64BIT ? DImode : SImode;
/* Return true if the back end supports mode MODE.  */
s390_scalar_mode_supported_p (scalar_mode mode)
  /* In contrast to the default implementation, reject TImode constants
     on 31-bit TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);

/* Return true if the back end supports vector mode MODE.  */
s390_vector_mode_supported_p (machine_mode mode)
  if (!VECTOR_MODE_P (mode)
      || GET_MODE_SIZE (mode) > 16)

  inner = GET_MODE_INNER (mode);

/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

s390_set_has_landing_pad_p (bool value)
  cfun->machine->has_landing_pad_p = value;

/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
  if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
      || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

s390_match_ccmode_set (rtx set, machine_mode req_mode)
  machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  /* These modes are supposed to be used only in CC consumer
     patterns.  */
  gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
              && req_mode != CCVFALLmode && req_mode != CCVFANYmode);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))

  set_mode = GET_MODE (SET_DEST (set));
      if (req_mode != set_mode)

      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
          && req_mode != CCSRmode && req_mode != CCURmode
          && req_mode != CCZ1mode)

      if (req_mode != CCAmode)

  return (GET_MODE (SET_SRC (set)) == set_mode);

/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        rtx set = XVECEXP (PATTERN (insn), 0, i);
        if (GET_CODE (set) == SET)
          if (!s390_match_ccmode_set (set, req_mode))

/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM).  */

s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
  /* ??? Fixme: should work on CONST_WIDE_INT as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0)  */
  if (INTVAL (op2) == 0)

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128)  */
  if (INTVAL (op2) == INTVAL (op1))

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2.  e.g.:
     if ((a & (16 + 128)) == 16)  -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2  */
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
        return bit0 > bit1 ? CCT1mode : CCT2mode;
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
           || GET_CODE (op1) == NEG)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      if (GET_CODE (op0) == AND)
          /* Check whether we can potentially do it via TM.  */
          machine_mode ccmode;
          ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
          if (ccmode != VOIDmode)
              /* Relax CCTmode to CCZmode to allow fall-back to AND
                 if that turns out to be beneficial.  */
              return ccmode == CCTmode ? CCZmode : ccmode;

      if (register_operand (op0, HImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
      if (register_operand (op0, QImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 255))

      /* The only overflow condition of NEG and ABS happens when
         INT_MIN (i.e. -INT_MAX - 1) is used as parameter: the result
         stays negative.  So we have an overflow from a positive value
         to a negative.  Using CCAP mode the resulting cc can be used
         for comparisons.  */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      /* If constants are involved in an add instruction it is possible to use
         the resulting cc for comparisons with zero.  Knowing the sign of the
         constant the overflow behavior gets predictable.  e.g.:
           int a, b; if ((b = a + c) > 0)
         with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
              || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
                  /* Avoid INT32_MIN on 32 bit.  */
                  && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
          if (INTVAL (XEXP((op0), 1)) < 0)

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)

      if (GET_CODE (op0) == PLUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)

      if (GET_CODE (op0) == MINUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
                              bool op0_preserve_value)
  if (op0_preserve_value)

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
          && pos >= 0 && pos + len <= modesize
          && modesize <= HOST_BITS_PER_WIDE_INT)
          unsigned HOST_WIDE_INT block;
          block = (HOST_WIDE_INT_1U << len) - 1;
          block <<= modesize - pos - len;

          *op0 = gen_rtx_AND (GET_MODE (inner), inner,
                              gen_int_mode (block, GET_MODE (inner)));
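          /* A worked example of the mask computation: for an SImode
             extraction with LEN 8 and POS 0, BLOCK becomes
             0xff << 24 = 0xff000000, i.e. the mask of the 8 most
             significant bits, since POS here counts from the most
             significant bit.  */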
  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
          && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
          && (GET_MODE_SIZE (GET_MODE (inner))
              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
              & GET_MODE_MASK (GET_MODE (inner))
              & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
        inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
          int part = s390_single_part (XEXP (*op0, 1),
                                       GET_MODE (inner), QImode, 0);
              mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
              inner = adjust_address_nv (inner, QImode, part);
              *op0 = gen_rtx_AND (QImode, inner, mask);

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
          & ~HOST_WIDE_INT_UC (0xffff)) == 0)
      *op0 = gen_lowpart (HImode, *op0);

  /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
      enum rtx_code new_code = UNKNOWN;
        case EQ: new_code = EQ;  break;
        case NE: new_code = NE;  break;
        case LT: new_code = GTU; break;
        case GT: new_code = LTU; break;
        case LE: new_code = GEU; break;
        case GE: new_code = LEU; break;

      if (new_code != UNKNOWN)
          *op0 = XVECEXP (*op0, 0, 0);

  /* Remove redundant UNSPEC_CC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && CONST_INT_P (*op1))
      enum rtx_code new_code = UNKNOWN;
      switch (GET_MODE (XVECEXP (*op0, 0, 0)))
            case EQ: new_code = EQ; break;
            case NE: new_code = NE; break;

      if (new_code != UNKNOWN)
          /* For CCRAWmode put the required cc mask into the second
             operand.  */
          if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
              && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
            *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
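          /* E.g. a required cc value of 2 becomes the mask
             1 << (3 - 2) = 0b0010, which selects exactly CC2 in the
             4-bit condition mask used by CCRAWmode.  */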
          *op0 = XVECEXP (*op0, 0, 0);

  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
      if ((*code == EQ && GET_CODE (*op0) == NE)
          || (*code == NE && GET_CODE (*op0) == EQ))
      *op0 = XEXP (*op0, 0);

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int)swap_condition ((enum rtx_code)*code);

  /* A comparison result is compared against zero.  Replace it with
     the (perhaps inverted) original comparison.
     This probably should be done by simplify_relational_operation.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && COMPARISON_P (*op0)
      && CC_REG_P (XEXP (*op0, 0)))
      enum rtx_code new_code;

        new_code = reversed_comparison_code_parts (GET_CODE (*op0),
                                                   XEXP (*op1, 0), NULL);
        new_code = GET_CODE (*op0);

      if (new_code != UNKNOWN)
          *op1 = XEXP (*op0, 1);
          *op0 = XEXP (*op0, 0);
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
  machine_mode mode = s390_select_ccmode (code, op0, op1);

  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
      /* Do not output a redundant compare instruction if a
         compare_and_swap pattern already computed the result and the
         machine modes are compatible.  */
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)

      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);

/* Emit a SImode, DImode or TImode compare and swap instruction setting
   MEM to NEW_RTX if OLD matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
                            rtx cmp, rtx new_rtx, machine_mode ccmode)
  cc = gen_rtx_REG (ccmode, CC_REGNUM);
  switch (GET_MODE (mem))
      emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp,
      emit_insn (gen_atomic_compare_and_swapdi_internal (old, mem, cmp,
      emit_insn (gen_atomic_compare_and_swapti_internal (old, mem, cmp,

  return s390_emit_compare (code, cc, const0_rtx);

/* Emit a jump instruction to TARGET and return it.  If COND is
   NULL_RTX, emit an unconditional jump, else a conditional jump under
   condition COND.  */

s390_emit_jump (rtx target, rtx cond)
    target = gen_rtx_LABEL_REF (VOIDmode, target);
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (pc_rtx, target);
  return emit_jump_insn (insn);
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

s390_branch_condition_mask (rtx code)
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx
              || (GET_MODE (XEXP (code, 0)) == CCRAWmode
                  && CONST_INT_P (XEXP (code, 1))));

  switch (GET_MODE (XEXP (code, 0)))
      switch (GET_CODE (code))
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;

      switch (GET_CODE (code))
        case EQ: return CC1;
        case NE: return CC0 | CC2 | CC3;

      switch (GET_CODE (code))
        case EQ: return CC2;
        case NE: return CC0 | CC1 | CC3;

      switch (GET_CODE (code))
        case EQ: return CC3;
        case NE: return CC0 | CC1 | CC2;

      switch (GET_CODE (code))
        case EQ: return CC0 | CC2;
        case NE: return CC1 | CC3;

      switch (GET_CODE (code))
        case LTU: return CC2 | CC3;  /* carry */
        case GEU: return CC0 | CC1;  /* no carry */

      switch (GET_CODE (code))
        case GTU: return CC0 | CC1;  /* borrow */
        case LEU: return CC2 | CC3;  /* no borrow */

      switch (GET_CODE (code))
        case EQ:  return CC0 | CC2;
        case NE:  return CC1 | CC3;
        case LTU: return CC1;
        case GTU: return CC3;
        case LEU: return CC1 | CC2;
        case GEU: return CC2 | CC3;

      switch (GET_CODE (code))
        case EQ:  return CC0;
        case NE:  return CC1 | CC2 | CC3;
        case LTU: return CC1;
        case GTU: return CC2;
        case LEU: return CC0 | CC1;
        case GEU: return CC0 | CC2;

      switch (GET_CODE (code))
        case EQ:  return CC0;
        case NE:  return CC2 | CC1 | CC3;
        case LTU: return CC2;
        case GTU: return CC1;
        case LEU: return CC0 | CC2;
        case GEU: return CC0 | CC1;

      switch (GET_CODE (code))
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1 | CC3;
        case GT: return CC2;
        case LE: return CC0 | CC1 | CC3;
        case GE: return CC0 | CC2;

      switch (GET_CODE (code))
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1;
        case GT: return CC2 | CC3;
        case LE: return CC0 | CC1;
        case GE: return CC0 | CC2 | CC3;

      switch (GET_CODE (code))
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1;
        case GT: return CC2;
        case LE: return CC0 | CC1;
        case GE: return CC0 | CC2;
        case UNORDERED: return CC3;
        case ORDERED: return CC0 | CC1 | CC2;
        case UNEQ: return CC0 | CC3;
        case UNLT: return CC1 | CC3;
        case UNGT: return CC2 | CC3;
        case UNLE: return CC0 | CC1 | CC3;
        case UNGE: return CC0 | CC2 | CC3;
        case LTGT: return CC1 | CC2;

      switch (GET_CODE (code))
        case EQ: return CC0;
        case NE: return CC2 | CC1 | CC3;
        case LT: return CC2;
        case GT: return CC1;
        case LE: return CC0 | CC2;
        case GE: return CC0 | CC1;
        case UNORDERED: return CC3;
        case ORDERED: return CC0 | CC2 | CC1;
        case UNEQ: return CC0 | CC3;
        case UNLT: return CC2 | CC3;
        case UNGT: return CC1 | CC3;
        case UNLE: return CC0 | CC2 | CC3;
        case UNGE: return CC0 | CC1 | CC3;
        case LTGT: return CC2 | CC1;
    /* Vector comparison modes.  */
    /* CC2 will never be set.  It however is part of the negated
       mask.  */
      switch (GET_CODE (code))
        case GE: return CC0;
        /* The inverted modes are in fact *any* modes.  */
        case LT: return CC3 | CC1 | CC2;

      switch (GET_CODE (code))
        case GE: return CC0 | CC1;
        /* The inverted modes are in fact *all* modes.  */
        case LT: return CC3 | CC2;

      switch (GET_CODE (code))
        case GE: return CC0;
        /* The inverted modes are in fact *any* modes.  */
        case UNLT: return CC3 | CC1 | CC2;

      switch (GET_CODE (code))
        case GE: return CC0 | CC1;
        /* The inverted modes are in fact *all* modes.  */
        case UNLT: return CC3 | CC2;

      switch (GET_CODE (code))
          return INTVAL (XEXP (code, 1));
          return (INTVAL (XEXP (code, 1))) ^ 0xf;
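          /* E.g. a raw mask of 0b0100 (CC1) in the second operand
             inverts to 0b1011 here: the XOR with 0xf flips all four
             condition-code bits.  */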
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

s390_compare_and_branch_condition_mask (rtx code)
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))

/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

s390_branch_condition_mnemonic (rtx code, int inv)
  static const char *const mnemonic[16] =
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && (XEXP (code, 1) == const0_rtx
          || (GET_MODE (XEXP (code, 0)) == CCRAWmode
              && CONST_INT_P (XEXP (code, 1)))))
    mask = s390_branch_condition_mask (code);
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
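/* A worked example: a GEU comparison in CCUmode yields the mask
   CC0 | CC2 = 0b1010 = 10 above, and mnemonic[10] is "he"
   (branch on high or equal).  */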
/* Return the part of OP which has a value different from DEF.
   The size of the part is determined by MODE.
   Use this function only if you already know that OP really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, machine_mode mode, int def)
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;

  for (i = 0; i < max_parts; i++)
        value = UINTVAL (op);
        value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
        return value & part_mask;
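/* A sketch of the behavior: with MODE == HImode and DEF == -1, an OP
   of 0xffff1234ffffffff has exactly one halfword differing from
   0xffff, so the function returns 0x1234.  */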
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

s390_single_part (rtx op,
                  machine_mode part_mode,
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;

  if (GET_CODE (op) != CONST_INT)

  for (i = 0; i < n_parts; i++)
        value = UINTVAL (op);
        value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))

  return part == -1 ? -1 : n_parts - 1 - part;
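/* A sketch of the numbering: for OP == 0x00120000 with MODE == SImode,
   PART_MODE == HImode and DEF == 0, only the high halfword differs
   from DEF, and the function returns 0 -- parts are counted from the
   most significant part down.  */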
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in (the lower SIZE bits of) IN.

   PSTART and PEND can be used to obtain the start and end
   position (inclusive) of the bitfield relative to 64
   bits.  *PSTART / *PEND gives the position of the first/last bit
   of the bitfield counting from the highest order bit starting
   with zero.  */

s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
                                  int *pstart, int *pend)
  int lowbit = HOST_BITS_PER_WIDE_INT - 1;
  int highbit = HOST_BITS_PER_WIDE_INT - size;
  unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;

  gcc_assert (!!pstart == !!pend);
  for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
      /* Look for the rightmost bit of a contiguous range of ones.  */

      /* Look for the first zero bit after the range of ones.  */
      if (! (bitmask & in))

  /* We're one past the last one-bit.  */

  /* No one bits found.  */

  if (start > highbit)
      unsigned HOST_WIDE_INT mask;

      /* Calculate a mask for all bits beyond the contiguous bits.  */
      mask = ((~HOST_WIDE_INT_0U >> highbit)
              & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));

      /* There are more bits set beyond the first range of one bits.  */

/* Same as s390_contiguous_bitmask_nowrap_p but also returns true
   if ~IN contains a contiguous bitfield.  In that case, *END is <
   *START.

   If WRAP_P is true, a bitmask that wraps around is also tested.
   When a wraparound occurs *START is greater than *END (in
   non-null pointers), and the uppermost (64 - SIZE) bits are thus
   part of the range.  If WRAP_P is false, no wraparound is
   tested.  */

s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
                           int size, int *start, int *end)
  int bs = HOST_BITS_PER_WIDE_INT;

  gcc_assert (!!start == !!end);
  if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
    /* This cannot be expressed as a contiguous bitmask.  Exit early because
       the second call of s390_contiguous_bitmask_nowrap_p would accept this
       as contiguous.  */
  b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);

  b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);

      gcc_assert (s >= 1);
      *start = ((e + 1) & (bs - 1));
      *end = ((s - 1 + bs) & (bs - 1));
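/* A sketch of the wrap case: for IN = 0xe0000007 with SIZE = 32 the
   three low ones and the three high ones of the 32-bit value form a
   single range through the word boundary.  IN itself is not a
   contiguous mask, but ~IN is, so the function returns true with
   *START > *END.  */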
2350 /* Return true if OP contains the same contiguous bitfield in *all*
2351 its elements. START and END can be used to obtain the start and
2352 end position of the bitfield.
2354 START/STOP give the position of the first/last bit of the bitfield
2355 counting from the lowest order bit starting with zero. In order to
2356 use these values for S/390 instructions this has to be converted to
2357 "bits big endian" style. */
2360 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2362 unsigned HOST_WIDE_INT mask;
2367 gcc_assert (!!start == !!end);
2368 if (!const_vec_duplicate_p (op, &elt)
2369 || !CONST_INT_P (elt))
2372 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2374 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2378 mask = UINTVAL (elt);
2380 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2385 *start -= (HOST_BITS_PER_WIDE_INT - size);
2386 *end -= (HOST_BITS_PER_WIDE_INT - size);
2394 /* Return true if C consists only of byte chunks being either 0 or
2395 0xff. If MASK is !=NULL a byte mask is generated which is
2396 appropriate for the vector generate byte mask instruction. */
2399 s390_bytemask_vector_p (rtx op, unsigned *mask)
2402 unsigned tmp_mask = 0;
2403 int nunit, unit_size;
2405 if (!VECTOR_MODE_P (GET_MODE (op))
2406 || GET_CODE (op) != CONST_VECTOR
2407 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2410 nunit = GET_MODE_NUNITS (GET_MODE (op));
2411 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2413 for (i = 0; i < nunit; i++)
2415 unsigned HOST_WIDE_INT c;
2418 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2421 c = UINTVAL (XVECEXP (op, 0, i));
2422 for (j = 0; j < unit_size; j++)
2424 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2426 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2427 c = c >> BITS_PER_UNIT;
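/* Worked example (illustrative only): the V2DI constant
   { 0x00000000ffffffff, 0 } consists solely of 0x00 and 0xff bytes,
   so the function returns true with *MASK = 0x0f00 -- one mask bit
   per byte, element 0 occupying the upper half of the 16-bit mask as
   expected by the vgbm instruction.  */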
2437 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2438 equivalent to a shift followed by the AND. In particular, CONTIG
2439 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2440 for ROTL indicate a rotate to the right. */
2443 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2448 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2452 return (64 - end >= rotl);
2455 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in 64 bit mode. */
2457 rotl = -rotl + (64 - bitsize);
2458 return (start >= rotl);
2462 /* Check whether we can (and want to) split a double-word
2463 move in mode MODE from SRC to DST into two single-word
2464 moves, moving the subword FIRST_SUBWORD first. */
2467 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2469 /* Floating point and vector registers cannot be split. */
2470 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2473 /* Non-offsettable memory references cannot be split. */
2474 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2475 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2478 /* Moving the first subword must not clobber a register
2479 needed to move the second subword. */
2480 if (register_operand (dst, mode))
2482 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2483 if (reg_overlap_mentioned_p (subreg, src))
2490 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2491 and [MEM2, MEM2 + SIZE] do overlap and false otherwise (or if it cannot be proven). */
2495 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2497 rtx addr1, addr2, addr_delta;
2498 HOST_WIDE_INT delta;
2500 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2506 addr1 = XEXP (mem1, 0);
2507 addr2 = XEXP (mem2, 0);
2509 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2511 /* This overlapping check is used by peepholes merging memory block operations.
2512 Overlapping operations would otherwise be recognized by the S/390 hardware
2513 and would fall back to a slower implementation. Allowing overlapping
2514 operations would lead to slow code but not to wrong code. Therefore we are
2515 somewhat optimistic if we cannot prove that the memory blocks are overlapping.
2517 That's why we return false here although this may accept operations on
2518 overlapping memory areas. */
2519 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2522 delta = INTVAL (addr_delta);
2525 || (delta > 0 && delta < size)
2526 || (delta < 0 && -delta < size))
2532 /* Check whether the address of memory reference MEM2 equals exactly
2533 the address of memory reference MEM1 plus DELTA. Return true if
2534 we can prove this to be the case, false otherwise. */
2537 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2539 rtx addr1, addr2, addr_delta;
2541 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2544 addr1 = XEXP (mem1, 0);
2545 addr2 = XEXP (mem2, 0);
2547 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2548 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2554 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2557 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2560 machine_mode wmode = mode;
2561 rtx dst = operands[0];
2562 rtx src1 = operands[1];
2563 rtx src2 = operands[2];
2566 /* If we cannot handle the operation directly, use a temp register. */
2567 if (!s390_logical_operator_ok_p (operands))
2568 dst = gen_reg_rtx (mode);
2570 /* QImode and HImode patterns make sense only if we have a destination
2571 in memory. Otherwise perform the operation in SImode. */
2572 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2575 /* Widen operands if required. */
2578 if (GET_CODE (dst) == SUBREG
2579 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2581 else if (REG_P (dst))
2582 dst = gen_rtx_SUBREG (wmode, dst, 0);
2584 dst = gen_reg_rtx (wmode);
2586 if (GET_CODE (src1) == SUBREG
2587 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2589 else if (GET_MODE (src1) != VOIDmode)
2590 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2592 if (GET_CODE (src2) == SUBREG
2593 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2595 else if (GET_MODE (src2) != VOIDmode)
2596 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2599 /* Emit the instruction. */
2600 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2601 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2602 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2604 /* Fix up the destination if needed. */
2605 if (dst != operands[0])
2606 emit_move_insn (operands[0], gen_lowpart (mode, dst));
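/* For instance (a sketch, not from the original sources): an XOR of
   two HImode registers is widened here to an SImode XOR on subregs,
   emitted as a PARALLEL of the SET and the CC clobber, and the HImode
   lowpart of the result is then copied back into operands[0].  */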
2609 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2612 s390_logical_operator_ok_p (rtx *operands)
2614 /* If the destination operand is in memory, it needs to coincide
2615 with one of the source operands. After reload, it has to be
2616 the first source operand. */
2617 if (GET_CODE (operands[0]) == MEM)
2618 return rtx_equal_p (operands[0], operands[1])
2619 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2624 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2625 operand IMMOP to switch from SS to SI type instructions. */
2628 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2630 int def = code == AND ? -1 : 0;
2634 gcc_assert (GET_CODE (*memop) == MEM);
2635 gcc_assert (!MEM_VOLATILE_P (*memop));
2637 mask = s390_extract_part (*immop, QImode, def);
2638 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2639 gcc_assert (part >= 0);
2641 *memop = adjust_address (*memop, QImode, part);
2642 *immop = gen_int_mode (mask, QImode);
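/* Worked example (illustrative only): for an AND of an SImode memory
   operand with the immediate 0xfffff0ff, only one QImode part differs
   from the AND default 0xff, so the access is narrowed to that single
   byte and the operation can be emitted as an NI instruction with
   mask 0xf0.  */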
2646 /* How to allocate a 'struct machine_function'. */
2648 static struct machine_function *
2649 s390_init_machine_status (void)
2651 return ggc_cleared_alloc<machine_function> ();
2654 /* Map for smallest class containing reg regno. */
2656 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2657 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2658 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2659 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2660 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2661 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2662 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2663 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2664 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2665 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2666 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2667 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2668 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2669 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2670 VEC_REGS, VEC_REGS /* 52 */
2673 /* Return attribute type of insn. */
2675 static enum attr_type
2676 s390_safe_attr_type (rtx_insn *insn)
2678 if (recog_memoized (insn) >= 0)
2679 return get_attr_type (insn);
2684 /* Return true if DISP is a valid short displacement. */
2687 s390_short_displacement (rtx disp)
2689 /* No displacement is OK. */
2693 /* Without the long displacement facility we don't need to
2694 distinguish between long and short displacements. */
2695 if (!TARGET_LONG_DISPLACEMENT)
2698 /* Integer displacement in range. */
2699 if (GET_CODE (disp) == CONST_INT)
2700 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2702 /* GOT offset is not OK, the GOT can be large. */
2703 if (GET_CODE (disp) == CONST
2704 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2705 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2706 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2709 /* All other symbolic constants are literal pool references,
2710 which are OK as the literal pool must be small. */
2711 if (GET_CODE (disp) == CONST)
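/* Background sketch (not from the original sources): the classic
   base + displacement instruction formats carry a 12-bit unsigned
   displacement, so offsets 0..4095 qualify as "short", e.g.
   l %r1,4000(%r2).  Larger or negative offsets require the 20-bit
   signed long-displacement forms such as ly, which is what the
   checks above distinguish.  */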
2717 /* Decompose a RTL expression ADDR for a memory address into
2718 its components, returned in OUT.
2720 Returns false if ADDR is not a valid memory address, true
2721 otherwise. If OUT is NULL, don't return the components,
2722 but check for validity only.
2724 Note: Only addresses in canonical form are recognized.
2725 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2726 canonical form so that they will be recognized. */
2729 s390_decompose_address (rtx addr, struct s390_address *out)
2731 HOST_WIDE_INT offset = 0;
2732 rtx base = NULL_RTX;
2733 rtx indx = NULL_RTX;
2734 rtx disp = NULL_RTX;
2736 bool pointer = false;
2737 bool base_ptr = false;
2738 bool indx_ptr = false;
2739 bool literal_pool = false;
2741 /* We may need to substitute the literal pool base register into the address
2742 below. However, at this point we do not know which register is going to
2743 be used as base, so we substitute the arg pointer register. This is going
2744 to be treated as holding a pointer below -- it shouldn't be used for any other purpose. */
2746 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2748 /* Decompose address into base + index + displacement. */
2750 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2753 else if (GET_CODE (addr) == PLUS)
2755 rtx op0 = XEXP (addr, 0);
2756 rtx op1 = XEXP (addr, 1);
2757 enum rtx_code code0 = GET_CODE (op0);
2758 enum rtx_code code1 = GET_CODE (op1);
2760 if (code0 == REG || code0 == UNSPEC)
2762 if (code1 == REG || code1 == UNSPEC)
2764 indx = op0; /* index + base */
2770 base = op0; /* base + displacement */
2775 else if (code0 == PLUS)
2777 indx = XEXP (op0, 0); /* index + base + disp */
2778 base = XEXP (op0, 1);
2789 disp = addr; /* displacement */
2791 /* Extract integer part of displacement. */
2795 if (GET_CODE (disp) == CONST_INT)
2797 offset = INTVAL (disp);
2800 else if (GET_CODE (disp) == CONST
2801 && GET_CODE (XEXP (disp, 0)) == PLUS
2802 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2804 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2805 disp = XEXP (XEXP (disp, 0), 0);
2809 /* Strip off CONST here to avoid special case tests later. */
2810 if (disp && GET_CODE (disp) == CONST)
2811 disp = XEXP (disp, 0);
2813 /* We can convert literal pool addresses to
2814 displacements by basing them off the base register. */
2815 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2820 base = fake_pool_base, literal_pool = true;
2822 /* Mark up the displacement. */
2823 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2824 UNSPEC_LTREL_OFFSET);
2827 /* Validate base register. */
2830 if (GET_CODE (base) == UNSPEC)
2831 switch (XINT (base, 1))
2835 disp = gen_rtx_UNSPEC (Pmode,
2836 gen_rtvec (1, XVECEXP (base, 0, 0)),
2837 UNSPEC_LTREL_OFFSET);
2841 base = XVECEXP (base, 0, 1);
2844 case UNSPEC_LTREL_BASE:
2845 if (XVECLEN (base, 0) == 1)
2846 base = fake_pool_base, literal_pool = true;
2848 base = XVECEXP (base, 0, 1);
2855 if (!REG_P (base) || GET_MODE (base) != Pmode)
2858 if (REGNO (base) == STACK_POINTER_REGNUM
2859 || REGNO (base) == FRAME_POINTER_REGNUM
2860 || ((reload_completed || reload_in_progress)
2861 && frame_pointer_needed
2862 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2863 || REGNO (base) == ARG_POINTER_REGNUM
2865 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2866 pointer = base_ptr = true;
2868 if ((reload_completed || reload_in_progress)
2869 && base == cfun->machine->base_reg)
2870 pointer = base_ptr = literal_pool = true;
2873 /* Validate index register. */
2876 if (GET_CODE (indx) == UNSPEC)
2877 switch (XINT (indx, 1))
2881 disp = gen_rtx_UNSPEC (Pmode,
2882 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2883 UNSPEC_LTREL_OFFSET);
2887 indx = XVECEXP (indx, 0, 1);
2890 case UNSPEC_LTREL_BASE:
2891 if (XVECLEN (indx, 0) == 1)
2892 indx = fake_pool_base, literal_pool = true;
2894 indx = XVECEXP (indx, 0, 1);
2901 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2904 if (REGNO (indx) == STACK_POINTER_REGNUM
2905 || REGNO (indx) == FRAME_POINTER_REGNUM
2906 || ((reload_completed || reload_in_progress)
2907 && frame_pointer_needed
2908 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2909 || REGNO (indx) == ARG_POINTER_REGNUM
2911 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2912 pointer = indx_ptr = true;
2914 if ((reload_completed || reload_in_progress)
2915 && indx == cfun->machine->base_reg)
2916 pointer = indx_ptr = literal_pool = true;
2919 /* Prefer to use pointer as base, not index. */
2920 if (base && indx && !base_ptr
2921 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2928 /* Validate displacement. */
2931 /* If virtual registers are involved, the displacement will change later
2932 anyway as the virtual registers get eliminated. This could make a
2933 valid displacement invalid, but it is more likely to make an invalid
2934 displacement valid, because we sometimes access the register save area
2935 via negative offsets to one of those registers.
2936 Thus we don't check the displacement for validity here. If after
2937 elimination the displacement turns out to be invalid after all,
2938 this is fixed up by reload in any case. */
2939 /* LRA always keeps displacements up to date, and we need to
2940 know that the displacement is correct throughout LRA, not only
2941 at the final elimination. */
2943 || (base != arg_pointer_rtx
2944 && indx != arg_pointer_rtx
2945 && base != return_address_pointer_rtx
2946 && indx != return_address_pointer_rtx
2947 && base != frame_pointer_rtx
2948 && indx != frame_pointer_rtx
2949 && base != virtual_stack_vars_rtx
2950 && indx != virtual_stack_vars_rtx))
2951 if (!DISP_IN_RANGE (offset))
2956 /* All the special cases are pointers. */
2959 /* In the small-PIC case, the linker converts @GOT
2960 and @GOTNTPOFF offsets to possible displacements. */
2961 if (GET_CODE (disp) == UNSPEC
2962 && (XINT (disp, 1) == UNSPEC_GOT
2963 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2969 /* Accept pool label offsets. */
2970 else if (GET_CODE (disp) == UNSPEC
2971 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2974 /* Accept literal pool references. */
2975 else if (GET_CODE (disp) == UNSPEC
2976 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2978 /* In case CSE pulled a non-literal-pool reference out of
2979 the pool we have to reject the address. This is
2980 especially important when loading the GOT pointer on
2981 non-zarch CPUs. In this case the literal pool contains an
2982 lt-relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2983 will most likely exceed the displacement range. */
2984 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2985 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2988 orig_disp = gen_rtx_CONST (Pmode, disp);
2991 /* If we have an offset, make sure it does not
2992 exceed the size of the constant pool entry. */
2993 rtx sym = XVECEXP (disp, 0, 0);
2994 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2997 orig_disp = plus_constant (Pmode, orig_disp, offset);
3012 out->disp = orig_disp;
3013 out->pointer = pointer;
3014 out->literal_pool = literal_pool;
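/* Worked example (illustrative only): the canonical address rtx
   (plus (plus (reg %r2) (reg %r3)) (const_int 100)) decomposes into
   a base register, an index register and disp = 100, i.e. the
   classic D(X,B) operand 100(%r3,%r2); which of the two registers
   ends up as the base depends on the pointer preference above.  */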
3020 /* Decompose a RTL expression OP for an address style operand into its
3021 components, and return the base register in BASE and the offset in
3022 OFFSET. While OP looks like an address it is never supposed to be used as such.
3025 Return true if OP is a valid address operand, false if not. */
3028 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3029 HOST_WIDE_INT *offset)
3033 /* We can have an integer constant, an address register,
3034 or a sum of the two. */
3035 if (CONST_SCALAR_INT_P (op))
3040 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3045 while (op && GET_CODE (op) == SUBREG)
3046 op = SUBREG_REG (op);
3048 if (op && GET_CODE (op) != REG)
3053 if (off == NULL_RTX)
3055 else if (CONST_INT_P (off))
3056 *offset = INTVAL (off);
3057 else if (CONST_WIDE_INT_P (off))
3058 /* The offset will be cut down to 12 bits anyway, so just take
3059 the lowest-order chunk of the wide int. */
3060 *offset = CONST_WIDE_INT_ELT (off, 0);
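/* Worked example (illustrative only): the shift count operand
   (plus (reg %r1) (const_int 3)) yields *BASE = %r1 and *OFFSET = 3,
   matching the 3(%r1) shift count syntax of instructions like
   sll.  */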
3071 /* Return true if OP is a valid address without index. */
3074 s390_legitimate_address_without_index_p (rtx op)
3076 struct s390_address addr;
3078 if (!s390_decompose_address (XEXP (op, 0), &addr))
3087 /* Return TRUE if ADDR is an operand valid for a load/store relative
3088 instruction. Be aware that the alignment of the operand needs to
3089 be checked separately.
3090 Valid addresses are single references or a sum of a reference and a
3091 constant integer. Return these parts in SYMREF and ADDEND. You can
3092 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3093 values. Literal pool references are *not* considered symbol references. */
3097 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3099 HOST_WIDE_INT tmpaddend = 0;
3101 if (GET_CODE (addr) == CONST)
3102 addr = XEXP (addr, 0);
3104 if (GET_CODE (addr) == PLUS)
3106 if (!CONST_INT_P (XEXP (addr, 1)))
3109 tmpaddend = INTVAL (XEXP (addr, 1));
3110 addr = XEXP (addr, 0);
3113 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3114 || (GET_CODE (addr) == UNSPEC
3115 && (XINT (addr, 1) == UNSPEC_GOTENT
3116 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3121 *addend = tmpaddend;
3128 /* Return true if the address in OP is valid for constraint letter C
3129 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3130 pool MEMs should be accepted. Only the Q, R, S, T constraint
3131 letters are allowed for C. */
3134 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3136 struct s390_address addr;
3137 bool decomposed = false;
3139 if (!address_operand (op, GET_MODE (op)))
3142 /* This check makes sure that no symbolic address (except literal
3143 pool references) are accepted by the R or T constraints. */
3144 if (s390_loadrelative_operand_p (op, NULL, NULL))
3147 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3150 if (!s390_decompose_address (op, &addr))
3152 if (addr.literal_pool)
3157 /* With reload, we sometimes get intermediate address forms that are
3158 actually invalid as-is, but we need to accept them in the most
3159 generic cases below ('R' or 'T'), since reload will in fact fix
3160 them up. LRA behaves differently here; we never see such forms,
3161 but on the other hand, we need to strictly reject every invalid
3162 address form. Perform this check right up front. */
3163 if (lra_in_progress)
3165 if (!decomposed && !s390_decompose_address (op, &addr))
3172 case 'Q': /* no index short displacement */
3173 if (!decomposed && !s390_decompose_address (op, &addr))
3177 if (!s390_short_displacement (addr.disp))
3181 case 'R': /* with index short displacement */
3182 if (TARGET_LONG_DISPLACEMENT)
3184 if (!decomposed && !s390_decompose_address (op, &addr))
3186 if (!s390_short_displacement (addr.disp))
3189 /* Any invalid address here will be fixed up by reload,
3190 so accept it for the most generic constraint. */
3193 case 'S': /* no index long displacement */
3194 if (!decomposed && !s390_decompose_address (op, &addr))
3200 case 'T': /* with index long displacement */
3201 /* Any invalid address here will be fixed up by reload,
3202 so accept it for the most generic constraint. */
3212 /* Evaluates constraint strings described by the regular expression
3213 ([ABZ](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3214 the constraint given in STR, and 0 otherwise. */
3217 s390_mem_constraint (const char *str, rtx op)
3224 /* Check for offsettable variants of memory constraints. */
3225 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3227 if ((reload_completed || reload_in_progress)
3228 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3230 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3232 /* Check for non-literal-pool variants of memory constraints. */
3235 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3240 if (GET_CODE (op) != MEM)
3242 return s390_check_qrst_address (c, XEXP (op, 0), true);
3244 /* Simply check for the basic form of a shift count. Reload will
3245 take care of making sure we have a proper base register. */
3246 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3250 return s390_check_qrst_address (str[1], op, true);
3258 /* Evaluates constraint strings starting with letter O. Input
3259 parameter C is the second letter following the "O" in the constraint
3260 string. Returns 1 if VALUE meets the respective constraint and 0 otherwise. */
3264 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3272 return trunc_int_for_mode (value, SImode) == value;
3276 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3279 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3287 /* Evaluates constraint strings starting with letter N. Parameter STR
3288 contains the letters following letter "N" in the constraint string.
3289 Returns true if VALUE matches the constraint. */
3292 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3294 machine_mode mode, part_mode;
3296 int part, part_goal;
3302 part_goal = str[0] - '0';
3346 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3349 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3352 if (part_goal != -1 && part_goal != part)
3359 /* Returns true if the input parameter VALUE is a float zero. */
3362 s390_float_const_zero_p (rtx value)
3364 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3365 && value == CONST0_RTX (GET_MODE (value)));
3368 /* Implement TARGET_REGISTER_MOVE_COST. */
3371 s390_register_move_cost (machine_mode mode,
3372 reg_class_t from, reg_class_t to)
3374 /* On s390, copying between fprs and gprs is expensive. */
3376 /* It becomes somewhat faster once ldgr/lgdr are available. */
3377 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3379 /* ldgr is single cycle. */
3380 if (reg_classes_intersect_p (from, GENERAL_REGS)
3381 && reg_classes_intersect_p (to, FP_REGS))
3383 /* lgdr needs 3 cycles. */
3384 if (reg_classes_intersect_p (to, GENERAL_REGS)
3385 && reg_classes_intersect_p (from, FP_REGS))
3389 /* Otherwise copying is done via memory. */
3390 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3391 && reg_classes_intersect_p (to, FP_REGS))
3392 || (reg_classes_intersect_p (from, FP_REGS)
3393 && reg_classes_intersect_p (to, GENERAL_REGS)))
3399 /* Implement TARGET_MEMORY_MOVE_COST. */
3402 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3403 reg_class_t rclass ATTRIBUTE_UNUSED,
3404 bool in ATTRIBUTE_UNUSED)
3409 /* Compute a (partial) cost for rtx X. Return true if the complete
3410 cost has been computed, and false if subexpressions should be
3411 scanned. In either case, *TOTAL contains the cost result. The
3412 initial value of *TOTAL is the default value computed by
3413 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3414 code of the superexpression of x. */
3417 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3418 int opno ATTRIBUTE_UNUSED,
3419 int *total, bool speed ATTRIBUTE_UNUSED)
3421 int code = GET_CODE (x);
3429 case CONST_WIDE_INT:
3436 /* Without this a conditional move instruction would be
3437 accounted as 3 * COSTS_N_INSNS (set, if_then_else,
3438 comparison operator). That's a bit pessimistic. */
3440 if (!TARGET_Z196 || GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
3443 rtx cond = XEXP (SET_SRC (x), 0);
3445 if (!CC_REG_P (XEXP (cond, 0)) || !CONST_INT_P (XEXP (cond, 1)))
3448 /* It is going to be a load/store on condition. Make it
3449 slightly more expensive than a normal load. */
3450 *total = COSTS_N_INSNS (1) + 1;
3452 rtx dst = SET_DEST (x);
3453 rtx then = XEXP (SET_SRC (x), 1);
3454 rtx els = XEXP (SET_SRC (x), 2);
3456 /* It is a real IF-THEN-ELSE. An additional move will be
3457 needed to implement that. */
3458 if (reload_completed
3459 && !rtx_equal_p (dst, then)
3460 && !rtx_equal_p (dst, els))
3461 *total += COSTS_N_INSNS (1) / 2;
3463 /* A minor penalty for constants we cannot directly handle. */
3464 if ((CONST_INT_P (then) || CONST_INT_P (els))
3465 && (!TARGET_Z13 || MEM_P (dst)
3466 || (CONST_INT_P (then) && !satisfies_constraint_K (then))
3467 || (CONST_INT_P (els) && !satisfies_constraint_K (els))))
3468 *total += COSTS_N_INSNS (1) / 2;
3470 /* A store on condition can only handle register src operands. */
3471 if (MEM_P (dst) && (!REG_P (then) || !REG_P (els)))
3472 *total += COSTS_N_INSNS (1) / 2;
3478 if (GET_CODE (XEXP (x, 0)) == AND
3479 && GET_CODE (XEXP (x, 1)) == ASHIFT
3480 && REG_P (XEXP (XEXP (x, 0), 0))
3481 && REG_P (XEXP (XEXP (x, 1), 0))
3482 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3483 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3484 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3485 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3487 *total = COSTS_N_INSNS (2);
3491 /* ~AND on a 128 bit mode. This can be done using a vector instruction. */
3494 && GET_CODE (XEXP (x, 0)) == NOT
3495 && GET_CODE (XEXP (x, 1)) == NOT
3496 && REG_P (XEXP (XEXP (x, 0), 0))
3497 && REG_P (XEXP (XEXP (x, 1), 0))
3498 && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) == 16
3499 && s390_hard_regno_mode_ok (VR0_REGNUM,
3500 GET_MODE (XEXP (XEXP (x, 0), 0))))
3502 *total = COSTS_N_INSNS (1);
3515 *total = COSTS_N_INSNS (1);
3520 *total = COSTS_N_INSNS (1);
3528 rtx left = XEXP (x, 0);
3529 rtx right = XEXP (x, 1);
3530 if (GET_CODE (right) == CONST_INT
3531 && CONST_OK_FOR_K (INTVAL (right)))
3532 *total = s390_cost->mhi;
3533 else if (GET_CODE (left) == SIGN_EXTEND)
3534 *total = s390_cost->mh;
3536 *total = s390_cost->ms; /* msr, ms, msy */
3541 rtx left = XEXP (x, 0);
3542 rtx right = XEXP (x, 1);
3545 if (GET_CODE (right) == CONST_INT
3546 && CONST_OK_FOR_K (INTVAL (right)))
3547 *total = s390_cost->mghi;
3548 else if (GET_CODE (left) == SIGN_EXTEND)
3549 *total = s390_cost->msgf;
3551 *total = s390_cost->msg; /* msgr, msg */
3553 else /* TARGET_31BIT */
3555 if (GET_CODE (left) == SIGN_EXTEND
3556 && GET_CODE (right) == SIGN_EXTEND)
3557 /* mulsidi case: mr, m */
3558 *total = s390_cost->m;
3559 else if (GET_CODE (left) == ZERO_EXTEND
3560 && GET_CODE (right) == ZERO_EXTEND
3561 && TARGET_CPU_ZARCH)
3562 /* umulsidi case: ml, mlr */
3563 *total = s390_cost->ml;
3565 /* Complex calculation is required. */
3566 *total = COSTS_N_INSNS (40);
3572 *total = s390_cost->mult_df;
3575 *total = s390_cost->mxbr;
3586 *total = s390_cost->madbr;
3589 *total = s390_cost->maebr;
3594 /* A negation in the third argument is free: FMSUB. */
3595 if (GET_CODE (XEXP (x, 2)) == NEG)
3597 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3598 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3599 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3606 if (mode == TImode) /* 128 bit division */
3607 *total = s390_cost->dlgr;
3608 else if (mode == DImode)
3610 rtx right = XEXP (x, 1);
3611 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3612 *total = s390_cost->dlr;
3613 else /* 64 by 64 bit division */
3614 *total = s390_cost->dlgr;
3616 else if (mode == SImode) /* 32 bit division */
3617 *total = s390_cost->dlr;
3624 rtx right = XEXP (x, 1);
3625 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3627 *total = s390_cost->dsgfr;
3629 *total = s390_cost->dr;
3630 else /* 64 by 64 bit division */
3631 *total = s390_cost->dsgr;
3633 else if (mode == SImode) /* 32 bit division */
3634 *total = s390_cost->dlr;
3635 else if (mode == SFmode)
3637 *total = s390_cost->debr;
3639 else if (mode == DFmode)
3641 *total = s390_cost->ddbr;
3643 else if (mode == TFmode)
3645 *total = s390_cost->dxbr;
3651 *total = s390_cost->sqebr;
3652 else if (mode == DFmode)
3653 *total = s390_cost->sqdbr;
3655 *total = s390_cost->sqxbr;
3660 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3661 || outer_code == PLUS || outer_code == MINUS
3662 || outer_code == COMPARE)
3667 *total = COSTS_N_INSNS (1);
3668 if (GET_CODE (XEXP (x, 0)) == AND
3669 && GET_CODE (XEXP (x, 1)) == CONST_INT
3670 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3672 rtx op0 = XEXP (XEXP (x, 0), 0);
3673 rtx op1 = XEXP (XEXP (x, 0), 1);
3674 rtx op2 = XEXP (x, 1);
3676 if (memory_operand (op0, GET_MODE (op0))
3677 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3679 if (register_operand (op0, GET_MODE (op0))
3680 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3690 /* Return the cost of an address rtx ADDR. */
3693 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3694 addr_space_t as ATTRIBUTE_UNUSED,
3695 bool speed ATTRIBUTE_UNUSED)
3697 struct s390_address ad;
3698 if (!s390_decompose_address (addr, &ad))
3701 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3704 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3706 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3708 int misalign ATTRIBUTE_UNUSED)
3710 switch (type_of_cost)
3720 case cond_branch_not_taken:
3722 case vec_promote_demote:
3723 case unaligned_load:
3724 case unaligned_store:
3727 case cond_branch_taken:
3731 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3738 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3739 otherwise return 0. */
3742 tls_symbolic_operand (rtx op)
3744 if (GET_CODE (op) != SYMBOL_REF)
3746 return SYMBOL_REF_TLS_MODEL (op);
3749 /* Split DImode access register reference REG (on 64-bit) into its constituent
3750 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3751 gen_highpart cannot be used as they assume all registers are word-sized,
3752 while our access registers have only half that size. */
3755 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3757 gcc_assert (TARGET_64BIT);
3758 gcc_assert (ACCESS_REG_P (reg));
3759 gcc_assert (GET_MODE (reg) == DImode);
3760 gcc_assert (!(REGNO (reg) & 1));
3762 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3763 *hi = gen_rtx_REG (SImode, REGNO (reg));
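/* Worked example (illustrative only): for REG = (reg:DI 36), i.e.
   the a0/a1 access register pair, this yields *HI = (reg:SI 36) and
   *LO = (reg:SI 37) -- the high word lives in the even-numbered
   access register.  */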
3766 /* Return true if OP contains a symbol reference. */
3769 symbolic_reference_mentioned_p (rtx op)
3774 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3777 fmt = GET_RTX_FORMAT (GET_CODE (op));
3778 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3784 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3785 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3789 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3796 /* Return true if OP contains a reference to a thread-local symbol. */
3799 tls_symbolic_reference_mentioned_p (rtx op)
3804 if (GET_CODE (op) == SYMBOL_REF)
3805 return tls_symbolic_operand (op);
3807 fmt = GET_RTX_FORMAT (GET_CODE (op));
3808 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3814 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3815 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3819 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3827 /* Return true if OP is a legitimate general operand when
3828 generating PIC code. It is given that flag_pic is on
3829 and that OP satisfies CONSTANT_P. */
3832 legitimate_pic_operand_p (rtx op)
3834 /* Accept all non-symbolic constants. */
3835 if (!SYMBOLIC_CONST (op))
3838 /* Reject everything else; must be handled
3839 via emit_symbolic_move. */
3843 /* Returns true if the constant value OP is a legitimate general operand.
3844 It is given that OP satisfies CONSTANT_P. */
3847 s390_legitimate_constant_p (machine_mode mode, rtx op)
3849 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3851 if (GET_MODE_SIZE (mode) != 16)
3854 if (!satisfies_constraint_j00 (op)
3855 && !satisfies_constraint_jm1 (op)
3856 && !satisfies_constraint_jKK (op)
3857 && !satisfies_constraint_jxx (op)
3858 && !satisfies_constraint_jyy (op))
3862 /* Accept all non-symbolic constants. */
3863 if (!SYMBOLIC_CONST (op))
3866 /* Accept immediate LARL operands. */
3867 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3870 /* Thread-local symbols are never legal constants. This is
3871 so that emit_call knows that computing such addresses
3872 might require a function call. */
3873 if (TLS_SYMBOLIC_CONST (op))
3876 /* In the PIC case, symbolic constants must *not* be
3877 forced into the literal pool. We accept them here,
3878 so that they will be handled by emit_symbolic_move. */
3882 /* All remaining non-PIC symbolic constants are
3883 forced into the literal pool. */
3887 /* Determine if it's legal to put X into the constant pool. This
3888 is not possible if X contains the address of a symbol that is
3889 not constant (TLS) or not known at final link time (PIC). */
3892 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3894 switch (GET_CODE (x))
3898 case CONST_WIDE_INT:
3900 /* Accept all non-symbolic constants. */
3904 /* Labels are OK iff we are non-PIC. */
3905 return flag_pic != 0;
3908 /* 'Naked' TLS symbol references are never OK,
3909 non-TLS symbols are OK iff we are non-PIC. */
3910 if (tls_symbolic_operand (x))
3913 return flag_pic != 0;
3916 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3919 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3920 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3923 switch (XINT (x, 1))
3925 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3926 case UNSPEC_LTREL_OFFSET:
3934 case UNSPEC_GOTNTPOFF:
3935 case UNSPEC_INDNTPOFF:
3938 /* If the literal pool shares the code section, execute template
3939 placeholders may be put into the pool as well. */
3941 return TARGET_CPU_ZARCH;
3953 /* Returns true if the constant value OP is a legitimate general
3954 operand during and after reload. The difference to
3955 legitimate_constant_p is that this function will not accept
3956 a constant that would need to be forced to the literal pool
3957 before it can be used as operand.
3958 This function accepts all constants which can be loaded directly into general purpose registers. */
3962 legitimate_reload_constant_p (rtx op)
3964 /* Accept la(y) operands. */
3965 if (GET_CODE (op) == CONST_INT
3966 && DISP_IN_RANGE (INTVAL (op)))
3969 /* Accept l(g)hi/l(g)fi operands. */
3970 if (GET_CODE (op) == CONST_INT
3971 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3974 /* Accept lliXX operands. */
3976 && GET_CODE (op) == CONST_INT
3977 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3978 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3982 && GET_CODE (op) == CONST_INT
3983 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3984 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3987 /* Accept larl operands. */
3988 if (TARGET_CPU_ZARCH
3989 && larl_operand (op, VOIDmode))
3992 /* Accept floating-point zero operands that fit into a single GPR. */
3993 if (GET_CODE (op) == CONST_DOUBLE
3994 && s390_float_const_zero_p (op)
3995 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3998 /* Accept double-word operands that can be split. */
3999 if (GET_CODE (op) == CONST_WIDE_INT
4000 || (GET_CODE (op) == CONST_INT
4001 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
4003 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
4004 rtx hi = operand_subword (op, 0, 0, dword_mode);
4005 rtx lo = operand_subword (op, 1, 0, dword_mode);
4006 return legitimate_reload_constant_p (hi)
4007 && legitimate_reload_constant_p (lo);
4010 /* Everything else cannot be handled without reload. */
4014 /* Returns true if the constant value OP is a legitimate fp operand
4015 during and after reload.
4016 This function accepts all constants which can be loaded directly into target floating point registers. */
4020 legitimate_reload_fp_constant_p (rtx op)
4022 /* Accept floating-point zero operands if the load zero instruction
4023 can be used. Prior to z196 the load fp zero instruction caused a
4024 performance penalty if the result is used as a BFP number. */
4026 && GET_CODE (op) == CONST_DOUBLE
4027 && s390_float_const_zero_p (op))
4033 /* Returns true if the constant value OP is a legitimate vector operand
4034 during and after reload.
4035 This function accepts all constants which can be loaded directly into target vector registers. */
4039 legitimate_reload_vector_constant_p (rtx op)
4041 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
4042 && (satisfies_constraint_j00 (op)
4043 || satisfies_constraint_jm1 (op)
4044 || satisfies_constraint_jKK (op)
4045 || satisfies_constraint_jxx (op)
4046 || satisfies_constraint_jyy (op)))
4052 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
4053 return the class of reg to actually use. */
4056 s390_preferred_reload_class (rtx op, reg_class_t rclass)
4058 switch (GET_CODE (op))
4060 /* Constants we cannot reload into general registers
4061 must be forced into the literal pool. */
4065 case CONST_WIDE_INT:
4066 if (reg_class_subset_p (GENERAL_REGS, rclass)
4067 && legitimate_reload_constant_p (op))
4068 return GENERAL_REGS;
4069 else if (reg_class_subset_p (ADDR_REGS, rclass)
4070 && legitimate_reload_constant_p (op))
4072 else if (reg_class_subset_p (FP_REGS, rclass)
4073 && legitimate_reload_fp_constant_p (op))
4075 else if (reg_class_subset_p (VEC_REGS, rclass)
4076 && legitimate_reload_vector_constant_p (op))
4081 /* If a symbolic constant or a PLUS is reloaded,
4082 it is most likely being used as an address, so
4083 prefer ADDR_REGS. If 'class' is not a superset
4084 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4086 /* Symrefs cannot be pushed into the literal pool with -fPIC
4087 so we *MUST NOT* return NO_REGS for these cases
4088 (s390_cannot_force_const_mem will return true).
4090 On the other hand we MUST return NO_REGS for symrefs with
4091 an invalid addend which might have been pushed to the literal
4092 pool (no -fPIC). Usually we would expect them to be
4093 handled via secondary reload but this does not happen if
4094 they are used as literal pool slot replacement in reload
4095 inheritance (see emit_input_reload_insns). */
4096 if (TARGET_CPU_ZARCH
4097 && GET_CODE (XEXP (op, 0)) == PLUS
4098 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4099 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4101 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4109 if (!legitimate_reload_constant_p (op))
4113 /* load address will be used. */
4114 if (reg_class_subset_p (ADDR_REGS, rclass))
4126 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4127 multiple of ALIGNMENT and the SYMBOL_REF being naturally aligned. */
4131 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4133 HOST_WIDE_INT addend;
4136 /* The "required alignment" might be 0 (e.g. for certain structs
4137 accessed via BLKmode). Abort early in this case, as well as when
4138 an alignment > 8 is required. */
4139 if (alignment < 2 || alignment > 8)
4142 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4145 if (addend & (alignment - 1))
4148 if (GET_CODE (symref) == SYMBOL_REF)
4150 /* We have load-relative instructions for 2-byte, 4-byte, and
4151 8-byte alignment so allow only these. */
4154 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4155 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4156 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4157 default: return false;
4161 if (GET_CODE (symref) == UNSPEC
4162 && alignment <= UNITS_PER_LONG)
4168 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4169 operand SCRATCH is used to reload the even part of the address and the remaining odd part is added afterwards via la. */
4173 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4175 HOST_WIDE_INT addend;
4178 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4182 /* Easy case. The addend is even so larl will do fine. */
4183 emit_move_insn (reg, addr);
4186 /* We can leave the scratch register untouched if the target
4187 register is a valid base register. */
4188 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4189 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4192 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4193 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4196 emit_move_insn (scratch,
4197 gen_rtx_CONST (Pmode,
4198 gen_rtx_PLUS (Pmode, symref,
4199 GEN_INT (addend - 1))));
4201 emit_move_insn (scratch, symref);
4203 /* Increment the address using la in order to avoid clobbering cc. */
4204 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
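/* Worked example (illustrative only): reloading the odd address
   sym + 5 emits
     larl scratch,sym+4
     la reg,1(scratch)
   which keeps the larl offset even and leaves the condition code
   untouched.  */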
4208 /* Generate what is necessary to move between REG and MEM using
4209 SCRATCH. The direction is given by TOMEM. */
4212 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4214 /* Reload might have pulled a constant out of the literal pool.
4215 Force it back in. */
4216 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4217 || GET_CODE (mem) == CONST_WIDE_INT
4218 || GET_CODE (mem) == CONST_VECTOR
4219 || GET_CODE (mem) == CONST)
4220 mem = force_const_mem (GET_MODE (reg), mem);
4222 gcc_assert (MEM_P (mem));
4224 /* For a load from memory we can leave the scratch register
4225 untouched if the target register is a valid base register. */
4227 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4228 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4229 && GET_MODE (reg) == GET_MODE (scratch))
4232 /* Load address into scratch register. Since we can't have a
4233 secondary reload for a secondary reload we have to cover the case
4234 where larl would need a secondary reload here as well. */
4235 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4237 /* Now we can use a standard load/store to do the move. */
4239 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4241 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4244 /* Inform reload about cases where moving X with a mode MODE to a register in
4245 RCLASS requires an extra scratch or immediate register. Return the class
4246 needed for the immediate register. */
4249 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4250 machine_mode mode, secondary_reload_info *sri)
4252 enum reg_class rclass = (enum reg_class) rclass_i;
4254 /* Intermediate register needed. */
4255 if (reg_classes_intersect_p (CC_REGS, rclass))
4256 return GENERAL_REGS;
4260 /* The vst/vl vector move instructions allow only for short displacements. */
4263 && GET_CODE (XEXP (x, 0)) == PLUS
4264 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4265 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4266 && reg_class_subset_p (rclass, VEC_REGS)
4267 && (!reg_class_subset_p (rclass, FP_REGS)
4268 || (GET_MODE_SIZE (mode) > 8
4269 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4272 sri->icode = (TARGET_64BIT ?
4273 CODE_FOR_reloaddi_la_in :
4274 CODE_FOR_reloadsi_la_in);
4276 sri->icode = (TARGET_64BIT ?
4277 CODE_FOR_reloaddi_la_out :
4278 CODE_FOR_reloadsi_la_out);
4284 HOST_WIDE_INT offset;
4287 /* On z10 several optimizer steps may generate larl operands with an odd addend. */
4290 && s390_loadrelative_operand_p (x, &symref, &offset)
4292 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4293 && (offset & 1) == 1)
4294 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4295 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4297 /* Handle all the (mem (symref)) accesses we cannot use the z10
4298 instructions for. */
4300 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4302 || !reg_class_subset_p (rclass, GENERAL_REGS)
4303 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4304 || !s390_check_symref_alignment (XEXP (x, 0),
4305 GET_MODE_SIZE (mode))))
4307 #define __SECONDARY_RELOAD_CASE(M,m) \
4310 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4311 CODE_FOR_reload##m##di_tomem_z10; \
4313 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4314 CODE_FOR_reload##m##si_tomem_z10; \
4317 switch (GET_MODE (x))
4319 __SECONDARY_RELOAD_CASE (QI, qi);
4320 __SECONDARY_RELOAD_CASE (HI, hi);
4321 __SECONDARY_RELOAD_CASE (SI, si);
4322 __SECONDARY_RELOAD_CASE (DI, di);
4323 __SECONDARY_RELOAD_CASE (TI, ti);
4324 __SECONDARY_RELOAD_CASE (SF, sf);
4325 __SECONDARY_RELOAD_CASE (DF, df);
4326 __SECONDARY_RELOAD_CASE (TF, tf);
4327 __SECONDARY_RELOAD_CASE (SD, sd);
4328 __SECONDARY_RELOAD_CASE (DD, dd);
4329 __SECONDARY_RELOAD_CASE (TD, td);
4330 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4331 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4332 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4333 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4334 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4335 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4336 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4337 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4338 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4339 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4340 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4341 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4342 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4343 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4344 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4345 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4346 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4347 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4348 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4349 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4350 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4354 #undef __SECONDARY_RELOAD_CASE
4358 /* We need a scratch register when loading a PLUS expression which
4359 is not a legitimate operand of the LOAD ADDRESS instruction. */
4360 /* LRA deals with the transformation of a plus op very well -- so we
4361 don't need to prompt it in this case. */
4362 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4363 sri->icode = (TARGET_64BIT ?
4364 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4366 /* When performing a multiword move from or to memory we have to make
4367 sure the second chunk in memory is addressable without causing a
4368 displacement overflow. If that would be the case we calculate the
4369 address in a scratch register. */
4371 && GET_CODE (XEXP (x, 0)) == PLUS
4372 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4373 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4374 + GET_MODE_SIZE (mode) - 1))
4376 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
4377 in an s_operand address since we may fall back to lm/stm. So we only
4378 have to care about overflows in the b+i+d case. */
4379 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4380 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4381 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4382 /* For FP_REGS no lm/stm is available so this check is triggered
4383 for displacement overflows in b+i+d and b+d like addresses. */
4384 || (reg_classes_intersect_p (FP_REGS, rclass)
4385 && s390_class_max_nregs (FP_REGS, mode) > 1))
4388 sri->icode = (TARGET_64BIT ?
4389 CODE_FOR_reloaddi_la_in :
4390 CODE_FOR_reloadsi_la_in);
4392 sri->icode = (TARGET_64BIT ?
4393 CODE_FOR_reloaddi_la_out :
4394 CODE_FOR_reloadsi_la_out);
4398 /* A scratch address register is needed when a symbolic constant is
4399 copied to r0 when compiling with -fPIC. In other cases the target
4400 register might be used as temporary (see legitimize_pic_address). */
4401 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4402 sri->icode = (TARGET_64BIT ?
4403 CODE_FOR_reloaddi_PIC_addr :
4404 CODE_FOR_reloadsi_PIC_addr);
4406 /* Either scratch or no register needed. */
4410 /* Generate code to load SRC, which is a PLUS that is not a
4411 legitimate operand for the LA instruction, into TARGET.
4412 SCRATCH may be used as scratch register. */
4415 s390_expand_plus_operand (rtx target, rtx src,
4419 struct s390_address ad;
4421 /* src must be a PLUS; get its two operands. */
4422 gcc_assert (GET_CODE (src) == PLUS);
4423 gcc_assert (GET_MODE (src) == Pmode);
4425 /* Check if either of the two operands is already scheduled
4426 for replacement by reload. This can happen e.g. when
4427 float registers occur in an address. */
4428 sum1 = find_replacement (&XEXP (src, 0));
4429 sum2 = find_replacement (&XEXP (src, 1));
4430 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4432 /* If the address is already strictly valid, there's nothing to do. */
4433 if (!s390_decompose_address (src, &ad)
4434 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4435 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4437 /* Otherwise, one of the operands cannot be an address register;
4438 we reload its value into the scratch register. */
4439 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4441 emit_move_insn (scratch, sum1);
4444 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4446 emit_move_insn (scratch, sum2);
4450 /* According to the way these invalid addresses are generated
4451 in reload.c, it should never happen (at least on s390) that
4452 *neither* of the PLUS components, after find_replacements
4453 was applied, is an address register. */
4454 if (sum1 == scratch && sum2 == scratch)
4460 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4463 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4464 is only ever performed on addresses, so we can mark the
4465 sum as legitimate for LA in any case. */
4466 s390_load_address (target, src);
4470 /* Return true if ADDR is a valid memory address.
4471 STRICT specifies whether strict register checking applies. */
4474 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4476 struct s390_address ad;
4479 && larl_operand (addr, VOIDmode)
4480 && (mode == VOIDmode
4481 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4484 if (!s390_decompose_address (addr, &ad))
4489 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4492 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4498 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4499 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4503 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4504 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4510 /* Return true if OP is a valid operand for the LA instruction.
4511 In 31-bit, we need to prove that the result is used as an
4512 address, as LA performs only a 31-bit addition. */
4515 legitimate_la_operand_p (rtx op)
4517 struct s390_address addr;
4518 if (!s390_decompose_address (op, &addr))
4521 return (TARGET_64BIT || addr.pointer);
4524 /* Return true if it is valid *and* preferable to use LA to
4525 compute the sum of OP1 and OP2. */
4528 preferred_la_operand_p (rtx op1, rtx op2)
4530 struct s390_address addr;
4532 if (op2 != const0_rtx)
4533 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4535 if (!s390_decompose_address (op1, &addr))
4537 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4539 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4542 /* Avoid LA instructions with index register on z196; it is
4543 preferable to use regular add instructions when possible.
4544 Starting with zEC12 the la with index register is "uncracked" again. */
4546 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4549 if (!TARGET_64BIT && !addr.pointer)
4555 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4556 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4562 /* Emit a forced load-address operation to load SRC into DST.
4563 This will use the LOAD ADDRESS instruction even in situations
4564 where legitimate_la_operand_p (SRC) returns false. */
4567 s390_load_address (rtx dst, rtx src)
4570 emit_move_insn (dst, src);
4572 emit_insn (gen_force_la_31 (dst, src));
4575 /* Return true if it is ok to use SYMBOL_REF in a relative address. */
4578 s390_rel_address_ok_p (rtx symbol_ref)
4582 if (symbol_ref == s390_got_symbol () || CONSTANT_POOL_ADDRESS_P (symbol_ref))
4585 decl = SYMBOL_REF_DECL (symbol_ref);
4587 if (!flag_pic || SYMBOL_REF_LOCAL_P (symbol_ref))
4588 return (s390_pic_data_is_text_relative
4590 && TREE_CODE (decl) == FUNCTION_DECL));
4595 /* Return a legitimate reference for ORIG (an address) using the
4596 register REG. If REG is 0, a new pseudo is generated.
4598 There are two types of references that must be handled:
4600 1. Global data references must load the address from the GOT, via
4601 the PIC reg. An insn is emitted to do this load, and the reg is
4604 2. Static data references, constant pool addresses, and code labels
4605 compute the address as an offset from the GOT, whose base is in
4606 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4607 differentiate them from global data objects. The returned
4608 address is the PIC reg + an unspec constant.
4610 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4611 reg also appears in the address. */
4614 legitimize_pic_address (rtx orig, rtx reg)
4617 rtx addend = const0_rtx;
4620 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4622 if (GET_CODE (addr) == CONST)
4623 addr = XEXP (addr, 0);
4625 if (GET_CODE (addr) == PLUS)
4627 addend = XEXP (addr, 1);
4628 addr = XEXP (addr, 0);
4631 if ((GET_CODE (addr) == LABEL_REF
4632 || (SYMBOL_REF_P (addr) && s390_rel_address_ok_p (addr))
4633 || (GET_CODE (addr) == UNSPEC &&
4634 (XINT (addr, 1) == UNSPEC_GOTENT
4635 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4636 && GET_CODE (addend) == CONST_INT)
4638 /* This can be locally addressed. */
4640 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4641 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4642 gen_rtx_CONST (Pmode, addr) : addr);
4644 if (TARGET_CPU_ZARCH
4645 && larl_operand (const_addr, VOIDmode)
4646 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4647 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
4649 if (INTVAL (addend) & 1)
4651 /* LARL can't handle odd offsets, so emit a pair of LARL and LA. */
4653 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4655 if (!DISP_IN_RANGE (INTVAL (addend)))
4657 HOST_WIDE_INT even = INTVAL (addend) - 1;
4658 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4659 addr = gen_rtx_CONST (Pmode, addr);
4660 addend = const1_rtx;
4663 emit_move_insn (temp, addr);
4664 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4668 s390_load_address (reg, new_rtx);
4674 /* If the offset is even, we can just use LARL. This
4675 will happen automatically. */
4680 /* No larl - Access local symbols relative to the GOT. */
4682 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4684 if (reload_in_progress || reload_completed)
4685 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4687 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4688 if (addend != const0_rtx)
4689 addr = gen_rtx_PLUS (Pmode, addr, addend);
4690 addr = gen_rtx_CONST (Pmode, addr);
4691 addr = force_const_mem (Pmode, addr);
4692 emit_move_insn (temp, addr);
4694 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4697 s390_load_address (reg, new_rtx);
4702 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4704 /* A non-local symbol reference without addend.
4706 The symbol ref is wrapped into an UNSPEC to make sure the
4707 proper operand modifier (@GOT or @GOTENT) will be emitted.
4708 This will tell the linker to put the symbol into the GOT.
4710 Additionally the code dereferencing the GOT slot is emitted here.
4712 An addend to the symref needs to be added afterwards.
4713 legitimize_pic_address calls itself recursively to handle
4714 that case. So no need to do it here. */
4717 reg = gen_reg_rtx (Pmode);
4721 /* Use load relative if possible.
4722 lgrl <target>, sym@GOTENT */
4723 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4724 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4725 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4727 emit_move_insn (reg, new_rtx);
4730 else if (flag_pic == 1)
4732 /* Assume GOT offset is a valid displacement operand (< 4k
4733 or < 512k with z990). This is handled the same way in
4734 both 31- and 64-bit code (@GOT).
4735 lg <target>, sym@GOT(r12) */
4737 if (reload_in_progress || reload_completed)
4738 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4740 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4741 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4742 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4743 new_rtx = gen_const_mem (Pmode, new_rtx);
4744 emit_move_insn (reg, new_rtx);
4747 else if (TARGET_CPU_ZARCH)
4749 /* If the GOT offset might be >= 4k, we determine the position
4750 of the GOT entry via a PC-relative LARL (@GOTENT).
4751 larl temp, sym@GOTENT
4752 lg <target>, 0(temp) */
4754 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4756 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4757 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4759 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4760 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4761 emit_move_insn (temp, new_rtx);
4763 new_rtx = gen_const_mem (Pmode, temp);
4764 emit_move_insn (reg, new_rtx);
4770 /* If the GOT offset might be >= 4k, we have to load it
4771 from the literal pool (@GOT).
4773 lg temp, lit-litbase(r13)
4774 lg <target>, 0(temp)
4775 lit: .long sym@GOT */
4777 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4779 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4780 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4782 if (reload_in_progress || reload_completed)
4783 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4785 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4786 addr = gen_rtx_CONST (Pmode, addr);
4787 addr = force_const_mem (Pmode, addr);
4788 emit_move_insn (temp, addr);
4790 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4791 new_rtx = gen_const_mem (Pmode, new_rtx);
4792 emit_move_insn (reg, new_rtx);
4796 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4798 gcc_assert (XVECLEN (addr, 0) == 1);
4799 switch (XINT (addr, 1))
4801 /* These address symbols (or PLT slots) relative to the GOT
4802 (not GOT slots!). In general this will exceed the
4803 displacement range so these values belong in the literal
4804 pool. */
4807 new_rtx = force_const_mem (Pmode, orig);
4810 /* For -fPIC the GOT size might exceed the displacement
4811 range so make sure the value is in the literal pool. */
4814 new_rtx = force_const_mem (Pmode, orig);
4817 /* For @GOTENT larl is used. This is handled like local
4818 symbol refs. */
4823 /* @PLT is OK as is on 64-bit, must be converted to
4824 GOT-relative @PLTOFF on 31-bit. */
4826 if (!TARGET_CPU_ZARCH)
4828 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4830 if (reload_in_progress || reload_completed)
4831 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4833 addr = XVECEXP (addr, 0, 0);
4834 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4836 if (addend != const0_rtx)
4837 addr = gen_rtx_PLUS (Pmode, addr, addend);
4838 addr = gen_rtx_CONST (Pmode, addr);
4839 addr = force_const_mem (Pmode, addr);
4840 emit_move_insn (temp, addr);
4842 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4845 s390_load_address (reg, new_rtx);
4850 /* On 64 bit larl can be used. This case is handled like
4851 local symbol refs. */
4855 /* Everything else cannot happen. */
4860 else if (addend != const0_rtx)
4862 /* Otherwise, compute the sum. */
4864 rtx base = legitimize_pic_address (addr, reg);
4865 new_rtx = legitimize_pic_address (addend,
4866 base == reg ? NULL_RTX : reg);
4867 if (GET_CODE (new_rtx) == CONST_INT)
4868 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4871 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4873 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4874 new_rtx = XEXP (new_rtx, 1);
4876 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4879 if (GET_CODE (new_rtx) == CONST)
4880 new_rtx = XEXP (new_rtx, 0);
4881 new_rtx = force_operand (new_rtx, 0);
4887 /* Load the thread pointer into a register. */
4890 s390_get_thread_pointer (void)
4892 rtx tp = gen_reg_rtx (Pmode);
4894 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4895 mark_reg_pointer (tp, BITS_PER_WORD);
4900 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
4901 in s390_tls_symbol which always refers to __tls_get_offset.
4902 The returned offset is written to RESULT_REG and a USE rtx is
4903 generated for TLS_CALL. */
4905 static GTY(()) rtx s390_tls_symbol;
4908 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4913 emit_insn (s390_load_got ());
4915 if (!s390_tls_symbol)
4916 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4918 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4919 gen_rtx_REG (Pmode, RETURN_REGNUM));
4921 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4922 RTL_CONST_CALL_P (insn) = 1;
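/* Background (a sketch of the s390 TLS ABI): __tls_get_offset - unlike
   __tls_get_addr on most targets - roughly expects the GOT offset of the
   TLS_GD/TLS_LDM entry in r2 and the GOT pointer in r12, and returns in
   r2 an offset relative to the thread pointer; this is why the callers
   below add the thread pointer to the result. */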
4925 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4926 this (thread-local) address. REG may be used as temporary. */
4929 legitimize_tls_address (rtx addr, rtx reg)
4931 rtx new_rtx, tls_call, temp, base, r2;
4934 if (GET_CODE (addr) == SYMBOL_REF)
4935 switch (tls_symbolic_operand (addr))
4937 case TLS_MODEL_GLOBAL_DYNAMIC:
4939 r2 = gen_rtx_REG (Pmode, 2);
4940 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4941 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4942 new_rtx = force_const_mem (Pmode, new_rtx);
4943 emit_move_insn (r2, new_rtx);
4944 s390_emit_tls_call_insn (r2, tls_call);
4945 insn = get_insns ();
4948 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4949 temp = gen_reg_rtx (Pmode);
4950 emit_libcall_block (insn, temp, r2, new_rtx);
4952 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4955 s390_load_address (reg, new_rtx);
4960 case TLS_MODEL_LOCAL_DYNAMIC:
4962 r2 = gen_rtx_REG (Pmode, 2);
4963 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4964 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4965 new_rtx = force_const_mem (Pmode, new_rtx);
4966 emit_move_insn (r2, new_rtx);
4967 s390_emit_tls_call_insn (r2, tls_call);
4968 insn = get_insns ();
4971 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4972 temp = gen_reg_rtx (Pmode);
4973 emit_libcall_block (insn, temp, r2, new_rtx);
4975 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4976 base = gen_reg_rtx (Pmode);
4977 s390_load_address (base, new_rtx);
4979 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4980 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4981 new_rtx = force_const_mem (Pmode, new_rtx);
4982 temp = gen_reg_rtx (Pmode);
4983 emit_move_insn (temp, new_rtx);
4985 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4988 s390_load_address (reg, new_rtx);
4993 case TLS_MODEL_INITIAL_EXEC:
4996 /* Assume GOT offset < 4k. This is handled the same way
4997 in both 31- and 64-bit code. */
4999 if (reload_in_progress || reload_completed)
5000 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5002 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5003 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5004 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
5005 new_rtx = gen_const_mem (Pmode, new_rtx);
5006 temp = gen_reg_rtx (Pmode);
5007 emit_move_insn (temp, new_rtx);
5009 else if (TARGET_CPU_ZARCH)
5011 /* If the GOT offset might be >= 4k, we determine the position
5012 of the GOT entry via a PC-relative LARL. */
5014 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5015 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5016 temp = gen_reg_rtx (Pmode);
5017 emit_move_insn (temp, new_rtx);
5019 new_rtx = gen_const_mem (Pmode, temp);
5020 temp = gen_reg_rtx (Pmode);
5021 emit_move_insn (temp, new_rtx);
5025 /* If the GOT offset might be >= 4k, we have to load it
5026 from the literal pool. */
5028 if (reload_in_progress || reload_completed)
5029 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5031 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5032 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5033 new_rtx = force_const_mem (Pmode, new_rtx);
5034 temp = gen_reg_rtx (Pmode);
5035 emit_move_insn (temp, new_rtx);
5037 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
5038 new_rtx = gen_const_mem (Pmode, new_rtx);
5040 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5041 temp = gen_reg_rtx (Pmode);
5042 emit_insn (gen_rtx_SET (temp, new_rtx));
5046 /* In position-dependent code, load the absolute address of
5047 the GOT entry from the literal pool. */
5049 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5050 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5051 new_rtx = force_const_mem (Pmode, new_rtx);
5052 temp = gen_reg_rtx (Pmode);
5053 emit_move_insn (temp, new_rtx);
5055 new_rtx = temp;
5056 new_rtx = gen_const_mem (Pmode, new_rtx);
5057 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5058 temp = gen_reg_rtx (Pmode);
5059 emit_insn (gen_rtx_SET (temp, new_rtx));
5062 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5065 s390_load_address (reg, new_rtx);
5070 case TLS_MODEL_LOCAL_EXEC:
5071 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5072 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5073 new_rtx = force_const_mem (Pmode, new_rtx);
5074 temp = gen_reg_rtx (Pmode);
5075 emit_move_insn (temp, new_rtx);
5077 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5080 s390_load_address (reg, new_rtx);
5089 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5091 switch (XINT (XEXP (addr, 0), 1))
5093 case UNSPEC_INDNTPOFF:
5094 gcc_assert (TARGET_CPU_ZARCH);
5103 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5104 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5106 new_rtx = XEXP (XEXP (addr, 0), 0);
5107 if (GET_CODE (new_rtx) != SYMBOL_REF)
5108 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5110 new_rtx = legitimize_tls_address (new_rtx, reg);
5111 new_rtx = plus_constant (Pmode, new_rtx,
5112 INTVAL (XEXP (XEXP (addr, 0), 1)));
5113 new_rtx = force_operand (new_rtx, 0);
5117 gcc_unreachable (); /* for now ... */
5122 /* Emit insns making the address in operands[1] valid for a standard
5123 move to operands[0]. operands[1] is replaced by an address which
5124 should be used instead of the former RTX to emit the move
5125 pattern. */
5128 emit_symbolic_move (rtx *operands)
5130 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5132 if (GET_CODE (operands[0]) == MEM)
5133 operands[1] = force_reg (Pmode, operands[1]);
5134 else if (TLS_SYMBOLIC_CONST (operands[1]))
5135 operands[1] = legitimize_tls_address (operands[1], temp);
5137 operands[1] = legitimize_pic_address (operands[1], temp);
5140 /* Try machine-dependent ways of modifying an illegitimate address X
5141 to be legitimate. If we find one, return the new, valid address.
5143 OLDX is the address as it was before break_out_memory_refs was called.
5144 In some cases it is useful to look at this to decide what needs to be done.
5146 MODE is the mode of the operand pointed to by X.
5148 When -fpic is used, special handling is needed for symbolic references.
5149 See comments by legitimize_pic_address for details. */
5152 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5153 machine_mode mode ATTRIBUTE_UNUSED)
5155 rtx constant_term = const0_rtx;
5157 if (TLS_SYMBOLIC_CONST (x))
5159 x = legitimize_tls_address (x, 0);
5161 if (s390_legitimate_address_p (mode, x, FALSE))
5164 else if (GET_CODE (x) == PLUS
5165 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5166 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5172 if (SYMBOLIC_CONST (x)
5173 || (GET_CODE (x) == PLUS
5174 && (SYMBOLIC_CONST (XEXP (x, 0))
5175 || SYMBOLIC_CONST (XEXP (x, 1)))))
5176 x = legitimize_pic_address (x, 0);
5178 if (s390_legitimate_address_p (mode, x, FALSE))
5182 x = eliminate_constant_term (x, &constant_term);
5184 /* Optimize loading of large displacements by splitting them
5185 into the multiple of 4K and the rest; this allows the
5186 former to be CSE'd if possible.
5188 Don't do this if the displacement is added to a register
5189 pointing into the stack frame, as the offsets will
5190 change later anyway. */
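/* For example: a displacement of 0x12345 is split into
   lower = 0x12345 & 0xfff = 0x345, which fits the 12-bit displacement
   field, and upper = 0x12345 ^ 0x345 = 0x12000, which is loaded into a
   register below and can be shared between accesses by CSE. */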
5192 if (GET_CODE (constant_term) == CONST_INT
5193 && !TARGET_LONG_DISPLACEMENT
5194 && !DISP_IN_RANGE (INTVAL (constant_term))
5195 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5197 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5198 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5200 rtx temp = gen_reg_rtx (Pmode);
5201 rtx val = force_operand (GEN_INT (upper), temp);
5203 emit_move_insn (temp, val);
5205 x = gen_rtx_PLUS (Pmode, x, temp);
5206 constant_term = GEN_INT (lower);
5209 if (GET_CODE (x) == PLUS)
5211 if (GET_CODE (XEXP (x, 0)) == REG)
5213 rtx temp = gen_reg_rtx (Pmode);
5214 rtx val = force_operand (XEXP (x, 1), temp);
5216 emit_move_insn (temp, val);
5218 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5221 else if (GET_CODE (XEXP (x, 1)) == REG)
5223 rtx temp = gen_reg_rtx (Pmode);
5224 rtx val = force_operand (XEXP (x, 0), temp);
5226 emit_move_insn (temp, val);
5228 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5232 if (constant_term != const0_rtx)
5233 x = gen_rtx_PLUS (Pmode, x, constant_term);
5238 /* Try a machine-dependent way of reloading an illegitimate address AD
5239 operand. If we find one, push the reload and return the new address.
5241 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5242 and TYPE is the reload type of the current reload. */
5245 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5246 int opnum, int type)
5248 if (!optimize || TARGET_LONG_DISPLACEMENT)
5251 if (GET_CODE (ad) == PLUS)
5253 rtx tem = simplify_binary_operation (PLUS, Pmode,
5254 XEXP (ad, 0), XEXP (ad, 1));
5259 if (GET_CODE (ad) == PLUS
5260 && GET_CODE (XEXP (ad, 0)) == REG
5261 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5262 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5264 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5265 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5266 rtx cst, tem, new_rtx;
5268 cst = GEN_INT (upper);
5269 if (!legitimate_reload_constant_p (cst))
5270 cst = force_const_mem (Pmode, cst);
5272 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5273 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5275 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5276 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5277 opnum, (enum reload_type) type);
5284 /* Emit code to move LEN bytes from SRC to DST. */
5287 s390_expand_movmem (rtx dst, rtx src, rtx len)
5289 /* When tuning for z10 or higher we rely on the Glibc functions to
5290 do the right thing. Inline code is generated only for constant
5291 lengths below 64k. */
5292 if (s390_tune >= PROCESSOR_2097_Z10
5293 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5296 /* Expand memcpy for constant length operands without a loop if it
5297 is shorter that way.
5299 With a constant length argument a
5300 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
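/* For example, a constant length of 600 emits three MVCs via the loop
   below (illustrative assembly):
   mvc 0(256,dst),0(src)
   mvc 256(256,dst),256(src)
   mvc 512(88,dst),512(src) */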
5301 if (GET_CODE (len) == CONST_INT
5302 && INTVAL (len) >= 0
5303 && INTVAL (len) <= 256 * 6
5304 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5308 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5310 rtx newdst = adjust_address (dst, BLKmode, o);
5311 rtx newsrc = adjust_address (src, BLKmode, o);
5312 emit_insn (gen_movmem_short (newdst, newsrc,
5313 GEN_INT (l > 256 ? 255 : l - 1)));
5317 else if (TARGET_MVCLE)
5319 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5324 rtx dst_addr, src_addr, count, blocks, temp;
5325 rtx_code_label *loop_start_label = gen_label_rtx ();
5326 rtx_code_label *loop_end_label = gen_label_rtx ();
5327 rtx_code_label *end_label = gen_label_rtx ();
5330 mode = GET_MODE (len);
5331 if (mode == VOIDmode)
5334 dst_addr = gen_reg_rtx (Pmode);
5335 src_addr = gen_reg_rtx (Pmode);
5336 count = gen_reg_rtx (mode);
5337 blocks = gen_reg_rtx (mode);
5339 convert_move (count, len, 1);
5340 emit_cmp_and_jump_insns (count, const0_rtx,
5341 EQ, NULL_RTX, mode, 1, end_label);
5343 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5344 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5345 dst = change_address (dst, VOIDmode, dst_addr);
5346 src = change_address (src, VOIDmode, src_addr);
5348 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5351 emit_move_insn (count, temp);
5353 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5356 emit_move_insn (blocks, temp);
5358 emit_cmp_and_jump_insns (blocks, const0_rtx,
5359 EQ, NULL_RTX, mode, 1, loop_end_label);
5361 emit_label (loop_start_label);
5364 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5368 /* Issue a read prefetch for the +3 cache line. */
5369 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5370 const0_rtx, const0_rtx);
5371 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5372 emit_insn (prefetch);
5374 /* Issue a write prefetch for the +3 cache line. */
5375 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5376 const1_rtx, const0_rtx);
5377 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5378 emit_insn (prefetch);
5381 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5382 s390_load_address (dst_addr,
5383 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5384 s390_load_address (src_addr,
5385 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5387 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5390 emit_move_insn (blocks, temp);
5392 emit_cmp_and_jump_insns (blocks, const0_rtx,
5393 EQ, NULL_RTX, mode, 1, loop_end_label);
5395 emit_jump (loop_start_label);
5396 emit_label (loop_end_label);
5398 emit_insn (gen_movmem_short (dst, src,
5399 convert_to_mode (Pmode, count, 1)));
5400 emit_label (end_label);
5405 /* Emit code to set LEN bytes at DST to VAL.
5406 Make use of clrmem if VAL is zero. */
5409 s390_expand_setmem (rtx dst, rtx len, rtx val)
5411 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
5414 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5416 /* Expand setmem/clrmem for a constant length operand without a
5417 loop if it will be shorter that way.
5418 With a constant length and without pfd argument a
5419 clrmem loop is 32 bytes -> 5.3 * xc
5420 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
5421 if (GET_CODE (len) == CONST_INT
5422 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5423 || INTVAL (len) <= 257 * 3)
5424 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5428 if (val == const0_rtx)
5429 /* clrmem: emit 256 byte blockwise XCs. */
5430 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5432 rtx newdst = adjust_address (dst, BLKmode, o);
5433 emit_insn (gen_clrmem_short (newdst,
5434 GEN_INT (l > 256 ? 255 : l - 1)));
5437 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5438 setting first byte to val and using a 256 byte mvc with one
5439 byte overlap to propagate the byte. */
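/* MVC operates one byte at a time from left to right, so a destructive
   one-byte overlap replicates the first byte, e.g. (illustrative):
   mvi 0(r3),0x41        # dst[0] = 0x41
   mvc 1(255,r3),0(r3)   # dst[i] = dst[i-1] for i = 1..255 */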
5440 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5442 rtx newdst = adjust_address (dst, BLKmode, o);
5443 emit_move_insn (adjust_address (dst, QImode, o), val);
5446 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5447 emit_insn (gen_movmem_short (newdstp1, newdst,
5448 GEN_INT (l > 257 ? 255 : l - 2)));
5453 else if (TARGET_MVCLE)
5455 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5457 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5460 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5466 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5467 rtx_code_label *loop_start_label = gen_label_rtx ();
5468 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5469 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5470 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5473 mode = GET_MODE (len);
5474 if (mode == VOIDmode)
5477 dst_addr = gen_reg_rtx (Pmode);
5478 count = gen_reg_rtx (mode);
5479 blocks = gen_reg_rtx (mode);
5481 convert_move (count, len, 1);
5482 emit_cmp_and_jump_insns (count, const0_rtx,
5483 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5484 profile_probability::very_unlikely ());
5486 /* We need to make a copy of the target address since memset is
5487 supposed to return it unmodified. The copy has to be made here
5488 already since the new reg is used at onebyte_end_label. */
5489 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5490 dst = change_address (dst, VOIDmode, dst_addr);
5492 if (val != const0_rtx)
5494 /* When using the overlapping mvc the original target
5495 address is only accessed as a single-byte entity (even by
5496 the mvc reading this value). */
5497 set_mem_size (dst, 1);
5498 dstp1 = adjust_address (dst, VOIDmode, 1);
5499 emit_cmp_and_jump_insns (count,
5500 const1_rtx, EQ, NULL_RTX, mode, 1,
5502 profile_probability::very_unlikely ());
5505 /* There is one unconditional (mvi+mvc)/xc after the loop
5506 dealing with the rest of the bytes, subtracting two (mvi+mvc)
5507 or one (xc) here leaves this number of bytes to be handled by
5508 it. */
5509 temp = expand_binop (mode, add_optab, count,
5510 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5511 count, 1, OPTAB_DIRECT);
5513 emit_move_insn (count, temp);
5515 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5518 emit_move_insn (blocks, temp);
5520 emit_cmp_and_jump_insns (blocks, const0_rtx,
5521 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5523 emit_jump (loop_start_label);
5525 if (val != const0_rtx)
5527 /* The 1 byte != 0 special case. Not handled efficiently
5528 since we require two jumps for that. However, this
5529 should be very rare. */
5530 emit_label (onebyte_end_label);
5531 emit_move_insn (adjust_address (dst, QImode, 0), val);
5532 emit_jump (zerobyte_end_label);
5535 emit_label (loop_start_label);
5538 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5540 /* Issue a write prefetch for the +4 cache line. */
5541 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5543 const1_rtx, const0_rtx);
5544 emit_insn (prefetch);
5545 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5548 if (val == const0_rtx)
5549 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5552 /* Set the first byte in the block to the value and use an
5553 overlapping mvc for the block. */
5554 emit_move_insn (adjust_address (dst, QImode, 0), val);
5555 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5557 s390_load_address (dst_addr,
5558 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5560 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5563 emit_move_insn (blocks, temp);
5565 emit_cmp_and_jump_insns (blocks, const0_rtx,
5566 NE, NULL_RTX, mode, 1, loop_start_label);
5568 emit_label (restbyte_end_label);
5570 if (val == const0_rtx)
5571 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5574 /* Set the first byte in the block to the value and use an
5575 overlapping mvc for the block. */
5576 emit_move_insn (adjust_address (dst, QImode, 0), val);
5577 /* execute only uses the lowest 8 bits of count - that's
5578 exactly what we need here. */
5579 emit_insn (gen_movmem_short (dstp1, dst,
5580 convert_to_mode (Pmode, count, 1)));
5583 emit_label (zerobyte_end_label);
5587 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5588 and return the result in TARGET. */
5591 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5593 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5596 /* When tuning for z10 or higher we rely on the Glibc functions to
5597 do the right thing. Inline code is generated only for constant
5598 lengths below 64k. */
5599 if (s390_tune >= PROCESSOR_2097_Z10
5600 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5603 /* As the result of CMPINT is inverted compared to what we need,
5604 we have to swap the operands. */
5605 tmp = op0; op0 = op1; op1 = tmp;
5607 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5609 if (INTVAL (len) > 0)
5611 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5612 emit_insn (gen_cmpint (target, ccreg));
5615 emit_move_insn (target, const0_rtx);
5617 else if (TARGET_MVCLE)
5619 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5620 emit_insn (gen_cmpint (target, ccreg));
5624 rtx addr0, addr1, count, blocks, temp;
5625 rtx_code_label *loop_start_label = gen_label_rtx ();
5626 rtx_code_label *loop_end_label = gen_label_rtx ();
5627 rtx_code_label *end_label = gen_label_rtx ();
5630 mode = GET_MODE (len);
5631 if (mode == VOIDmode)
5634 addr0 = gen_reg_rtx (Pmode);
5635 addr1 = gen_reg_rtx (Pmode);
5636 count = gen_reg_rtx (mode);
5637 blocks = gen_reg_rtx (mode);
5639 convert_move (count, len, 1);
5640 emit_cmp_and_jump_insns (count, const0_rtx,
5641 EQ, NULL_RTX, mode, 1, end_label);
5643 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5644 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5645 op0 = change_address (op0, VOIDmode, addr0);
5646 op1 = change_address (op1, VOIDmode, addr1);
5648 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5651 emit_move_insn (count, temp);
5653 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5656 emit_move_insn (blocks, temp);
5658 emit_cmp_and_jump_insns (blocks, const0_rtx,
5659 EQ, NULL_RTX, mode, 1, loop_end_label);
5661 emit_label (loop_start_label);
5664 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5668 /* Issue a read prefetch for the +2 cache line of operand 1. */
5669 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5670 const0_rtx, const0_rtx);
5671 emit_insn (prefetch);
5672 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5674 /* Issue a read prefetch for the +2 cache line of operand 2. */
5675 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5676 const0_rtx, const0_rtx);
5677 emit_insn (prefetch);
5678 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5681 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5682 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5683 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5684 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5685 temp = gen_rtx_SET (pc_rtx, temp);
5686 emit_jump_insn (temp);
5688 s390_load_address (addr0,
5689 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5690 s390_load_address (addr1,
5691 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5693 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5696 emit_move_insn (blocks, temp);
5698 emit_cmp_and_jump_insns (blocks, const0_rtx,
5699 EQ, NULL_RTX, mode, 1, loop_end_label);
5701 emit_jump (loop_start_label);
5702 emit_label (loop_end_label);
5704 emit_insn (gen_cmpmem_short (op0, op1,
5705 convert_to_mode (Pmode, count, 1)));
5706 emit_label (end_label);
5708 emit_insn (gen_cmpint (target, ccreg));
5713 /* Emit a conditional jump to LABEL for condition code mask MASK using
5714 comparison operator COMPARISON. Return the emitted jump insn. */
5717 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5721 gcc_assert (comparison == EQ || comparison == NE);
5722 gcc_assert (mask > 0 && mask < 15);
5724 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5725 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5726 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5727 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5728 temp = gen_rtx_SET (pc_rtx, temp);
5729 return emit_jump_insn (temp);
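/* Note on MASK (a sketch, following the machine's branch-mask
   convention): bits 8, 4, 2 and 1 select CC0..CC3. EQ branches when
   the current CC is selected by MASK, NE when it is not; the strlen
   loop below, for instance, uses (8, NE) to keep looping until the
   vector string instruction sets CC0. */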
5732 /* Emit the instructions to implement strlen of STRING and store the
5733 result in TARGET. The string has the known ALIGNMENT. This
5734 version uses vector instructions and is therefore not appropriate
5735 for targets prior to z13. */
5738 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5740 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5741 rtx str_reg = gen_reg_rtx (V16QImode);
5742 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5743 rtx str_idx_reg = gen_reg_rtx (Pmode);
5744 rtx result_reg = gen_reg_rtx (V16QImode);
5745 rtx is_aligned_label = gen_label_rtx ();
5746 rtx into_loop_label = NULL_RTX;
5747 rtx loop_start_label = gen_label_rtx ();
5749 rtx len = gen_reg_rtx (QImode);
5752 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5753 emit_move_insn (str_idx_reg, const0_rtx);
5755 if (INTVAL (alignment) < 16)
5757 /* Check whether the address happens to be aligned properly and,
5758 if so, jump directly to the aligned loop. */
5759 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5760 str_addr_base_reg, GEN_INT (15)),
5761 const0_rtx, EQ, NULL_RTX,
5762 Pmode, 1, is_aligned_label);
5764 temp = gen_reg_rtx (Pmode);
5765 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5766 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5767 gcc_assert (REG_P (temp));
5768 highest_index_to_load_reg =
5769 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5770 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5771 gcc_assert (REG_P (highest_index_to_load_reg));
5772 emit_insn (gen_vllv16qi (str_reg,
5773 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5774 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5776 into_loop_label = gen_label_rtx ();
5777 s390_emit_jump (into_loop_label, NULL_RTX);
5781 emit_label (is_aligned_label);
5782 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5784 /* Reaching this point we are only performing 16-byte aligned
5785 loads. */
5786 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5788 emit_label (loop_start_label);
5789 LABEL_NUSES (loop_start_label) = 1;
5791 /* Load 16 bytes of the string into VR. */
5792 emit_move_insn (str_reg,
5793 gen_rtx_MEM (V16QImode,
5794 gen_rtx_PLUS (Pmode, str_idx_reg,
5795 str_addr_base_reg)));
5796 if (into_loop_label != NULL_RTX)
5798 emit_label (into_loop_label);
5799 LABEL_NUSES (into_loop_label) = 1;
5802 /* Increment string index by 16 bytes. */
5803 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5804 str_idx_reg, 1, OPTAB_DIRECT);
5806 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5807 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
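/* Byte element 7 of the vfene result holds the index of the first zero
   byte, or 16 if none was found among the 16 bytes loaded (compare the
   corresponding comment in s390_expand_vec_movstr below); the CCRAW
   branch keeps looping until a zero byte shows up, and the vec_extract
   below then picks the index out of element 7. */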
5809 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5811 profile_probability::very_likely ().to_reg_br_prob_note ());
5812 emit_insn (gen_vec_extractv16qiqi (len, result_reg, GEN_INT (7)));
5814 /* If the string pointer wasn't aligned we have loaded less than 16
5815 bytes and the remaining bytes got filled with zeros (by vll).
5816 Now we have to check whether the resulting index lies within the
5817 bytes actually part of the string. */
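/* Worked example: for a string starting at an address with addr & 15
   == 3, vll loaded 13 bytes (highest_index_to_load_reg == 12). If
   vfene reports an index greater than 12, the zero byte stems from the
   vll padding rather than from the string, so the code below advances
   the index to the number of bytes actually consumed and falls back
   into the aligned loop, which then continues at a 16-byte boundary. */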
5819 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5820 highest_index_to_load_reg);
5821 s390_load_address (highest_index_to_load_reg,
5822 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5825 emit_insn (gen_movdicc (str_idx_reg, cond,
5826 highest_index_to_load_reg, str_idx_reg));
5828 emit_insn (gen_movsicc (str_idx_reg, cond,
5829 highest_index_to_load_reg, str_idx_reg));
5831 add_reg_br_prob_note (s390_emit_jump (is_aligned_label, cond),
5832 profile_probability::very_unlikely ());
5834 expand_binop (Pmode, add_optab, str_idx_reg,
5835 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5836 /* FIXME: len is already zero extended - so avoid the llgcr emitted
5837 by the conversion below. */
5838 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5839 convert_to_mode (Pmode, len, 1),
5840 target, 1, OPTAB_DIRECT);
5842 emit_move_insn (target, temp);
5846 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5848 rtx temp = gen_reg_rtx (Pmode);
5849 rtx src_addr = XEXP (src, 0);
5850 rtx dst_addr = XEXP (dst, 0);
5851 rtx src_addr_reg = gen_reg_rtx (Pmode);
5852 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5853 rtx offset = gen_reg_rtx (Pmode);
5854 rtx vsrc = gen_reg_rtx (V16QImode);
5855 rtx vpos = gen_reg_rtx (V16QImode);
5856 rtx loadlen = gen_reg_rtx (SImode);
5857 rtx gpos_qi = gen_reg_rtx (QImode);
5858 rtx gpos = gen_reg_rtx (SImode);
5859 rtx done_label = gen_label_rtx ();
5860 rtx loop_label = gen_label_rtx ();
5861 rtx exit_label = gen_label_rtx ();
5862 rtx full_label = gen_label_rtx ();
5864 /* Perform a quick check for string ending on the first up to 16
5865 bytes and exit early if successful. */
5867 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5868 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5869 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5870 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
5871 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5872 /* gpos is the byte index if a zero was found and 16 otherwise.
5873 So if it is lower than the loaded bytes we have a hit. */
5874 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5876 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5878 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5880 emit_jump (exit_label);
5883 emit_label (full_label);
5884 LABEL_NUSES (full_label) = 1;
5886 /* Calculate `offset' so that src + offset points to the last byte
5887 before 16 byte alignment. */
5889 /* temp = src_addr & 0xf */
5890 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5893 /* offset = 0xf - temp */
5894 emit_move_insn (offset, GEN_INT (15));
5895 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5898 /* Store `offset' bytes in the destination string. The quick check
5899 has loaded at least `offset' bytes into vsrc. */
5901 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5903 /* Advance to the next byte to be loaded. */
5904 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5907 /* Make sure the addresses are single regs which can be used as a
5908 base address. */
5909 emit_move_insn (src_addr_reg, src_addr);
5910 emit_move_insn (dst_addr_reg, dst_addr);
5914 emit_label (loop_label);
5915 LABEL_NUSES (loop_label) = 1;
5917 emit_move_insn (vsrc,
5918 gen_rtx_MEM (V16QImode,
5919 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5921 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5922 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5923 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5924 REG_BR_PROB, profile_probability::very_unlikely ()
5925 .to_reg_br_prob_note ());
5927 emit_move_insn (gen_rtx_MEM (V16QImode,
5928 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5931 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5932 offset, 1, OPTAB_DIRECT);
5934 emit_jump (loop_label);
5939 /* We are done. Add the offset of the zero character to the dst_addr
5940 pointer to get the result. */
5942 emit_label (done_label);
5943 LABEL_NUSES (done_label) = 1;
5945 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5948 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
5949 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5951 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5953 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5958 emit_label (exit_label);
5959 LABEL_NUSES (exit_label) = 1;
5963 /* Expand conditional increment or decrement using alc/slb instructions.
5964 Should generate code setting DST to either SRC or SRC + INCREMENT,
5965 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5966 Returns true if successful, false otherwise.
5968 That makes it possible to implement some if-constructs without jumps e.g.:
5969 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5970 unsigned int a, b, c;
5971 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5972 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5973 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5974 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5976 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5977 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5978 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5979 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5980 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
5983 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5984 rtx dst, rtx src, rtx increment)
5986 machine_mode cmp_mode;
5987 machine_mode cc_mode;
5993 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5994 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5996 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5997 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
6002 /* Try ADD LOGICAL WITH CARRY. */
6003 if (increment == const1_rtx)
6005 /* Determine CC mode to use. */
6006 if (cmp_code == EQ || cmp_code == NE)
6008 if (cmp_op1 != const0_rtx)
6010 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6011 NULL_RTX, 0, OPTAB_WIDEN);
6012 cmp_op1 = const0_rtx;
6015 cmp_code = cmp_code == EQ ? LEU : GTU;
6018 if (cmp_code == LTU || cmp_code == LEU)
6023 cmp_code = swap_condition (cmp_code);
6040 /* Emit comparison instruction pattern. */
6041 if (!register_operand (cmp_op0, cmp_mode))
6042 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6044 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6045 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6046 /* We use insn_invalid_p here to add clobbers if required. */
6047 ret = insn_invalid_p (emit_insn (insn), false);
6050 /* Emit ALC instruction pattern. */
6051 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6052 gen_rtx_REG (cc_mode, CC_REGNUM),
6055 if (src != const0_rtx)
6057 if (!register_operand (src, GET_MODE (dst)))
6058 src = force_reg (GET_MODE (dst), src);
6060 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6061 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
6064 p = rtvec_alloc (2);
6066 gen_rtx_SET (dst, op_res);
6068 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6069 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6074 /* Try SUBTRACT LOGICAL WITH BORROW. */
6075 if (increment == constm1_rtx)
6077 /* Determine CC mode to use. */
6078 if (cmp_code == EQ || cmp_code == NE)
6080 if (cmp_op1 != const0_rtx)
6082 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6083 NULL_RTX, 0, OPTAB_WIDEN);
6084 cmp_op1 = const0_rtx;
6087 cmp_code = cmp_code == EQ ? LEU : GTU;
6090 if (cmp_code == GTU || cmp_code == GEU)
6095 cmp_code = swap_condition (cmp_code);
6112 /* Emit comparison instruction pattern. */
6113 if (!register_operand (cmp_op0, cmp_mode))
6114 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6116 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6117 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6118 /* We use insn_invalid_p here to add clobbers if required. */
6119 ret = insn_invalid_p (emit_insn (insn), false);
6122 /* Emit SLB instruction pattern. */
6123 if (!register_operand (src, GET_MODE (dst)))
6124 src = force_reg (GET_MODE (dst), src);
6126 op_res = gen_rtx_MINUS (GET_MODE (dst),
6127 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6128 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6129 gen_rtx_REG (cc_mode, CC_REGNUM),
6131 p = rtvec_alloc (2);
6133 gen_rtx_SET (dst, op_res);
6135 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6136 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6144 /* Expand code for the insv template. Return true if successful. */
6147 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6149 int bitsize = INTVAL (op1);
6150 int bitpos = INTVAL (op2);
6151 machine_mode mode = GET_MODE (dest);
6153 int smode_bsize, mode_bsize;
6156 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6159 /* Generate INSERT IMMEDIATE (IILL et al). */
6160 /* (set (ze (reg)) (const_int)). */
6162 && register_operand (dest, word_mode)
6163 && (bitpos % 16) == 0
6164 && (bitsize % 16) == 0
6165 && const_int_operand (src, VOIDmode))
6167 HOST_WIDE_INT val = INTVAL (src);
6168 int regpos = bitpos + bitsize;
6170 while (regpos > bitpos)
6172 machine_mode putmode;
6175 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6180 putsize = GET_MODE_BITSIZE (putmode);
6182 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6185 gen_int_mode (val, putmode));
6188 gcc_assert (regpos == bitpos);
6192 smode = smallest_int_mode_for_size (bitsize);
6193 smode_bsize = GET_MODE_BITSIZE (smode);
6194 mode_bsize = GET_MODE_BITSIZE (mode);
6196 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6198 && (bitsize % BITS_PER_UNIT) == 0
6200 && (register_operand (src, word_mode)
6201 || const_int_operand (src, VOIDmode)))
6203 /* Emit standard pattern if possible. */
6204 if (smode_bsize == bitsize)
6206 emit_move_insn (adjust_address (dest, smode, 0),
6207 gen_lowpart (smode, src));
6211 /* (set (ze (mem)) (const_int)). */
6212 else if (const_int_operand (src, VOIDmode))
6214 int size = bitsize / BITS_PER_UNIT;
6215 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6217 UNITS_PER_WORD - size);
6219 dest = adjust_address (dest, BLKmode, 0);
6220 set_mem_size (dest, size);
6221 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6225 /* (set (ze (mem)) (reg)). */
6226 else if (register_operand (src, word_mode))
6229 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6233 /* Emit st,stcmh sequence. */
6234 int stcmh_width = bitsize - 32;
6235 int size = stcmh_width / BITS_PER_UNIT;
6237 emit_move_insn (adjust_address (dest, SImode, size),
6238 gen_lowpart (SImode, src));
6239 set_mem_size (dest, size);
6240 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6241 GEN_INT (stcmh_width),
6243 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6249 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6250 if ((bitpos % BITS_PER_UNIT) == 0
6251 && (bitsize % BITS_PER_UNIT) == 0
6252 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6254 && (mode == DImode || mode == SImode)
6255 && register_operand (dest, mode))
6257 /* Emit a strict_low_part pattern if possible. */
6258 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6260 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6261 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6262 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6263 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6267 /* ??? There are more powerful versions of ICM that are not
6268 completely represented in the md file. */
6271 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6272 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6274 machine_mode mode_s = GET_MODE (src);
6276 if (CONSTANT_P (src))
6278 /* For constant zero values the representation with AND
6279 appears to be folded in more situations than the (set
6280 (zero_extract) ...).
6281 We only do this when the start and end of the bitfield
6282 remain in the same SImode chunk. That way nihf or nilf
6283 can be used.
6284 The AND patterns might still generate a risbg for this. */
6285 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6288 src = force_reg (mode, src);
6290 else if (mode_s != mode)
6292 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6293 src = force_reg (mode_s, src);
6294 src = gen_lowpart (mode, src);
6297 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
6298 op = gen_rtx_SET (op, src);
6302 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6303 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6313 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6314 register that holds VAL of mode MODE shifted by COUNT bits. */
6317 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6319 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6320 NULL_RTX, 1, OPTAB_DIRECT);
6321 return expand_simple_binop (SImode, ASHIFT, val, count,
6322 NULL_RTX, 1, OPTAB_DIRECT);
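/* For example: for MODE == QImode and COUNT == 16 this computes
   (val & 0xff) << 16, placing the byte at bits 16..23 of the SImode
   word - matching the bit offsets computed by
   init_alignment_context. */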
6325 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6326 the result in TARGET. */
6329 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6330 rtx cmp_op1, rtx cmp_op2)
6332 machine_mode mode = GET_MODE (target);
6333 bool neg_p = false, swap_p = false;
6336 if (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_VECTOR_FLOAT)
6340 /* NE a != b -> !(a == b) */
6341 case NE: cond = EQ; neg_p = true; break;
6342 /* UNGT a u> b -> !(b >= a) */
6343 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6344 /* UNGE a u>= b -> !(b > a) */
6345 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6346 /* LE: a <= b -> b >= a */
6347 case LE: cond = GE; swap_p = true; break;
6348 /* UNLE: a u<= b -> !(a > b) */
6349 case UNLE: cond = GT; neg_p = true; break;
6350 /* LT: a < b -> b > a */
6351 case LT: cond = GT; swap_p = true; break;
6352 /* UNLT: a u< b -> !(a >= b) */
6353 case UNLT: cond = GE; neg_p = true; break;
6355 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6358 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6361 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6364 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6373 /* NE: a != b -> !(a == b) */
6374 case NE: cond = EQ; neg_p = true; break;
6375 /* GE: a >= b -> !(b > a) */
6376 case GE: cond = GT; neg_p = true; swap_p = true; break;
6377 /* GEU: a >= b -> !(b > a) */
6378 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6379 /* LE: a <= b -> !(a > b) */
6380 case LE: cond = GT; neg_p = true; break;
6381 /* LEU: a <= b -> !(a > b) */
6382 case LEU: cond = GTU; neg_p = true; break;
6383 /* LT: a < b -> b > a */
6384 case LT: cond = GT; swap_p = true; break;
6385 /* LTU: a < b -> b > a */
6386 case LTU: cond = GTU; swap_p = true; break;
6393 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6396 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6398 cmp_op1, cmp_op2)));
6400 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6403 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6404 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6405 elements in CMP1 and CMP2 fulfill the comparison.
6406 This function is only used to emit patterns for the vx builtins and
6407 therefore only handles comparison codes required by the
6408 builtins. */
6410 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6411 rtx cmp1, rtx cmp2, bool all_p)
6413 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6414 rtx tmp_reg = gen_reg_rtx (SImode);
6415 bool swap_p = false;
6417 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6423 cc_producer_mode = CCVEQmode;
6427 code = swap_condition (code);
6432 cc_producer_mode = CCVIHmode;
6436 code = swap_condition (code);
6441 cc_producer_mode = CCVIHUmode;
6447 scratch_mode = GET_MODE (cmp1);
6448 /* These codes represent inverted CC interpretations. Inverting
6449 an ALL CC mode results in an ANY CC mode and the other way
6450 around. Invert the all_p flag here to compensate for
6451 that. */
6452 if (code == NE || code == LE || code == LEU)
6455 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6457 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6463 case EQ: cc_producer_mode = CCVEQmode; break;
6464 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6465 case GT: cc_producer_mode = CCVFHmode; break;
6466 case GE: cc_producer_mode = CCVFHEmode; break;
6467 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6468 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6469 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6470 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6471 default: gcc_unreachable ();
6473 scratch_mode = mode_for_vector
6474 (int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))).require (),
6475 GET_MODE_NUNITS (GET_MODE (cmp1)));
6476 gcc_assert (scratch_mode != BLKmode);
6481 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6493 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6494 gen_rtvec (2, gen_rtx_SET (
6495 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6496 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6497 gen_rtx_CLOBBER (VOIDmode,
6498 gen_rtx_SCRATCH (scratch_mode)))));
6499 emit_move_insn (target, const0_rtx);
6500 emit_move_insn (tmp_reg, const1_rtx);
6502 emit_move_insn (target,
6503 gen_rtx_IF_THEN_ELSE (SImode,
6504 gen_rtx_fmt_ee (code, VOIDmode,
6505 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
6510 /* Invert the comparison CODE applied to a CC mode. This is only safe
6511 if we know whether the result was created by a floating point
6512 compare or not. For the CCV modes this is encoded as part of the
6513 mode. */
6515 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6517 /* Reversal of FP compares needs care -- an ordered compare
6518 becomes an unordered compare and vice versa. */
6519 if (mode == CCVFALLmode || mode == CCVFANYmode)
6520 return reverse_condition_maybe_unordered (code);
6521 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6522 return reverse_condition (code);
6527 /* Generate a vector comparison expression loading either elements of
6528 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
6529 and CMP_OP2. */
6532 s390_expand_vcond (rtx target, rtx then, rtx els,
6533 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6536 machine_mode result_mode;
6539 machine_mode target_mode = GET_MODE (target);
6540 machine_mode cmp_mode = GET_MODE (cmp_op1);
6541 rtx op = (cond == LT) ? els : then;
6543 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6544 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6545 for short and byte (x >> 15 and x >> 7 respectively). */
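/* E.g. for V4SImode this turns x < 0 ? -1 : 0 into an element-wise
   arithmetic shift right by 31: every negative element becomes all
   ones and every non-negative element becomes zero, so neither a
   comparison nor a select is needed. */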
6546 if ((cond == LT || cond == GE)
6547 && target_mode == cmp_mode
6548 && cmp_op2 == CONST0_RTX (cmp_mode)
6549 && op == CONST0_RTX (target_mode)
6550 && s390_vector_mode_supported_p (target_mode)
6551 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6553 rtx negop = (cond == LT) ? then : els;
6555 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6557 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6558 if (negop == CONST1_RTX (target_mode))
6560 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6561 GEN_INT (shift), target,
6564 emit_move_insn (target, res);
6568 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6569 else if (all_ones_operand (negop, target_mode))
6571 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6572 GEN_INT (shift), target,
6575 emit_move_insn (target, res);
6580 /* We always use an integral type vector to hold the comparison
6581 result. */
6582 result_mode = mode_for_vector
6583 (int_mode_for_mode (GET_MODE_INNER (cmp_mode)).require (),
6584 GET_MODE_NUNITS (cmp_mode));
6585 result_target = gen_reg_rtx (result_mode);
6587 /* We allow vector immediates as comparison operands that
6588 can be handled by the optimization above but not by the
6589 following code. Hence, force them into registers here. */
6590 if (!REG_P (cmp_op1))
6591 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6593 if (!REG_P (cmp_op2))
6594 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6596 s390_expand_vec_compare (result_target, cond,
6599 /* If the results are supposed to be either -1 or 0 we are done
6600 since this is what our compare instructions generate anyway. */
6601 if (all_ones_operand (then, GET_MODE (then))
6602 && const0_operand (els, GET_MODE (els)))
6604 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6609 /* Otherwise we will do a vsel afterwards. */
6610 /* This gets triggered e.g.
6611 with gcc.c-torture/compile/pr53410-1.c */
6613 then = force_reg (target_mode, then);
6616 els = force_reg (target_mode, els);
6618 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6620 CONST0_RTX (result_mode));
6622 /* We compared the result against zero above so we have to swap then
6623 and else. */
6624 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6626 gcc_assert (target_mode == GET_MODE (then));
6627 emit_insn (gen_rtx_SET (target, tmp));
6630 /* Emit the RTX necessary to initialize the vector TARGET with values
6631 in VALS. */
6633 s390_expand_vec_init (rtx target, rtx vals)
6635 machine_mode mode = GET_MODE (target);
6636 machine_mode inner_mode = GET_MODE_INNER (mode);
6637 int n_elts = GET_MODE_NUNITS (mode);
6638 bool all_same = true, all_regs = true, all_const_int = true;
6642 for (i = 0; i < n_elts; ++i)
6644 x = XVECEXP (vals, 0, i);
6646 if (!CONST_INT_P (x))
6647 all_const_int = false;
6649 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6656 /* Use vector gen mask or vector gen byte mask if possible. */
6657 if (all_same && all_const_int
6658 && (XVECEXP (vals, 0, 0) == const0_rtx
6659 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6661 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6663 emit_insn (gen_rtx_SET (target,
6664 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6670 emit_insn (gen_rtx_SET (target,
6671 gen_rtx_VEC_DUPLICATE (mode,
6672 XVECEXP (vals, 0, 0))));
6679 && GET_MODE_SIZE (inner_mode) == 8)
6681 /* Use vector load pair. */
6682 emit_insn (gen_rtx_SET (target,
6683 gen_rtx_VEC_CONCAT (mode,
6684 XVECEXP (vals, 0, 0),
6685 XVECEXP (vals, 0, 1))));
6689 /* Use vector load logical element and zero. */
6690 if (TARGET_VXE && (mode == V4SImode || mode == V4SFmode))
6694 x = XVECEXP (vals, 0, 0);
6695 if (memory_operand (x, inner_mode))
6697 for (i = 1; i < n_elts; ++i)
6698 found = found && XVECEXP (vals, 0, i) == const0_rtx;
6702 machine_mode half_mode = (inner_mode == SFmode
6703 ? V2SFmode : V2SImode);
6704 emit_insn (gen_rtx_SET (target,
6705 gen_rtx_VEC_CONCAT (mode,
6706 gen_rtx_VEC_CONCAT (half_mode,
6709 gen_rtx_VEC_CONCAT (half_mode,
6717 /* We are about to set the vector elements one by one. Zero out the
6718 full register first in order to help the data flow framework to
6719 detect it as a full VR set. */
6720 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6722 /* Unfortunately the vec_init expander is not allowed to fail. So
6723 we have to implement the fallback ourselves. */
6724 for (i = 0; i < n_elts; i++)
6726 rtx elem = XVECEXP (vals, 0, i);
6727 if (!general_operand (elem, GET_MODE (elem)))
6728 elem = force_reg (inner_mode, elem);
6730 emit_insn (gen_rtx_SET (target,
6731 gen_rtx_UNSPEC (mode,
6733 GEN_INT (i), target),
6738 /* Structure to hold the initial parameters for a compare_and_swap operation
6739 in HImode and QImode. */
6741 struct alignment_context
6743 rtx memsi; /* SI aligned memory location. */
6744 rtx shift; /* Bit offset with regard to lsb. */
6745 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6746 rtx modemaski; /* ~modemask */
6747 bool aligned; /* True if memory is aligned, false otherwise. */
6750 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6751 structure AC for transparent simplifying, if the memory alignment is known
6752 to be at least 32bit. MEM is the memory location for the actual operation
6753 and MODE its mode. */
6756 init_alignment_context (struct alignment_context *ac, rtx mem,
6759 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6760 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6763 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6766 /* Alignment is unknown. */
6767 rtx byteoffset, addr, align;
6769 /* Force the address into a register. */
6770 addr = force_reg (Pmode, XEXP (mem, 0));
6772 /* Align it to SImode. */
6773 align = expand_simple_binop (Pmode, AND, addr,
6774 GEN_INT (-GET_MODE_SIZE (SImode)),
6775 NULL_RTX, 1, OPTAB_DIRECT);
6777 ac->memsi = gen_rtx_MEM (SImode, align);
6778 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6779 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6780 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6782 /* Calculate shiftcount. */
6783 byteoffset = expand_simple_binop (Pmode, AND, addr,
6784 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6785 NULL_RTX, 1, OPTAB_DIRECT);
6786 /* As we already have some offset, evaluate the remaining distance. */
6787 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6788 NULL_RTX, 1, OPTAB_DIRECT);
6791 /* Shift is the byte count, but we need the bitcount. */
6792 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6793 NULL_RTX, 1, OPTAB_DIRECT);
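/* Worked example (big-endian): for a HImode value at byte offset 2 of
   its SImode word the shift computed above is (4 - 2) - 2 = 0 bytes,
   i.e. the halfword occupies the least significant bits; at byte
   offset 0 it is (4 - 2) - 0 = 2 bytes = 16 bits. */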
6795 /* Calculate masks. */
6796 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6797 GEN_INT (GET_MODE_MASK (mode)),
6798 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6799 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6803 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6804 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6805 perform the merge in SEQ2. */
6808 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6809 machine_mode mode, rtx val, rtx ins)
6816 tmp = copy_to_mode_reg (SImode, val);
6817 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6821 *seq2 = get_insns ();
6828 /* Failed to use insv. Generate a two part shift and mask. */
6830 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6831 *seq1 = get_insns ();
6835 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6836 *seq2 = get_insns ();
6842 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6843 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6844 value to set if CMP == MEM. */
6847 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6848 rtx cmp, rtx new_rtx, bool is_weak)
6850 struct alignment_context ac;
6851 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6852 rtx res = gen_reg_rtx (SImode);
6853 rtx_code_label *csloop = NULL, *csend = NULL;
6855 gcc_assert (MEM_P (mem));
6857 init_alignment_context (&ac, mem, mode);
6859 /* Load full word. Subsequent loads are performed by CS. */
6860 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6861 NULL_RTX, 1, OPTAB_DIRECT);
6863 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6864 possible, we try to use insv to make this happen efficiently. If
6865 that fails we'll generate code both inside and outside the loop. */
6866 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6867 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6874 /* Start CS loop. */
6877 /* Begin assuming success. */
6878 emit_move_insn (btarget, const1_rtx);
6880 csloop = gen_label_rtx ();
6881 csend = gen_label_rtx ();
6882 emit_label (csloop);
6885 /* val = "<mem>00..0<mem>"
6886 * cmp = "00..0<cmp>00..0"
6887 * new = "00..0<new>00..0"
6893 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv, CCZ1mode);
6895 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6900 /* Jump to end if we're done (likely?). */
6901 s390_emit_jump (csend, cc);
/* Check for changes outside mode, and loop internally if so.
6904 Arrange the moves so that the compare is adjacent to the
6905 branch so that we can generate CRJ. */
6906 tmp = copy_to_reg (val);
6907 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6909 cc = s390_emit_compare (NE, val, tmp);
6910 s390_emit_jump (csloop, cc);
6913 emit_move_insn (btarget, const0_rtx);
6917 /* Return the correct part of the bitfield. */
6918 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6919 NULL_RTX, 1, OPTAB_DIRECT), 1);
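/* Editorial sketch (C-like pseudocode, strong variant) of the code
   emitted above; the weak/strong split is elided in this excerpt:

     val = *memsi & modemaski;              // field cleared
     for (;;)
       {
	 res = CS (memsi, insert (val, cmp), insert (val, new_rtx));
	 if (CC == 0)                        // swap succeeded
	   break;
	 if ((res & modemaski) == val)       // the field itself differed
	   { btarget = 0; break; }
	 val = res & modemaski;              // outside bits changed: retry
       }
     vtarget = (res >> shift) & GET_MODE_MASK (mode);
*/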
6922 /* Variant of s390_expand_cs for SI, DI and TI modes. */
6924 s390_expand_cs_tdsi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6925 rtx cmp, rtx new_rtx, bool is_weak)
6927 rtx output = vtarget;
6928 rtx_code_label *skip_cs_label = NULL;
6929 bool do_const_opt = false;
6931 if (!register_operand (output, mode))
6932 output = gen_reg_rtx (mode);
6934 /* If IS_WEAK is true and the INPUT value is a constant, compare the memory
with the constant first and skip the compare_and_swap because it is very
expensive and likely to fail anyway.
Note 1: This is done only for IS_WEAK.  C11 allows optimizations that may
cause spurious failures in that case.
6939 Note 2: It may be useful to do this also for non-constant INPUT.
6940 Note 3: Currently only targets with "load on condition" are supported
6941 (z196 and newer). */
6944 && (mode == SImode || mode == DImode))
6945 do_const_opt = (is_weak && CONST_INT_P (cmp));
6949 rtx cc = gen_rtx_REG (CCZmode, CC_REGNUM);
6951 skip_cs_label = gen_label_rtx ();
6952 emit_move_insn (btarget, const0_rtx);
6953 if (CONST_INT_P (cmp) && INTVAL (cmp) == 0)
6955 rtvec lt = rtvec_alloc (2);
6957 /* Load-and-test + conditional jump. */
6959 = gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, mem, cmp));
6960 RTVEC_ELT (lt, 1) = gen_rtx_SET (output, mem);
6961 emit_insn (gen_rtx_PARALLEL (VOIDmode, lt));
6965 emit_move_insn (output, mem);
6966 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
6968 s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
6969 add_reg_br_prob_note (get_last_insn (),
6970 profile_probability::very_unlikely ());
6971 /* If the jump is not taken, OUTPUT is the expected value. */
6973 /* Reload newval to a register manually, *after* the compare and jump
6974 above. Otherwise Reload might place it before the jump. */
6977 cmp = force_reg (mode, cmp);
6978 new_rtx = force_reg (mode, new_rtx);
6979 s390_emit_compare_and_swap (EQ, output, mem, cmp, new_rtx,
6980 (do_const_opt) ? CCZmode : CCZ1mode);
6981 if (skip_cs_label != NULL)
6982 emit_label (skip_cs_label);
6984 /* We deliberately accept non-register operands in the predicate
6985 to ensure the write back to the output operand happens *before*
6986 the store-flags code below. This makes it easier for combine
6987 to merge the store-flags code with a potential test-and-branch
6988 pattern following (immediately!) afterwards. */
6989 if (output != vtarget)
6990 emit_move_insn (vtarget, output);
6996 /* Do not use gen_cstorecc4 here because it writes either 1 or 0, but
6997 btarget has already been initialized with 0 above. */
6998 cc = gen_rtx_REG (CCZmode, CC_REGNUM);
6999 cond = gen_rtx_EQ (VOIDmode, cc, const0_rtx);
7000 ite = gen_rtx_IF_THEN_ELSE (SImode, cond, const1_rtx, btarget);
7001 emit_insn (gen_rtx_SET (btarget, ite));
7007 cc = gen_rtx_REG (CCZ1mode, CC_REGNUM);
7008 cond = gen_rtx_EQ (SImode, cc, const0_rtx);
7009 emit_insn (gen_cstorecc4 (btarget, cond, cc, const0_rtx));
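/* Editorial sketch of the constant-CMP fast path above:

     btarget = 0;
     output = *mem;                      // load (and test if CMP == 0)
     if (output != cmp)                  // very unlikely
       goto skip_cs;
     CS (mem, cmp, new_rtx);             // sets CC only (CCZmode)
     btarget = (CC == 0) ? 1 : btarget;  // the IF_THEN_ELSE above
   skip_cs:
     vtarget = output;

   gen_cstorecc4 is avoided on this path because it would overwrite the
   zero-initialization of BTARGET needed on the skipped branch.  */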
7013 /* Expand an atomic compare and swap operation. MEM is the memory location,
7014 CMP the old value to compare MEM with and NEW_RTX the value to set if
7018 s390_expand_cs (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
7019 rtx cmp, rtx new_rtx, bool is_weak)
7026 s390_expand_cs_tdsi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7030 s390_expand_cs_hqi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7037 /* Expand an atomic_exchange operation simulated with a compare-and-swap loop.
7038 The memory location MEM is set to INPUT. OUTPUT is set to the previous value
7042 s390_expand_atomic_exchange_tdsi (rtx output, rtx mem, rtx input)
7044 machine_mode mode = GET_MODE (mem);
7045 rtx_code_label *csloop;
7048 && (mode == DImode || mode == SImode)
7049 && CONST_INT_P (input) && INTVAL (input) == 0)
7051 emit_move_insn (output, const0_rtx);
7053 emit_insn (gen_atomic_fetch_anddi (output, mem, const0_rtx, input));
7055 emit_insn (gen_atomic_fetch_andsi (output, mem, const0_rtx, input));
7059 input = force_reg (mode, input);
7060 emit_move_insn (output, mem);
7061 csloop = gen_label_rtx ();
7062 emit_label (csloop);
s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, output, mem, output,
                                                    input, CCZ1mode));
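/* Editorial sketch of the loop emitted above:

     output = *mem;
   csloop:
     if (!CS (mem, output, input))   // compare failed; CS has already
       goto csloop;                  //  reloaded OUTPUT from memory

   so no explicit reload of OUTPUT is needed inside the loop.  */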
7067 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
7068 and VAL the value to play with. If AFTER is true then store the value
7069 MEM holds after the operation, if AFTER is false then store the value MEM
7070 holds before the operation. If TARGET is zero then discard that value, else
7071 store it to TARGET. */
7074 s390_expand_atomic (machine_mode mode, enum rtx_code code,
7075 rtx target, rtx mem, rtx val, bool after)
7077 struct alignment_context ac;
7079 rtx new_rtx = gen_reg_rtx (SImode);
7080 rtx orig = gen_reg_rtx (SImode);
7081 rtx_code_label *csloop = gen_label_rtx ();
7083 gcc_assert (!target || register_operand (target, VOIDmode));
7084 gcc_assert (MEM_P (mem));
7086 init_alignment_context (&ac, mem, mode);
7088 /* Shift val to the correct bit positions.
7089 Preserve "icm", but prevent "ex icm". */
7090 if (!(ac.aligned && code == SET && MEM_P (val)))
7091 val = s390_expand_mask_and_shift (val, mode, ac.shift);
7093 /* Further preparation insns. */
7094 if (code == PLUS || code == MINUS)
7095 emit_move_insn (orig, val);
7096 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
7097 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
7098 NULL_RTX, 1, OPTAB_DIRECT);
7100 /* Load full word. Subsequent loads are performed by CS. */
7101 cmp = force_reg (SImode, ac.memsi);
7103 /* Start CS loop. */
7104 emit_label (csloop);
7105 emit_move_insn (new_rtx, cmp);
7107 /* Patch new with val at correct position. */
7112 val = expand_simple_binop (SImode, code, new_rtx, orig,
7113 NULL_RTX, 1, OPTAB_DIRECT);
7114 val = expand_simple_binop (SImode, AND, val, ac.modemask,
7115 NULL_RTX, 1, OPTAB_DIRECT);
7118 if (ac.aligned && MEM_P (val))
7119 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
7120 0, 0, SImode, val, false);
7123 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
7124 NULL_RTX, 1, OPTAB_DIRECT);
7125 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
7126 NULL_RTX, 1, OPTAB_DIRECT);
7132 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
7133 NULL_RTX, 1, OPTAB_DIRECT);
7135 case MULT: /* NAND */
7136 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
7137 NULL_RTX, 1, OPTAB_DIRECT);
7138 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
7139 NULL_RTX, 1, OPTAB_DIRECT);
7145 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
7146 ac.memsi, cmp, new_rtx,
7149 /* Return the correct part of the bitfield. */
7151 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
7152 after ? new_rtx : cmp, ac.shift,
7153 NULL_RTX, 1, OPTAB_DIRECT), 1);
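/* Editorial illustration of the AND/NAND preparation above: the
   operation is carried out on the full SImode word, so VAL is
   pre-XORed with ac.modemaski to make all bits outside the field 1s,
   leaving them unchanged by the AND.  For a QImode field in bits
   16..23 and a field value of 0xab:

     val = 0x00ab0000 ^ 0xff00ffff  ==  0xffabffff

   For NAND (represented as MULT here) the result is additionally
   XORed with ac.modemask, complementing only the field bits.  */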
7156 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7157 We need to emit DTP-relative relocations. */
7159 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
7162 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
7167 fputs ("\t.long\t", file);
7170 fputs ("\t.quad\t", file);
7175 output_addr_const (file, x);
7176 fputs ("@DTPOFF", file);
7179 /* Return the proper mode for REGNO being represented in the dwarf
7182 s390_dwarf_frame_reg_mode (int regno)
7184 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
7186 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
7187 if (GENERAL_REGNO_P (regno))
7190 /* The rightmost 64 bits of vector registers are call-clobbered. */
7191 if (GET_MODE_SIZE (save_mode) > 8)
7197 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
7198 /* Implement TARGET_MANGLE_TYPE. */
7201 s390_mangle_type (const_tree type)
7203 type = TYPE_MAIN_VARIANT (type);
7205 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
7206 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
7209 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
7210 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
7211 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
7212 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
7214 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
7215 && TARGET_LONG_DOUBLE_128)
7218 /* For all other types, use normal C++ mangling. */
7223 /* In the name of slightly smaller debug output, and to cater to
7224 general assembler lossage, recognize various UNSPEC sequences
7225 and turn them back into a direct symbol reference. */
7228 s390_delegitimize_address (rtx orig_x)
7232 orig_x = delegitimize_mem_from_attrs (orig_x);
7235 /* Extract the symbol ref from:
7236 (plus:SI (reg:SI 12 %r12)
7237 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
7238 UNSPEC_GOTOFF/PLTOFF)))
7240 (plus:SI (reg:SI 12 %r12)
7241 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7242 UNSPEC_GOTOFF/PLTOFF)
7243 (const_int 4 [0x4])))) */
7244 if (GET_CODE (x) == PLUS
7245 && REG_P (XEXP (x, 0))
7246 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7247 && GET_CODE (XEXP (x, 1)) == CONST)
7249 HOST_WIDE_INT offset = 0;
7251 /* The const operand. */
7252 y = XEXP (XEXP (x, 1), 0);
7254 if (GET_CODE (y) == PLUS
7255 && GET_CODE (XEXP (y, 1)) == CONST_INT)
7257 offset = INTVAL (XEXP (y, 1));
7261 if (GET_CODE (y) == UNSPEC
7262 && (XINT (y, 1) == UNSPEC_GOTOFF
7263 || XINT (y, 1) == UNSPEC_PLTOFF))
7264 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
7267 if (GET_CODE (x) != MEM)
7271 if (GET_CODE (x) == PLUS
7272 && GET_CODE (XEXP (x, 1)) == CONST
7273 && GET_CODE (XEXP (x, 0)) == REG
7274 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7276 y = XEXP (XEXP (x, 1), 0);
7277 if (GET_CODE (y) == UNSPEC
7278 && XINT (y, 1) == UNSPEC_GOT)
7279 y = XVECEXP (y, 0, 0);
7283 else if (GET_CODE (x) == CONST)
7285 /* Extract the symbol ref from:
7286 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7287 UNSPEC_PLT/GOTENT))) */
7290 if (GET_CODE (y) == UNSPEC
7291 && (XINT (y, 1) == UNSPEC_GOTENT
7292 || XINT (y, 1) == UNSPEC_PLT))
7293 y = XVECEXP (y, 0, 0);
7300 if (GET_MODE (orig_x) != Pmode)
7302 if (GET_MODE (orig_x) == BLKmode)
7304 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7311 /* Output operand OP to stdio stream FILE.
7312 OP is an address (register + offset) which is not used to address data;
7313 instead the rightmost bits are interpreted as the value. */
7316 print_addrstyle_operand (FILE *file, rtx op)
7318 HOST_WIDE_INT offset;
7321 /* Extract base register and offset. */
7322 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7328 gcc_assert (GET_CODE (base) == REG);
7329 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7330 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
/* Offsets are restricted to twelve bits.  */
7334 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7336 fprintf (file, "(%s)", reg_names[REGNO (base)]);
7339 /* Assigns the number of NOP halfwords to be emitted before and after the
function label to *HW_BEFORE and *HW_AFTER.  Neither pointer may be NULL.
7341 If hotpatching is disabled for the function, the values are set to zero.
7345 s390_function_num_hotpatch_hw (tree decl,
7351 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7353 /* Handle the arguments of the hotpatch attribute. The values
specified via attribute might override the cmdline argument
values.  */
7358 tree args = TREE_VALUE (attr);
7360 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7361 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7365 /* Use the values specified by the cmdline arguments. */
7366 *hw_before = s390_hotpatch_hw_before_label;
7367 *hw_after = s390_hotpatch_hw_after_label;
7371 /* Write the current .machine and .machinemode specification to the assembler
7374 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7376 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7378 fprintf (asm_out_file, "\t.machinemode %s\n",
7379 (TARGET_ZARCH) ? "zarch" : "esa");
7380 fprintf (asm_out_file, "\t.machine \"%s",
7381 processor_table[s390_arch].binutils_name);
7382 if (S390_USE_ARCHITECTURE_MODIFIERS)
7386 cpu_flags = processor_flags_table[(int) s390_arch];
7387 if (TARGET_HTM && !(cpu_flags & PF_TX))
7388 fprintf (asm_out_file, "+htm");
7389 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7390 fprintf (asm_out_file, "+nohtm");
7391 if (TARGET_VX && !(cpu_flags & PF_VX))
7392 fprintf (asm_out_file, "+vx");
7393 else if (!TARGET_VX && (cpu_flags & PF_VX))
7394 fprintf (asm_out_file, "+novx");
7396 fprintf (asm_out_file, "\"\n");
7399 /* Write an extra function header before the very start of the function. */
7402 s390_asm_output_function_prefix (FILE *asm_out_file,
7403 const char *fnname ATTRIBUTE_UNUSED)
7405 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7407 /* Since only the function specific options are saved but not the indications
7408 which options are set, it's too much work here to figure out which options
7409 have actually changed. Thus, generate .machine and .machinemode whenever a
7410 function has the target attribute or pragma. */
7411 fprintf (asm_out_file, "\t.machinemode push\n");
7412 fprintf (asm_out_file, "\t.machine push\n");
7413 s390_asm_output_machine_for_arch (asm_out_file);
7416 /* Write an extra function footer after the very end of the function. */
7419 s390_asm_declare_function_size (FILE *asm_out_file,
7420 const char *fnname, tree decl)
7422 if (!flag_inhibit_size_directive)
7423 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7424 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7426 fprintf (asm_out_file, "\t.machine pop\n");
7427 fprintf (asm_out_file, "\t.machinemode pop\n");
7431 /* Write the extra assembler code needed to declare a function properly. */
7434 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7437 int hw_before, hw_after;
7439 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7442 unsigned int function_alignment;
7445 /* Add a trampoline code area before the function label and initialize it
7446 with two-byte nop instructions. This area can be overwritten with code
7447 that jumps to a patched version of the function. */
7448 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
7449 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7451 for (i = 1; i < hw_before; i++)
7452 fputs ("\tnopr\t%r0\n", asm_out_file);
7454 /* Note: The function label must be aligned so that (a) the bytes of the
7455 following nop do not cross a cacheline boundary, and (b) a jump address
(eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7457 stored directly before the label without crossing a cacheline
7458 boundary. All this is necessary to make sure the trampoline code can
7459 be changed atomically.
This alignment is done automatically using the FUNCTION_BOUNDARY, but
if there are NOPs before the function label, the alignment is placed
before them.  So it is necessary to duplicate the alignment after the
NOPs.  */
7464 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7465 if (! DECL_USER_ALIGN (decl))
7466 function_alignment = MAX (function_alignment,
7467 (unsigned int) align_functions);
7468 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7469 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7472 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7474 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7475 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7476 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7477 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7478 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7479 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7480 s390_warn_framesize);
7481 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7482 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7483 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7484 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7485 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7486 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7487 TARGET_PACKED_STACK);
7488 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7489 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7490 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7491 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7492 s390_warn_dynamicstack_p);
7494 ASM_OUTPUT_LABEL (asm_out_file, fname);
7496 asm_fprintf (asm_out_file,
7497 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7501 /* Output machine-dependent UNSPECs occurring in address constant X
7502 in assembler syntax to stdio stream FILE. Returns true if the
7503 constant X could be recognized, false otherwise. */
7506 s390_output_addr_const_extra (FILE *file, rtx x)
7508 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7509 switch (XINT (x, 1))
7512 output_addr_const (file, XVECEXP (x, 0, 0));
7513 fprintf (file, "@GOTENT");
7516 output_addr_const (file, XVECEXP (x, 0, 0));
7517 fprintf (file, "@GOT");
7520 output_addr_const (file, XVECEXP (x, 0, 0));
7521 fprintf (file, "@GOTOFF");
7524 output_addr_const (file, XVECEXP (x, 0, 0));
7525 fprintf (file, "@PLT");
7528 output_addr_const (file, XVECEXP (x, 0, 0));
7529 fprintf (file, "@PLTOFF");
7532 output_addr_const (file, XVECEXP (x, 0, 0));
7533 fprintf (file, "@TLSGD");
7536 assemble_name (file, get_some_local_dynamic_name ());
7537 fprintf (file, "@TLSLDM");
7540 output_addr_const (file, XVECEXP (x, 0, 0));
7541 fprintf (file, "@DTPOFF");
7544 output_addr_const (file, XVECEXP (x, 0, 0));
7545 fprintf (file, "@NTPOFF");
7547 case UNSPEC_GOTNTPOFF:
7548 output_addr_const (file, XVECEXP (x, 0, 0));
7549 fprintf (file, "@GOTNTPOFF");
7551 case UNSPEC_INDNTPOFF:
7552 output_addr_const (file, XVECEXP (x, 0, 0));
7553 fprintf (file, "@INDNTPOFF");
7557 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7558 switch (XINT (x, 1))
7560 case UNSPEC_POOL_OFFSET:
7561 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7562 output_addr_const (file, x);
7568 /* Output address operand ADDR in assembler syntax to
7569 stdio stream FILE. */
7572 print_operand_address (FILE *file, rtx addr)
7574 struct s390_address ad;
7575 memset (&ad, 0, sizeof (s390_address));
7577 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7581 output_operand_lossage ("symbolic memory references are "
7582 "only supported on z10 or later");
7585 output_addr_const (file, addr);
7589 if (!s390_decompose_address (addr, &ad)
7590 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7591 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7592 output_operand_lossage ("cannot decompose address");
7595 output_addr_const (file, ad.disp);
7597 fprintf (file, "0");
7599 if (ad.base && ad.indx)
7600 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7601 reg_names[REGNO (ad.base)]);
7603 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7606 /* Output operand X in assembler syntax to stdio stream FILE.
CODE specifies the format flag.  The following format flags
are recognized:
7610 'C': print opcode suffix for branch condition.
7611 'D': print opcode suffix for inverse branch condition.
7612 'E': print opcode suffix for branch on index instruction.
7613 'G': print the size of the operand in bytes.
7614 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7615 'M': print the second word of a TImode operand.
7616 'N': print the second word of a DImode operand.
7617 'O': print only the displacement of a memory reference or address.
7618 'R': print only the base register of a memory reference or address.
7619 'S': print S-type memory reference (base+displacement).
7620 'Y': print address style operand without index (e.g. shift count or setmem
7623 'b': print integer X as if it's an unsigned byte.
'c': print integer X as if it's a signed byte.
7625 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7626 'f': "end" contiguous bitmask X in SImode.
7627 'h': print integer X as if it's a signed halfword.
7628 'i': print the first nonzero HImode part of X.
'j': print the first HImode part of X unequal to -1.
'k': print the first nonzero SImode part of X.
'm': print the first SImode part of X unequal to -1.
'o': print integer X as if it's an unsigned 32-bit word.
7633 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7634 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7635 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7636 'x': print integer X as if it's an unsigned halfword.
7637 'v': print register number as vector register (v1 instead of f1).
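/* Editorial examples for two of the simpler modifiers above: with
   X == (const_int 0x8001), '%h' prints -32767 (the low 16 bits
   sign-extended) while '%x' prints 32769 (the same bits unsigned);
   '%G' applied to a DImode operand prints 8.  */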
7641 print_operand (FILE *file, rtx x, int code)
7648 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7652 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7656 if (GET_CODE (x) == LE)
7657 fprintf (file, "l");
7658 else if (GET_CODE (x) == GT)
7659 fprintf (file, "h");
7661 output_operand_lossage ("invalid comparison operator "
7662 "for 'E' output modifier");
7666 if (GET_CODE (x) == SYMBOL_REF)
7668 fprintf (file, "%s", ":tls_load:");
7669 output_addr_const (file, x);
7671 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7673 fprintf (file, "%s", ":tls_gdcall:");
7674 output_addr_const (file, XVECEXP (x, 0, 0));
7676 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7678 fprintf (file, "%s", ":tls_ldcall:");
7679 const char *name = get_some_local_dynamic_name ();
7681 assemble_name (file, name);
7684 output_operand_lossage ("invalid reference for 'J' output modifier");
7688 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7693 struct s390_address ad;
7696 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7699 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7702 output_operand_lossage ("invalid address for 'O' output modifier");
7707 output_addr_const (file, ad.disp);
7709 fprintf (file, "0");
7715 struct s390_address ad;
7718 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7721 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7724 output_operand_lossage ("invalid address for 'R' output modifier");
7729 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7731 fprintf (file, "0");
7737 struct s390_address ad;
7742 output_operand_lossage ("memory reference expected for "
7743 "'S' output modifier");
7746 ret = s390_decompose_address (XEXP (x, 0), &ad);
7749 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7752 output_operand_lossage ("invalid address for 'S' output modifier");
7757 output_addr_const (file, ad.disp);
7759 fprintf (file, "0");
7762 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7767 if (GET_CODE (x) == REG)
7768 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7769 else if (GET_CODE (x) == MEM)
7770 x = change_address (x, VOIDmode,
7771 plus_constant (Pmode, XEXP (x, 0), 4));
7773 output_operand_lossage ("register or memory expression expected "
7774 "for 'N' output modifier");
7778 if (GET_CODE (x) == REG)
7779 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7780 else if (GET_CODE (x) == MEM)
7781 x = change_address (x, VOIDmode,
7782 plus_constant (Pmode, XEXP (x, 0), 8));
7784 output_operand_lossage ("register or memory expression expected "
7785 "for 'M' output modifier");
7789 print_addrstyle_operand (file, x);
7793 switch (GET_CODE (x))
7796 /* Print FP regs as fx instead of vx when they are accessed
7797 through non-vector mode. */
7799 || VECTOR_NOFP_REG_P (x)
7800 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7801 || (VECTOR_REG_P (x)
7802 && (GET_MODE_SIZE (GET_MODE (x)) /
7803 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7804 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7806 fprintf (file, "%s", reg_names[REGNO (x)]);
7810 output_address (GET_MODE (x), XEXP (x, 0));
7817 output_addr_const (file, x);
7830 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7836 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7839 ival = s390_extract_part (x, HImode, 0);
7842 ival = s390_extract_part (x, HImode, -1);
7845 ival = s390_extract_part (x, SImode, 0);
7848 ival = s390_extract_part (x, SImode, -1);
7860 len = (code == 's' || code == 'e' ? 64 : 32);
7861 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7863 if (code == 's' || code == 't')
7870 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7872 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7875 case CONST_WIDE_INT:
7877 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7878 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7879 else if (code == 'x')
7880 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7881 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7882 else if (code == 'h')
7883 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7884 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7888 output_operand_lossage ("invalid constant - try using "
7889 "an output modifier");
7891 output_operand_lossage ("invalid constant for output modifier '%c'",
7899 gcc_assert (const_vec_duplicate_p (x));
7900 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7901 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7909 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7911 ival = (code == 's') ? start : end;
7912 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7918 bool ok = s390_bytemask_vector_p (x, &mask);
7920 fprintf (file, "%u", mask);
7925 output_operand_lossage ("invalid constant vector for output "
7926 "modifier '%c'", code);
7932 output_operand_lossage ("invalid expression - try using "
7933 "an output modifier");
7935 output_operand_lossage ("invalid expression for output "
7936 "modifier '%c'", code);
7941 /* Target hook for assembling integer objects. We need to define it
here to work around a bug in some versions of GAS, which couldn't
7943 handle values smaller than INT_MIN when printed in decimal. */
7946 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7948 if (size == 8 && aligned_p
7949 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7951 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7955 return default_assemble_integer (x, size, aligned_p);
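/* Editorial example: (const_int -2147483649), which is smaller than
   INT_MIN, is emitted as

	.quad	0xffffffff7fffffff

   i.e. in hexadecimal rather than the decimal form the affected GAS
   versions mis-handled.  */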
7958 /* Returns true if register REGNO is used for forming
7959 a memory address in expression X. */
7962 reg_used_in_mem_p (int regno, rtx x)
7964 enum rtx_code code = GET_CODE (x);
7970 if (refers_to_regno_p (regno, XEXP (x, 0)))
7973 else if (code == SET
7974 && GET_CODE (SET_DEST (x)) == PC)
7976 if (refers_to_regno_p (regno, SET_SRC (x)))
7980 fmt = GET_RTX_FORMAT (code);
7981 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7984 && reg_used_in_mem_p (regno, XEXP (x, i)))
7987 else if (fmt[i] == 'E')
7988 for (j = 0; j < XVECLEN (x, i); j++)
7989 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7995 /* Returns true if expression DEP_RTX sets an address register
7996 used by instruction INSN to address memory. */
7999 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
8003 if (NONJUMP_INSN_P (dep_rtx))
8004 dep_rtx = PATTERN (dep_rtx);
8006 if (GET_CODE (dep_rtx) == SET)
8008 target = SET_DEST (dep_rtx);
8009 if (GET_CODE (target) == STRICT_LOW_PART)
8010 target = XEXP (target, 0);
8011 while (GET_CODE (target) == SUBREG)
8012 target = SUBREG_REG (target);
8014 if (GET_CODE (target) == REG)
8016 int regno = REGNO (target);
8018 if (s390_safe_attr_type (insn) == TYPE_LA)
8020 pat = PATTERN (insn);
8021 if (GET_CODE (pat) == PARALLEL)
8023 gcc_assert (XVECLEN (pat, 0) == 2);
8024 pat = XVECEXP (pat, 0, 0);
8026 gcc_assert (GET_CODE (pat) == SET);
8027 return refers_to_regno_p (regno, SET_SRC (pat));
8029 else if (get_attr_atype (insn) == ATYPE_AGEN)
8030 return reg_used_in_mem_p (regno, PATTERN (insn));
/* Return 1 if dep_insn sets a register used by insn in the agen unit.  */
8039 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
8041 rtx dep_rtx = PATTERN (dep_insn);
8044 if (GET_CODE (dep_rtx) == SET
8045 && addr_generation_dependency_p (dep_rtx, insn))
8047 else if (GET_CODE (dep_rtx) == PARALLEL)
8049 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
8051 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
8059 /* A C statement (sans semicolon) to update the integer scheduling priority
INSN_PRIORITY (INSN).  Increase the priority to execute INSN earlier;
reduce the priority to execute INSN later.  Do not define this macro if
8062 you do not need to adjust the scheduling priorities of insns.
8064 A STD instruction should be scheduled earlier,
8065 in order to use the bypass. */
8067 s390_adjust_priority (rtx_insn *insn, int priority)
8069 if (! INSN_P (insn))
8072 if (s390_tune <= PROCESSOR_2064_Z900)
8075 switch (s390_safe_attr_type (insn))
8079 priority = priority << 3;
8083 priority = priority << 1;
8092 /* The number of instructions that can be issued per cycle. */
8095 s390_issue_rate (void)
8099 case PROCESSOR_2084_Z990:
8100 case PROCESSOR_2094_Z9_109:
8101 case PROCESSOR_2094_Z9_EC:
8102 case PROCESSOR_2817_Z196:
8104 case PROCESSOR_2097_Z10:
8106 case PROCESSOR_9672_G5:
8107 case PROCESSOR_9672_G6:
8108 case PROCESSOR_2064_Z900:
8109 /* Starting with EC12 we use the sched_reorder hook to take care
8110 of instruction dispatch constraints. The algorithm only
8111 picks the best instruction and assumes only a single
8112 instruction gets issued per cycle. */
8113 case PROCESSOR_2827_ZEC12:
8114 case PROCESSOR_2964_Z13:
8115 case PROCESSOR_3906_Z14:
8122 s390_first_cycle_multipass_dfa_lookahead (void)
8127 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
8128 Fix up MEMs as required. */
8131 annotate_constant_pool_refs (rtx *x)
8136 gcc_assert (GET_CODE (*x) != SYMBOL_REF
8137 || !CONSTANT_POOL_ADDRESS_P (*x));
8139 /* Literal pool references can only occur inside a MEM ... */
8140 if (GET_CODE (*x) == MEM)
8142 rtx memref = XEXP (*x, 0);
8144 if (GET_CODE (memref) == SYMBOL_REF
8145 && CONSTANT_POOL_ADDRESS_P (memref))
8147 rtx base = cfun->machine->base_reg;
8148 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
8151 *x = replace_equiv_address (*x, addr);
8155 if (GET_CODE (memref) == CONST
8156 && GET_CODE (XEXP (memref, 0)) == PLUS
8157 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
8158 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
8159 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
8161 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
8162 rtx sym = XEXP (XEXP (memref, 0), 0);
8163 rtx base = cfun->machine->base_reg;
8164 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8167 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
8172 /* ... or a load-address type pattern. */
8173 if (GET_CODE (*x) == SET)
8175 rtx addrref = SET_SRC (*x);
8177 if (GET_CODE (addrref) == SYMBOL_REF
8178 && CONSTANT_POOL_ADDRESS_P (addrref))
8180 rtx base = cfun->machine->base_reg;
8181 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
8184 SET_SRC (*x) = addr;
8188 if (GET_CODE (addrref) == CONST
8189 && GET_CODE (XEXP (addrref, 0)) == PLUS
8190 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
8191 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
8192 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
8194 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
8195 rtx sym = XEXP (XEXP (addrref, 0), 0);
8196 rtx base = cfun->machine->base_reg;
8197 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8200 SET_SRC (*x) = plus_constant (Pmode, addr, off);
8205 /* Annotate LTREL_BASE as well. */
8206 if (GET_CODE (*x) == UNSPEC
8207 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8209 rtx base = cfun->machine->base_reg;
8210 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
8215 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8216 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8220 annotate_constant_pool_refs (&XEXP (*x, i));
8222 else if (fmt[i] == 'E')
8224 for (j = 0; j < XVECLEN (*x, i); j++)
8225 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
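/* Editorial example of the annotation performed above:

     (mem (symbol_ref "*.LC0"))

   becomes

     (mem (unspec [(symbol_ref "*.LC0") (reg %r13)] UNSPEC_LTREF))

   recording the base register through which the pool entry will
   eventually be addressed (register number illustrative).  */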
8230 /* Split all branches that exceed the maximum distance.
8231 Returns true if this created a new literal pool entry. */
8234 s390_split_branches (void)
8236 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8237 int new_literal = 0, ret;
8242 /* We need correct insn addresses. */
8244 shorten_branches (get_insns ());
8246 /* Find all branches that exceed 64KB, and split them. */
8248 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8250 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
8253 pat = PATTERN (insn);
8254 if (GET_CODE (pat) == PARALLEL)
8255 pat = XVECEXP (pat, 0, 0);
8256 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8259 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8261 label = &SET_SRC (pat);
8263 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8265 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8266 label = &XEXP (SET_SRC (pat), 1);
8267 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8268 label = &XEXP (SET_SRC (pat), 2);
8275 if (get_attr_length (insn) <= 4)
8278 /* We are going to use the return register as scratch register,
8279 make sure it will be saved/restored by the prologue/epilogue. */
8280 cfun_frame_layout.save_return_addr_p = 1;
8285 rtx mem = force_const_mem (Pmode, *label);
8286 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8288 INSN_ADDRESSES_NEW (set_insn, -1);
8289 annotate_constant_pool_refs (&PATTERN (set_insn));
8296 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8297 UNSPEC_LTREL_OFFSET);
8298 target = gen_rtx_CONST (Pmode, target);
8299 target = force_const_mem (Pmode, target);
8300 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8302 INSN_ADDRESSES_NEW (set_insn, -1);
8303 annotate_constant_pool_refs (&PATTERN (set_insn));
8305 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8306 cfun->machine->base_reg),
8308 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8311 ret = validate_change (insn, label, target, 0);
8319 /* Find an annotated literal pool symbol referenced in RTX X,
8320 and store it at REF. Will abort if X contains references to
8321 more than one such pool symbol; multiple references to the same
8322 symbol are allowed, however.
8324 The rtx pointed to by REF must be initialized to NULL_RTX
8325 by the caller before calling this routine. */
8328 find_constant_pool_ref (rtx x, rtx *ref)
8333 /* Ignore LTREL_BASE references. */
8334 if (GET_CODE (x) == UNSPEC
8335 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8337 /* Likewise POOL_ENTRY insns. */
8338 if (GET_CODE (x) == UNSPEC_VOLATILE
8339 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8342 gcc_assert (GET_CODE (x) != SYMBOL_REF
8343 || !CONSTANT_POOL_ADDRESS_P (x));
8345 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8347 rtx sym = XVECEXP (x, 0, 0);
8348 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8349 && CONSTANT_POOL_ADDRESS_P (sym));
8351 if (*ref == NULL_RTX)
8354 gcc_assert (*ref == sym);
8359 fmt = GET_RTX_FORMAT (GET_CODE (x));
8360 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8364 find_constant_pool_ref (XEXP (x, i), ref);
8366 else if (fmt[i] == 'E')
8368 for (j = 0; j < XVECLEN (x, i); j++)
8369 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8374 /* Replace every reference to the annotated literal pool
8375 symbol REF in X by its base plus OFFSET. */
8378 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8383 gcc_assert (*x != ref);
8385 if (GET_CODE (*x) == UNSPEC
8386 && XINT (*x, 1) == UNSPEC_LTREF
8387 && XVECEXP (*x, 0, 0) == ref)
8389 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8393 if (GET_CODE (*x) == PLUS
8394 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8395 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8396 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8397 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8399 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8400 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8404 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8405 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8409 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8411 else if (fmt[i] == 'E')
8413 for (j = 0; j < XVECLEN (*x, i); j++)
8414 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
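/* Editorial example of the rewrite performed above: once the pool
   layout is known,

     (unspec [(symbol_ref "*.LC0") (reg %r13)] UNSPEC_LTREF)

   with OFFSET == (const_int 16) becomes

     (plus (reg %r13) (const_int 16))

   (register number illustrative).  */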
8419 /* Check whether X contains an UNSPEC_LTREL_BASE.
8420 Return its constant pool symbol if found, NULL_RTX otherwise. */
8423 find_ltrel_base (rtx x)
8428 if (GET_CODE (x) == UNSPEC
8429 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8430 return XVECEXP (x, 0, 0);
8432 fmt = GET_RTX_FORMAT (GET_CODE (x));
8433 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8437 rtx fnd = find_ltrel_base (XEXP (x, i));
8441 else if (fmt[i] == 'E')
8443 for (j = 0; j < XVECLEN (x, i); j++)
8445 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8455 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8458 replace_ltrel_base (rtx *x)
8463 if (GET_CODE (*x) == UNSPEC
8464 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8466 *x = XVECEXP (*x, 0, 1);
8470 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8471 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8475 replace_ltrel_base (&XEXP (*x, i));
8477 else if (fmt[i] == 'E')
8479 for (j = 0; j < XVECLEN (*x, i); j++)
8480 replace_ltrel_base (&XVECEXP (*x, i, j));
8486 /* We keep a list of constants which we have to add to internal
8487 constant tables in the middle of large functions. */
8489 #define NR_C_MODES 32
8490 machine_mode constant_modes[NR_C_MODES] =
8492 TFmode, TImode, TDmode,
8493 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8494 V4SFmode, V2DFmode, V1TFmode,
8495 DFmode, DImode, DDmode,
8496 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8497 SFmode, SImode, SDmode,
8498 V4QImode, V2HImode, V1SImode, V1SFmode,
8507 struct constant *next;
8509 rtx_code_label *label;
8512 struct constant_pool
8514 struct constant_pool *next;
8515 rtx_insn *first_insn;
8516 rtx_insn *pool_insn;
8518 rtx_insn *emit_pool_after;
8520 struct constant *constants[NR_C_MODES];
8521 struct constant *execute;
8522 rtx_code_label *label;
8526 /* Allocate new constant_pool structure. */
8528 static struct constant_pool *
8529 s390_alloc_pool (void)
8531 struct constant_pool *pool;
8534 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8536 for (i = 0; i < NR_C_MODES; i++)
8537 pool->constants[i] = NULL;
8539 pool->execute = NULL;
8540 pool->label = gen_label_rtx ();
8541 pool->first_insn = NULL;
8542 pool->pool_insn = NULL;
8543 pool->insns = BITMAP_ALLOC (NULL);
8545 pool->emit_pool_after = NULL;
8550 /* Create new constant pool covering instructions starting at INSN
8551 and chain it to the end of POOL_LIST. */
8553 static struct constant_pool *
8554 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8556 struct constant_pool *pool, **prev;
8558 pool = s390_alloc_pool ();
8559 pool->first_insn = insn;
8561 for (prev = pool_list; *prev; prev = &(*prev)->next)
8568 /* End range of instructions covered by POOL at INSN and emit
8569 placeholder insn representing the pool. */
8572 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8574 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8577 insn = get_last_insn ();
8579 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8580 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8583 /* Add INSN to the list of insns covered by POOL. */
8586 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8588 bitmap_set_bit (pool->insns, INSN_UID (insn));
8591 /* Return pool out of POOL_LIST that covers INSN. */
8593 static struct constant_pool *
8594 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8596 struct constant_pool *pool;
8598 for (pool = pool_list; pool; pool = pool->next)
8599 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8605 /* Add constant VAL of mode MODE to the constant pool POOL. */
8608 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8613 for (i = 0; i < NR_C_MODES; i++)
8614 if (constant_modes[i] == mode)
8616 gcc_assert (i != NR_C_MODES);
8618 for (c = pool->constants[i]; c != NULL; c = c->next)
8619 if (rtx_equal_p (val, c->value))
8624 c = (struct constant *) xmalloc (sizeof *c);
8626 c->label = gen_label_rtx ();
8627 c->next = pool->constants[i];
8628 pool->constants[i] = c;
8629 pool->size += GET_MODE_SIZE (mode);
8633 /* Return an rtx that represents the offset of X from the start of
8637 s390_pool_offset (struct constant_pool *pool, rtx x)
8641 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8642 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8643 UNSPEC_POOL_OFFSET);
8644 return gen_rtx_CONST (GET_MODE (x), x);
8647 /* Find constant VAL of mode MODE in the constant pool POOL.
8648 Return an RTX describing the distance from the start of
8649 the pool to the location of the new constant. */
8652 s390_find_constant (struct constant_pool *pool, rtx val,
8658 for (i = 0; i < NR_C_MODES; i++)
8659 if (constant_modes[i] == mode)
8661 gcc_assert (i != NR_C_MODES);
8663 for (c = pool->constants[i]; c != NULL; c = c->next)
8664 if (rtx_equal_p (val, c->value))
8669 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8672 /* Check whether INSN is an execute. Return the label_ref to its
8673 execute target template if so, NULL_RTX otherwise. */
8676 s390_execute_label (rtx insn)
8678 if (NONJUMP_INSN_P (insn)
8679 && GET_CODE (PATTERN (insn)) == PARALLEL
8680 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8681 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8682 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8687 /* Add execute target for INSN to the constant pool POOL. */
8690 s390_add_execute (struct constant_pool *pool, rtx insn)
8694 for (c = pool->execute; c != NULL; c = c->next)
8695 if (INSN_UID (insn) == INSN_UID (c->value))
8700 c = (struct constant *) xmalloc (sizeof *c);
8702 c->label = gen_label_rtx ();
8703 c->next = pool->execute;
8709 /* Find execute target for INSN in the constant pool POOL.
8710 Return an RTX describing the distance from the start of
8711 the pool to the location of the execute target. */
8714 s390_find_execute (struct constant_pool *pool, rtx insn)
8718 for (c = pool->execute; c != NULL; c = c->next)
8719 if (INSN_UID (insn) == INSN_UID (c->value))
8724 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8727 /* For an execute INSN, extract the execute target template. */
8730 s390_execute_target (rtx insn)
8732 rtx pattern = PATTERN (insn);
8733 gcc_assert (s390_execute_label (insn));
8735 if (XVECLEN (pattern, 0) == 2)
8737 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8741 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8744 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8745 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8747 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8753 /* Indicate that INSN cannot be duplicated. This is the case for
8754 execute insns that carry a unique label. */
8757 s390_cannot_copy_insn_p (rtx_insn *insn)
8759 rtx label = s390_execute_label (insn);
8760 return label && label != const0_rtx;
8763 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8764 do not emit the pool base label. */
8767 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8770 rtx_insn *insn = pool->pool_insn;
8773 /* Switch to rodata section. */
8774 if (TARGET_CPU_ZARCH)
8776 insn = emit_insn_after (gen_pool_section_start (), insn);
8777 INSN_ADDRESSES_NEW (insn, -1);
8780 /* Ensure minimum pool alignment. */
8781 if (TARGET_CPU_ZARCH)
8782 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8784 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8785 INSN_ADDRESSES_NEW (insn, -1);
8787 /* Emit pool base label. */
8790 insn = emit_label_after (pool->label, insn);
8791 INSN_ADDRESSES_NEW (insn, -1);
8794 /* Dump constants in descending alignment requirement order,
8795 ensuring proper alignment for every constant. */
8796 for (i = 0; i < NR_C_MODES; i++)
8797 for (c = pool->constants[i]; c; c = c->next)
8799 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8800 rtx value = copy_rtx (c->value);
8801 if (GET_CODE (value) == CONST
8802 && GET_CODE (XEXP (value, 0)) == UNSPEC
8803 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8804 && XVECLEN (XEXP (value, 0), 0) == 1)
8805 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8807 insn = emit_label_after (c->label, insn);
8808 INSN_ADDRESSES_NEW (insn, -1);
8810 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8811 gen_rtvec (1, value),
8812 UNSPECV_POOL_ENTRY);
8813 insn = emit_insn_after (value, insn);
8814 INSN_ADDRESSES_NEW (insn, -1);
8817 /* Ensure minimum alignment for instructions. */
8818 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8819 INSN_ADDRESSES_NEW (insn, -1);
8821 /* Output in-pool execute template insns. */
8822 for (c = pool->execute; c; c = c->next)
8824 insn = emit_label_after (c->label, insn);
8825 INSN_ADDRESSES_NEW (insn, -1);
8827 insn = emit_insn_after (s390_execute_target (c->value), insn);
8828 INSN_ADDRESSES_NEW (insn, -1);
8831 /* Switch back to previous section. */
8832 if (TARGET_CPU_ZARCH)
8834 insn = emit_insn_after (gen_pool_section_end (), insn);
8835 INSN_ADDRESSES_NEW (insn, -1);
8838 insn = emit_barrier_after (insn);
8839 INSN_ADDRESSES_NEW (insn, -1);
8841 /* Remove placeholder insn. */
8842 remove_insn (pool->pool_insn);
8845 /* Free all memory used by POOL. */
8848 s390_free_pool (struct constant_pool *pool)
8850 struct constant *c, *next;
8853 for (i = 0; i < NR_C_MODES; i++)
8854 for (c = pool->constants[i]; c; c = next)
8860 for (c = pool->execute; c; c = next)
8866 BITMAP_FREE (pool->insns);
8871 /* Collect main literal pool. Return NULL on overflow. */
8873 static struct constant_pool *
8874 s390_mainpool_start (void)
8876 struct constant_pool *pool;
8879 pool = s390_alloc_pool ();
8881 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8883 if (NONJUMP_INSN_P (insn)
8884 && GET_CODE (PATTERN (insn)) == SET
8885 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8886 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8888 /* There might be two main_pool instructions if base_reg
8889 is call-clobbered; one for shrink-wrapped code and one
8890 for the rest. We want to keep the first. */
8891 if (pool->pool_insn)
8893 insn = PREV_INSN (insn);
8894 delete_insn (NEXT_INSN (insn));
8897 pool->pool_insn = insn;
8900 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8902 s390_add_execute (pool, insn);
8904 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8906 rtx pool_ref = NULL_RTX;
8907 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8910 rtx constant = get_pool_constant (pool_ref);
8911 machine_mode mode = get_pool_mode (pool_ref);
8912 s390_add_constant (pool, constant, mode);
/* If hot/cold partitioning is enabled, we have to make sure that
   the literal pool is emitted in the same section where the
   initialization of the literal pool base pointer takes place.
   emit_pool_after is only used in the non-overflow case on non-Z
   CPUs, where we can emit the literal pool at the end of the
   function body within the text section.  */
8923 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8924 && !pool->emit_pool_after)
8925 pool->emit_pool_after = PREV_INSN (insn);
8928 gcc_assert (pool->pool_insn || pool->size == 0);
8930 if (pool->size >= 4096)
8932 /* We're going to chunkify the pool, so remove the main
8933 pool placeholder insn. */
8934 remove_insn (pool->pool_insn);
8936 s390_free_pool (pool);
/* If the function ends with the section where the literal pool
   should be emitted, set the marker to its end.  */
8942 if (pool && !pool->emit_pool_after)
8943 pool->emit_pool_after = get_last_insn ();
8948 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8949 Modify the current function to output the pool constants as well as
8950 the pool register setup instruction. */
8953 s390_mainpool_finish (struct constant_pool *pool)
8955 rtx base_reg = cfun->machine->base_reg;
8957 /* If the pool is empty, we're done. */
8958 if (pool->size == 0)
8960 /* We don't actually need a base register after all. */
8961 cfun->machine->base_reg = NULL_RTX;
8963 if (pool->pool_insn)
8964 remove_insn (pool->pool_insn);
8965 s390_free_pool (pool);
8969 /* We need correct insn addresses. */
8970 shorten_branches (get_insns ());
8972 /* On zSeries, we use a LARL to load the pool register. The pool is
8973 located in the .rodata section, so we emit it after the function. */
8974 if (TARGET_CPU_ZARCH)
8976 rtx set = gen_main_base_64 (base_reg, pool->label);
8977 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8978 INSN_ADDRESSES_NEW (insn, -1);
8979 remove_insn (pool->pool_insn);
8981 insn = get_last_insn ();
8982 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8983 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8985 s390_dump_pool (pool, 0);
8988 /* On S/390, if the total size of the function's code plus literal pool
8989 does not exceed 4096 bytes, we use BASR to set up a function base
8990 pointer, and emit the literal pool at the end of the function. */
8991 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8992 + pool->size + 8 /* alignment slop */ < 4096)
8994 rtx set = gen_main_base_31_small (base_reg, pool->label);
8995 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8996 INSN_ADDRESSES_NEW (insn, -1);
8997 remove_insn (pool->pool_insn);
8999 insn = emit_label_after (pool->label, insn);
9000 INSN_ADDRESSES_NEW (insn, -1);
9002 /* emit_pool_after will be set by s390_mainpool_start to the
9003 last insn of the section where the literal pool should be
9005 insn = pool->emit_pool_after;
9007 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9008 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9010 s390_dump_pool (pool, 1);
9013 /* Otherwise, we emit an inline literal pool and use BASR to branch
9014 over it, setting up the pool register at the same time. */
9017 rtx_code_label *pool_end = gen_label_rtx ();
9019 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
9020 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
9021 JUMP_LABEL (insn) = pool_end;
9022 INSN_ADDRESSES_NEW (insn, -1);
9023 remove_insn (pool->pool_insn);
9025 insn = emit_label_after (pool->label, insn);
9026 INSN_ADDRESSES_NEW (insn, -1);
9028 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9029 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9031 insn = emit_label_after (pool_end, pool->pool_insn);
9032 INSN_ADDRESSES_NEW (insn, -1);
9034 s390_dump_pool (pool, 1);
9038 /* Replace all literal pool references. */
9040 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
9043 replace_ltrel_base (&PATTERN (insn));
9045 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9047 rtx addr, pool_ref = NULL_RTX;
9048 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9051 if (s390_execute_label (insn))
9052 addr = s390_find_execute (pool, insn);
9054 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
9055 get_pool_mode (pool_ref));
9057 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9058 INSN_CODE (insn) = -1;
9064 /* Free the pool. */
9065 s390_free_pool (pool);
9068 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9069 We have decided we cannot use this pool, so revert all changes
9070 to the current function that were done by s390_mainpool_start. */
9072 s390_mainpool_cancel (struct constant_pool *pool)
9074 /* We didn't actually change the instruction stream, so simply
9075 free the pool memory. */
9076 s390_free_pool (pool);
9080 /* Chunkify the literal pool. */
9082 #define S390_POOL_CHUNK_MIN 0xc00
9083 #define S390_POOL_CHUNK_MAX 0xe00
9085 static struct constant_pool *
9086 s390_chunkify_start (void)
9088 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
9091 rtx pending_ltrel = NULL_RTX;
9094 rtx (*gen_reload_base) (rtx, rtx) =
9095 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
9098 /* We need correct insn addresses. */
9100 shorten_branches (get_insns ());
9102 /* Scan all insns and move literals to pool chunks. */
9104 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9106 bool section_switch_p = false;
9108 /* Check for pending LTREL_BASE. */
9111 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
9114 gcc_assert (ltrel_base == pending_ltrel);
9115 pending_ltrel = NULL_RTX;
9119 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
9122 curr_pool = s390_start_pool (&pool_list, insn);
9124 s390_add_execute (curr_pool, insn);
9125 s390_add_pool_insn (curr_pool, insn);
9127 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9129 rtx pool_ref = NULL_RTX;
9130 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9133 rtx constant = get_pool_constant (pool_ref);
9134 machine_mode mode = get_pool_mode (pool_ref);
9137 curr_pool = s390_start_pool (&pool_list, insn);
9139 s390_add_constant (curr_pool, constant, mode);
9140 s390_add_pool_insn (curr_pool, insn);
9142 /* Don't split the pool chunk between a LTREL_OFFSET load
9143 and the corresponding LTREL_BASE. */
9144 if (GET_CODE (constant) == CONST
9145 && GET_CODE (XEXP (constant, 0)) == UNSPEC
9146 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
9148 gcc_assert (!pending_ltrel);
9149 pending_ltrel = pool_ref;
9154 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
9157 s390_add_pool_insn (curr_pool, insn);
9158 /* An LTREL_BASE must follow within the same basic block. */
9159 gcc_assert (!pending_ltrel);
9163 switch (NOTE_KIND (insn))
9165 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
9166 section_switch_p = true;
9168 case NOTE_INSN_VAR_LOCATION:
9169 case NOTE_INSN_CALL_ARG_LOCATION:
9176 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
9177 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
9180 if (TARGET_CPU_ZARCH)
9182 if (curr_pool->size < S390_POOL_CHUNK_MAX)
9185 s390_end_pool (curr_pool, NULL);
9190 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
9191 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
9194 /* We will later have to insert base register reload insns.
9195 Those will have an effect on code size, which we need to
9196 consider here. This calculation makes rather pessimistic
9197 worst-case assumptions. */
9201 if (chunk_size < S390_POOL_CHUNK_MIN
9202 && curr_pool->size < S390_POOL_CHUNK_MIN
9203 && !section_switch_p)
9206 /* Pool chunks can only be inserted after BARRIERs ... */
9207 if (BARRIER_P (insn))
9209 s390_end_pool (curr_pool, insn);
9214 /* ... so if we don't find one in time, create one. */
9215 else if (chunk_size > S390_POOL_CHUNK_MAX
9216 || curr_pool->size > S390_POOL_CHUNK_MAX
9217 || section_switch_p)
9219 rtx_insn *label, *jump, *barrier, *next, *prev;
9221 if (!section_switch_p)
9223 /* We can insert the barrier only after a 'real' insn. */
9224 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
9226 if (get_attr_length (insn) == 0)
9228 /* Don't separate LTREL_BASE from the corresponding
9229 LTREL_OFFSET load. */
9236 next = NEXT_INSN (insn);
9240 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
9241 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
9245 gcc_assert (!pending_ltrel);
9247 /* The old pool has to end before the section switch
note in order to make it part of the current
section.  */
9250 insn = PREV_INSN (insn);
9253 label = gen_label_rtx ();
9255 if (prev && NOTE_P (prev))
9256 prev = prev_nonnote_insn (prev);
9258 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
9259 INSN_LOCATION (prev));
9261 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
9262 barrier = emit_barrier_after (jump);
9263 insn = emit_label_after (label, barrier);
9264 JUMP_LABEL (jump) = label;
9265 LABEL_NUSES (label) = 1;
9267 INSN_ADDRESSES_NEW (jump, -1);
9268 INSN_ADDRESSES_NEW (barrier, -1);
9269 INSN_ADDRESSES_NEW (insn, -1);
9271 s390_end_pool (curr_pool, barrier);
9279 s390_end_pool (curr_pool, NULL);
9280 gcc_assert (!pending_ltrel);
9282 /* Find all labels that are branched into
9283 from an insn belonging to a different chunk. */
9285 far_labels = BITMAP_ALLOC (NULL);
9287 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9289 rtx_jump_table_data *table;
9291 /* Labels marked with LABEL_PRESERVE_P can be target
9292 of non-local jumps, so we have to mark them.
9293 The same holds for named labels.
9295 Don't do that, however, if it is the label before
9296 a jump table. */
9298 if (LABEL_P (insn)
9299 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9301 rtx_insn *vec_insn = NEXT_INSN (insn);
9302 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9303 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9305 /* Check potential targets in a table jump (casesi_jump). */
9306 else if (tablejump_p (insn, NULL, &table))
9308 rtx vec_pat = PATTERN (table);
9309 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9311 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9313 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9315 if (s390_find_pool (pool_list, label)
9316 != s390_find_pool (pool_list, insn))
9317 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9320 /* If we have a direct jump (conditional or unconditional),
9321 check all potential targets. */
9322 else if (JUMP_P (insn))
9324 rtx pat = PATTERN (insn);
9326 if (GET_CODE (pat) == PARALLEL)
9327 pat = XVECEXP (pat, 0, 0);
9329 if (GET_CODE (pat) == SET)
9331 rtx label = JUMP_LABEL (insn);
9332 if (label && !ANY_RETURN_P (label))
9334 if (s390_find_pool (pool_list, label)
9335 != s390_find_pool (pool_list, insn))
9336 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9342 /* Insert base register reload insns before every pool. */
9344 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9346 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9347 curr_pool->label);
9348 rtx_insn *insn = curr_pool->first_insn;
9349 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9352 /* Insert base register reload insns at every far label. */
9354 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9355 if (LABEL_P (insn)
9356 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9358 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9361 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9362 pool->label);
9363 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9368 BITMAP_FREE (far_labels);
9371 /* Recompute insn addresses. */
9373 init_insn_lengths ();
9374 shorten_branches (get_insns ());
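/* Illustrative sketch (not part of GCC): the splitting policy of the
   scan above, reduced to its two size thresholds.  POOL_MIN and
   POOL_MAX are hypothetical stand-ins for S390_POOL_CHUNK_MIN and
   S390_POOL_CHUNK_MAX; the real code additionally pads its estimate
   for the base register reload insns inserted later.  */
#if 0 /* example only */
#include <stdbool.h>

enum { POOL_MIN = 0xc00, POOL_MAX = 0xe00 };  /* made-up values */
enum split_action { KEEP_GROWING, SPLIT_AT_BARRIER, FORCE_SPLIT };

static enum split_action
chunk_action (int chunk_size, int pool_size, bool at_barrier,
              bool section_switch_p)
{
  /* Both sizes below the minimum: keep collecting literals, unless a
     section switch forces a cut.  */
  if (chunk_size < POOL_MIN && pool_size < POOL_MIN && !section_switch_p)
    return KEEP_GROWING;
  /* A barrier is the cheapest split point: dump the pool right after.  */
  if (at_barrier)
    return SPLIT_AT_BARRIER;
  /* Past the maximum (or at a section switch) a jump/barrier/label
     sequence must be synthesized to make room for the pool.  */
  if (chunk_size > POOL_MAX || pool_size > POOL_MAX || section_switch_p)
    return FORCE_SPLIT;
  return KEEP_GROWING;  /* between MIN and MAX: wait for a barrier */
}
#endif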
9379 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9380 After we have decided to use this list, finish implementing
9381 all changes to the current function as required. */
9384 s390_chunkify_finish (struct constant_pool *pool_list)
9386 struct constant_pool *curr_pool = NULL;
9390 /* Replace all literal pool references. */
9392 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9395 replace_ltrel_base (&PATTERN (insn));
9397 curr_pool = s390_find_pool (pool_list, insn);
9401 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9403 rtx addr, pool_ref = NULL_RTX;
9404 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9407 if (s390_execute_label (insn))
9408 addr = s390_find_execute (curr_pool, insn);
9410 addr = s390_find_constant (curr_pool,
9411 get_pool_constant (pool_ref),
9412 get_pool_mode (pool_ref));
9414 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9415 INSN_CODE (insn) = -1;
9420 /* Dump out all literal pools. */
9422 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9423 s390_dump_pool (curr_pool, 0);
9425 /* Free pool list. */
9429 struct constant_pool *next = pool_list->next;
9430 s390_free_pool (pool_list);
9435 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9436 We have decided we cannot use this list, so revert all changes
9437 to the current function that were done by s390_chunkify_start. */
9440 s390_chunkify_cancel (struct constant_pool *pool_list)
9442 struct constant_pool *curr_pool = NULL;
9445 /* Remove all pool placeholder insns. */
9447 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9449 /* Did we insert an extra barrier? Remove it. */
9450 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9451 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9452 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9454 if (jump && JUMP_P (jump)
9455 && barrier && BARRIER_P (barrier)
9456 && label && LABEL_P (label)
9457 && GET_CODE (PATTERN (jump)) == SET
9458 && SET_DEST (PATTERN (jump)) == pc_rtx
9459 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9460 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9463 remove_insn (barrier);
9464 remove_insn (label);
9467 remove_insn (curr_pool->pool_insn);
9470 /* Remove all base register reload insns. */
9472 for (insn = get_insns (); insn; )
9474 rtx_insn *next_insn = NEXT_INSN (insn);
9476 if (NONJUMP_INSN_P (insn)
9477 && GET_CODE (PATTERN (insn)) == SET
9478 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9479 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9485 /* Free pool list. */
9489 struct constant_pool *next = pool_list->next;
9490 s390_free_pool (pool_list);
9495 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9498 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9500 switch (GET_MODE_CLASS (mode))
9503 case MODE_DECIMAL_FLOAT:
9504 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9506 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp),
9507 as_a <scalar_float_mode> (mode), align);
9511 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9512 mark_symbol_refs_as_used (exp);
9515 case MODE_VECTOR_INT:
9516 case MODE_VECTOR_FLOAT:
9519 machine_mode inner_mode;
9520 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9522 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9523 for (i = 0; i < XVECLEN (exp, 0); i++)
9524 s390_output_pool_entry (XVECEXP (exp, 0, i),
9528 : GET_MODE_BITSIZE (inner_mode));
9538 /* Return an RTL expression representing the value of the return address
9539 for the frame COUNT steps up from the current frame. FRAME is the
9540 frame pointer of that frame. */
9543 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9548 /* Without backchain, we fail for all but the current frame. */
9550 if (!TARGET_BACKCHAIN && count > 0)
9551 return NULL_RTX;
9553 /* For the current frame, we need to make sure the initial
9554 value of RETURN_REGNUM is actually saved. */
9556 if (count == 0)
9558 /* On non-z architectures branch splitting could overwrite r14. */
9559 if (TARGET_CPU_ZARCH)
9560 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9563 cfun_frame_layout.save_return_addr_p = true;
9564 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9568 if (TARGET_PACKED_STACK)
9569 offset = -2 * UNITS_PER_LONG;
9571 offset = RETURN_REGNUM * UNITS_PER_LONG;
9573 addr = plus_constant (Pmode, frame, offset);
9574 addr = memory_address (Pmode, addr);
9575 return gen_rtx_MEM (Pmode, addr);
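/* Usage sketch (illustrative, not part of GCC): user code reaches the
   hook above through __builtin_return_address; nonzero counts require
   -mbackchain, as enforced by the early return above.  */
#if 0 /* example only */
#include <stdio.h>

void
callee (void)
{
  /* Count 0 asks for the current frame's return address.  */
  printf ("called from %p\n", __builtin_return_address (0));
}
#endif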
9578 /* Return an RTL expression representing the back chain stored in
9579 the current stack frame. */
9582 s390_back_chain_rtx (void)
9586 gcc_assert (TARGET_BACKCHAIN);
9588 if (TARGET_PACKED_STACK)
9589 chain = plus_constant (Pmode, stack_pointer_rtx,
9590 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9592 chain = stack_pointer_rtx;
9594 chain = gen_rtx_MEM (Pmode, chain);
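/* Worked example (illustrative): on 64 bit, STACK_POINTER_OFFSET is 160
   and UNITS_PER_LONG is 8, so with -mpacked-stack the backchain word is
   loaded from offset 160 - 8 = 152 off the stack pointer, while the
   default layout keeps it at offset 0.  */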
9598 /* Find first call clobbered register unused in a function.
9599 This could be used as base register in a leaf function
9600 or for holding the return address before epilogue. */
9603 find_unused_clobbered_reg (void)
9606 for (i = 0; i < 6; i++)
9607 if (!df_regs_ever_live_p (i))
9613 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9614 clobbered hard regs in SETREG. */
9617 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9619 char *regs_ever_clobbered = (char *)data;
9620 unsigned int i, regno;
9621 machine_mode mode = GET_MODE (setreg);
9623 if (GET_CODE (setreg) == SUBREG)
9625 rtx inner = SUBREG_REG (setreg);
9626 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9628 regno = subreg_regno (setreg);
9630 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9631 regno = REGNO (setreg);
9636 i < regno + HARD_REGNO_NREGS (regno, mode);
9638 regs_ever_clobbered[i] = 1;
9641 /* Walks through all basic blocks of the current function looking
9642 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9643 of the passed integer array REGS_EVER_CLOBBERED are set to one for
9644 each of those regs. */
9647 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9653 memset (regs_ever_clobbered, 0, 32);
9655 /* For non-leaf functions we have to consider all call clobbered regs to be
9656 clobbered. */
9657 if (!crtl->is_leaf)
9659 for (i = 0; i < 32; i++)
9660 regs_ever_clobbered[i] = call_really_used_regs[i];
9663 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9664 this work is done by liveness analysis (mark_regs_live_at_end).
9665 Special care is needed for functions containing landing pads. Landing pads
9666 may use the eh registers, but the code which sets these registers is not
9667 contained in that function. Hence s390_regs_ever_clobbered is not able to
9668 deal with this automatically. */
9669 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9670 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9671 if (crtl->calls_eh_return
9672 || (cfun->machine->has_landing_pad_p
9673 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9674 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9676 /* For nonlocal gotos all call-saved registers have to be saved.
9677 This flag is also set for the unwinding code in libgcc.
9678 See expand_builtin_unwind_init. For regs_ever_live this is done by
9679 reload. */
9680 if (crtl->saves_all_registers)
9681 for (i = 0; i < 32; i++)
9682 if (!call_really_used_regs[i])
9683 regs_ever_clobbered[i] = 1;
9685 FOR_EACH_BB_FN (cur_bb, cfun)
9687 FOR_BB_INSNS (cur_bb, cur_insn)
9691 if (!INSN_P (cur_insn))
9694 pat = PATTERN (cur_insn);
9696 /* Ignore GPR restore insns. */
9697 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9699 if (GET_CODE (pat) == SET
9700 && GENERAL_REG_P (SET_DEST (pat)))
9703 if (GET_MODE (SET_SRC (pat)) == DImode
9704 && FP_REG_P (SET_SRC (pat)))
9708 if (GET_CODE (SET_SRC (pat)) == MEM)
9713 if (GET_CODE (pat) == PARALLEL
9714 && load_multiple_operation (pat, VOIDmode))
9715 continue;
9718 note_stores (pat,
9719 s390_reg_clobbered_rtx,
9720 regs_ever_clobbered);
9725 /* Determine the frame area which actually has to be accessed
9726 in the function epilogue. The values are stored at the
9727 given pointers AREA_BOTTOM (address of the lowest used stack
9728 address) and AREA_TOP (address of the first item which does
9729 not belong to the stack frame). */
9732 s390_frame_area (int *area_bottom, int *area_top)
9739 if (cfun_frame_layout.first_restore_gpr != -1)
9741 b = (cfun_frame_layout.gprs_offset
9742 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9743 t = b + (cfun_frame_layout.last_restore_gpr
9744 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9747 if (TARGET_64BIT && cfun_save_high_fprs_p)
9749 b = MIN (b, cfun_frame_layout.f8_offset);
9750 t = MAX (t, (cfun_frame_layout.f8_offset
9751 + cfun_frame_layout.high_fprs * 8));
9756 if (cfun_fpr_save_p (FPR4_REGNUM))
9758 b = MIN (b, cfun_frame_layout.f4_offset);
9759 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9761 if (cfun_fpr_save_p (FPR6_REGNUM))
9763 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9764 t = MAX (t, cfun_frame_layout.f4_offset + 16);
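/* Worked example (illustrative, made-up offsets): restoring r6-r15 with
   gprs_offset 48 and UNITS_PER_LONG 8 gives
     b = 48 + 6*8 = 96,   t = 96 + (15 - 6 + 1)*8 = 176,
   so the epilogue only needs to address bytes [96,176) of the save
   area, plus any FPR slots that widen the range above.  */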
9770 /* Update gpr_save_slots in the frame layout trying to make use of
9771 FPRs as GPR save slots.
9772 This is a helper routine of s390_register_info. */
9775 s390_register_info_gprtofpr ()
9777 int save_reg_slot = FPR0_REGNUM;
9780 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9783 /* builtin_eh_return needs to be able to modify the return address
9784 on the stack. It could also adjust the FPR save slot instead but
9785 is it worth the trouble?! */
9786 if (crtl->calls_eh_return)
9789 for (i = 15; i >= 6; i--)
9791 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9794 /* Advance to the next FP register which can be used as a
9795 GPR save slot. */
9796 while ((!call_really_used_regs[save_reg_slot]
9797 || df_regs_ever_live_p (save_reg_slot)
9798 || cfun_fpr_save_p (save_reg_slot))
9799 && FP_REGNO_P (save_reg_slot))
9801 if (!FP_REGNO_P (save_reg_slot))
9803 /* We only want to use ldgr/lgdr if we can get rid of
9804 stm/lm entirely. So undo the gpr slot allocation in
9805 case we ran out of FPR save slots. */
9806 for (j = 6; j <= 15; j++)
9807 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9808 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9811 cfun_gpr_save_slot (i) = save_reg_slot++;
9815 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9816 stdarg.
9817 This is a helper routine for s390_register_info. */
9820 s390_register_info_stdarg_fpr ()
9826 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9827 f0-f4 for 64 bit. */
9828 if (!cfun->stdarg
9829 || !TARGET_HARD_FLOAT
9830 || !cfun->va_list_fpr_size
9831 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9832 return;
9834 min_fpr = crtl->args.info.fprs;
9835 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9836 if (max_fpr >= FP_ARG_NUM_REG)
9837 max_fpr = FP_ARG_NUM_REG - 1;
9839 /* FPR argument regs start at f0. */
9840 min_fpr += FPR0_REGNUM;
9841 max_fpr += FPR0_REGNUM;
9843 for (i = min_fpr; i <= max_fpr; i++)
9844 cfun_set_fpr_save (i);
9847 /* Reserve the GPR save slots for GPRs which need to be saved due to
9848 stdarg.
9849 This is a helper routine for s390_register_info. */
9852 s390_register_info_stdarg_gpr ()
9858 if (!cfun->stdarg
9859 || !cfun->va_list_gpr_size
9860 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9861 return;
9863 min_gpr = crtl->args.info.gprs;
9864 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9865 if (max_gpr >= GP_ARG_NUM_REG)
9866 max_gpr = GP_ARG_NUM_REG - 1;
9868 /* GPR argument regs start at r2. */
9869 min_gpr += GPR2_REGNUM;
9870 max_gpr += GPR2_REGNUM;
9872 /* If r6 was supposed to be saved into an FPR and now needs to go to
9873 the stack for vararg we have to adjust the restore range to make
9874 sure that the restore is done from stack as well. */
9875 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9876 && min_gpr <= GPR6_REGNUM
9877 && max_gpr >= GPR6_REGNUM)
9879 if (cfun_frame_layout.first_restore_gpr == -1
9880 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9881 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9882 if (cfun_frame_layout.last_restore_gpr == -1
9883 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9884 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9887 if (cfun_frame_layout.first_save_gpr == -1
9888 || cfun_frame_layout.first_save_gpr > min_gpr)
9889 cfun_frame_layout.first_save_gpr = min_gpr;
9891 if (cfun_frame_layout.last_save_gpr == -1
9892 || cfun_frame_layout.last_save_gpr < max_gpr)
9893 cfun_frame_layout.last_save_gpr = max_gpr;
9895 for (i = min_gpr; i <= max_gpr; i++)
9896 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
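/* Worked example (illustrative): in a varargs function whose named
   arguments consumed r2 and r3 (crtl->args.info.gprs == 2), a large
   enough va_list_gpr_size yields min_gpr = r4 and max_gpr = r6
   (GP_ARG_NUM_REG caps the range), so the slots for r4-r6 are forced
   to the stack.  */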
9899 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9900 prologue and epilogue. */
9903 s390_register_info_set_ranges ()
9907 /* Find the first and the last save slot supposed to use the stack
9908 to set the restore range.
9909 Vararg regs might be marked as save to stack but only the
9910 call-saved regs really need restoring (i.e. r6). This code
9911 assumes that the vararg regs have not yet been recorded in
9912 cfun_gpr_save_slot. */
9913 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9914 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9915 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9916 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9917 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9918 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
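/* Illustrative sketch (not part of GCC): the double scan above in
   isolation -- find the first and last of N slots equal to WANT,
   reporting -1/-1 if there is none.  */
#if 0 /* example only */
static void
find_range (const char *slots, int n, char want, int *first, int *last)
{
  int i, j;
  for (i = 0; i < n && slots[i] != want; i++)
    ;
  for (j = n - 1; j > i && slots[j] != want; j--)
    ;
  *first = (i == n) ? -1 : i;
  *last = (i == n) ? -1 : j;
}
#endif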
9921 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9922 for registers which need to be saved in function prologue.
9923 This function can be used until the insns emitted for save/restore
9924 of the regs are visible in the RTL stream. */
9927 s390_register_info ()
9930 char clobbered_regs[32];
9932 gcc_assert (!epilogue_completed);
9934 if (reload_completed)
9935 /* After reload we rely on our own routine to determine which
9936 registers need saving. */
9937 s390_regs_ever_clobbered (clobbered_regs);
9938 else
9939 /* During reload we use regs_ever_live as a base since reload
9940 does changes in there which we otherwise would not be aware
9941 of. */
9942 for (i = 0; i < 32; i++)
9943 clobbered_regs[i] = df_regs_ever_live_p (i);
9945 for (i = 0; i < 32; i++)
9946 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9948 /* Mark the call-saved FPRs which need to be saved.
9949 This needs to be done before checking the special GPRs since the
9950 stack pointer usage depends on whether high FPRs have to be saved
9951 or not. */
9952 cfun_frame_layout.fpr_bitmap = 0;
9953 cfun_frame_layout.high_fprs = 0;
9954 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9955 if (clobbered_regs[i] && !call_really_used_regs[i])
9957 cfun_set_fpr_save (i);
9958 if (i >= FPR8_REGNUM)
9959 cfun_frame_layout.high_fprs++;
9962 /* Register 12 is used for GOT address, but also as temp in prologue
9963 for split-stack stdarg functions (unless r14 is available). */
9965 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9966 || (flag_split_stack && cfun->stdarg
9967 && (crtl->is_leaf || TARGET_TPF_PROFILING
9968 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9970 clobbered_regs[BASE_REGNUM]
9971 |= (cfun->machine->base_reg
9972 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9974 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9975 |= !!frame_pointer_needed;
9977 /* On pre z900 machines this might take until machine dependent
9978 reorg to decide.
9979 save_return_addr_p will only be set on non-zarch machines so
9980 there is no risk that r14 goes into an FPR instead of a stack
9981 slot. */
9982 clobbered_regs[RETURN_REGNUM]
9983 |= (!crtl->is_leaf
9984 || TARGET_TPF_PROFILING
9985 || cfun->machine->split_branches_pending_p
9986 || cfun_frame_layout.save_return_addr_p
9987 || crtl->calls_eh_return);
9989 clobbered_regs[STACK_POINTER_REGNUM]
9990 |= (!crtl->is_leaf
9991 || TARGET_TPF_PROFILING
9992 || cfun_save_high_fprs_p
9993 || get_frame_size () > 0
9994 || (reload_completed && cfun_frame_layout.frame_size > 0)
9995 || cfun->calls_alloca);
9997 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9999 for (i = 6; i < 16; i++)
10000 if (clobbered_regs[i])
10001 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
10003 s390_register_info_stdarg_fpr ();
10004 s390_register_info_gprtofpr ();
10005 s390_register_info_set_ranges ();
10006 /* stdarg functions might need to save GPRs 2 to 6. This might
10007 override the GPR->FPR save decision made by
10008 s390_register_info_gprtofpr for r6 since vararg regs must go to
10009 the stack. */
10010 s390_register_info_stdarg_gpr ();
10013 /* This function is called by s390_optimize_prologue in order to get
10014 rid of unnecessary GPR save/restore instructions. The register info
10015 for the GPRs is re-computed and the ranges are re-calculated. */
10018 s390_optimize_register_info ()
10020 char clobbered_regs[32];
10023 gcc_assert (epilogue_completed);
10024 gcc_assert (!cfun->machine->split_branches_pending_p);
10026 s390_regs_ever_clobbered (clobbered_regs);
10028 for (i = 0; i < 32; i++)
10029 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10031 /* There is still special treatment needed for cases invisible to
10032 s390_regs_ever_clobbered. */
10033 clobbered_regs[RETURN_REGNUM]
10034 |= (TARGET_TPF_PROFILING
10035 /* When expanding builtin_return_addr in ESA mode we do not
10036 know whether r14 will later be needed as scratch reg when
10037 doing branch splitting. So the builtin always accesses the
10038 r14 save slot and we need to stick to the save/restore
10039 decision for r14 even if it turns out that it didn't get
10040 used. */
10041 || cfun_frame_layout.save_return_addr_p
10042 || crtl->calls_eh_return);
10044 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
10046 for (i = 6; i < 16; i++)
10047 if (!clobbered_regs[i])
10048 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
10050 s390_register_info_set_ranges ();
10051 s390_register_info_stdarg_gpr ();
10054 /* Fill cfun->machine with info about frame of current function. */
10057 s390_frame_info (void)
10059 HOST_WIDE_INT lowest_offset;
10061 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
10062 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
10064 /* The va_arg builtin uses a constant distance of 16 *
10065 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
10066 pointer. So even if we are going to save the stack pointer in an
10067 FPR we need the stack space in order to keep the offsets
10068 valid. */
10069 if (cfun->stdarg && cfun_save_arg_fprs_p)
10071 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10073 if (cfun_frame_layout.first_save_gpr_slot == -1)
10074 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
10077 cfun_frame_layout.frame_size = get_frame_size ();
10078 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
10079 fatal_error (input_location,
10080 "total size of local variables exceeds architecture limit");
10082 if (!TARGET_PACKED_STACK)
10084 /* Fixed stack layout. */
10085 cfun_frame_layout.backchain_offset = 0;
10086 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
10087 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
10088 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
10089 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
10092 else if (TARGET_BACKCHAIN)
10094 /* Kernel stack layout - packed stack, backchain, no float */
10095 gcc_assert (TARGET_SOFT_FLOAT);
10096 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
10099 /* The distance between the backchain and the return address
10100 save slot must not change. So we always need a slot for the
10101 stack pointer which resides in between. */
10102 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10104 cfun_frame_layout.gprs_offset
10105 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
10107 /* FPRs will not be saved. Nevertheless pick sane values to
10108 keep area calculations valid. */
10109 cfun_frame_layout.f0_offset =
10110 cfun_frame_layout.f4_offset =
10111 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
10117 /* Packed stack layout without backchain. */
10119 /* With stdarg FPRs need their dedicated slots. */
10120 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
10121 : (cfun_fpr_save_p (FPR4_REGNUM) +
10122 cfun_fpr_save_p (FPR6_REGNUM)));
10123 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
10125 num_fprs = (cfun->stdarg ? 2
10126 : (cfun_fpr_save_p (FPR0_REGNUM)
10127 + cfun_fpr_save_p (FPR2_REGNUM)));
10128 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
10130 cfun_frame_layout.gprs_offset
10131 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
10133 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
10134 - cfun_frame_layout.high_fprs * 8);
10137 if (cfun_save_high_fprs_p)
10138 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
10140 if (!crtl->is_leaf)
10141 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
10143 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
10144 sized area at the bottom of the stack. This is required also for
10145 leaf functions. When GCC generates a local stack reference it
10146 will always add STACK_POINTER_OFFSET to all these references. */
10147 if (crtl->is_leaf
10148 && !TARGET_TPF_PROFILING
10149 && cfun_frame_layout.frame_size == 0
10150 && !cfun->calls_alloca)
10151 return;
10153 /* Calculate the number of bytes we have used in our own register
10154 save area. With the packed stack layout we can re-use the
10155 remaining bytes for normal stack elements. */
10157 if (TARGET_PACKED_STACK)
10158 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
10159 cfun_frame_layout.f4_offset),
10160 cfun_frame_layout.gprs_offset);
10164 if (TARGET_BACKCHAIN)
10165 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
10167 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
10169 /* If under 31 bit an odd number of gprs has to be saved we have to
10170 adjust the frame size to sustain 8 byte alignment of stack
10171 frames. */
10172 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
10173 STACK_BOUNDARY / BITS_PER_UNIT - 1)
10174 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
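/* Worked example (illustrative): the final rounding above is the usual
   align-up to a power of two.  With STACK_BOUNDARY / BITS_PER_UNIT == 8,
   a 31-bit frame left at 12 bytes of slack becomes (12 + 7) & ~7 == 16,
   restoring 8-byte alignment.  */
#if 0 /* example only */
static unsigned long
align_up (unsigned long size, unsigned long align)  /* align: power of 2 */
{
  return (size + align - 1) & ~(align - 1);
}
#endif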
10177 /* Generate frame layout. Fills in register and frame data for the current
10178 function in cfun->machine. This routine can be called multiple times;
10179 it will re-do the complete frame layout every time. */
10182 s390_init_frame_layout (void)
10184 HOST_WIDE_INT frame_size;
10187 /* After LRA the frame layout is supposed to be read-only and should
10188 not be re-computed. */
10189 if (reload_completed)
10192 /* On S/390 machines, we may need to perform branch splitting, which
10193 will require both base and return address register. We have no
10194 choice but to assume we're going to need them until right at the
10195 end of the machine dependent reorg phase. */
10196 if (!TARGET_CPU_ZARCH)
10197 cfun->machine->split_branches_pending_p = true;
10201 frame_size = cfun_frame_layout.frame_size;
10203 /* Try to predict whether we'll need the base register. */
10204 base_used = cfun->machine->split_branches_pending_p
10205 || crtl->uses_const_pool
10206 || (!DISP_IN_RANGE (frame_size)
10207 && !CONST_OK_FOR_K (frame_size));
10209 /* Decide which register to use as literal pool base. In small
10210 leaf functions, try to use an unused call-clobbered register
10211 as base register to avoid save/restore overhead. */
10213 cfun->machine->base_reg = NULL_RTX;
10219 /* Prefer r5 (most likely to be free). */
10220 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
10222 cfun->machine->base_reg =
10223 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
10226 s390_register_info ();
10227 s390_frame_info ();
10229 while (frame_size != cfun_frame_layout.frame_size);
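/* Illustrative note (not part of GCC): the do/while above is a
   fixed-point iteration -- the frame size decides whether a base
   register is needed, and reserving one changes the frame size again,
   so the layout is recomputed until it stops changing.  */
#if 0 /* example only */
static long
iterate_to_fixed_point (long (*recompute) (long), long size)
{
  long next;
  while ((next = recompute (size)) != size)  /* until layout stabilizes */
    size = next;
  return next;
}
#endif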
10232 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
10233 the TX is nonescaping. A transaction is considered escaping if
10234 there is at least one path from tbegin returning CC0 to the
10235 function exit block without a tend.
10237 The check so far has some limitations:
10238 - only single tbegin/tend BBs are supported
10239 - the first cond jump after tbegin must separate the CC0 path from ~CC0
10240 - when CC is copied to a GPR and the CC0 check is done with the GPR
10241 this is not supported
10245 s390_optimize_nonescaping_tx (void)
10247 const unsigned int CC0 = 1 << 3;
10248 basic_block tbegin_bb = NULL;
10249 basic_block tend_bb = NULL;
10252 bool result = true;
10254 rtx_insn *tbegin_insn = NULL;
10256 if (!cfun->machine->tbegin_p)
10259 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
10261 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
10266 FOR_BB_INSNS (bb, insn)
10268 rtx ite, cc, pat, target;
10269 unsigned HOST_WIDE_INT mask;
10271 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10274 pat = PATTERN (insn);
10276 if (GET_CODE (pat) == PARALLEL)
10277 pat = XVECEXP (pat, 0, 0);
10279 if (GET_CODE (pat) != SET
10280 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
10283 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10287 tbegin_insn = insn;
10289 /* Just return if the tbegin doesn't have clobbers. */
10290 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10293 if (tbegin_bb != NULL)
10296 /* Find the next conditional jump. */
10297 for (tmp = NEXT_INSN (insn);
10299 tmp = NEXT_INSN (tmp))
10301 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10306 ite = SET_SRC (PATTERN (tmp));
10307 if (GET_CODE (ite) != IF_THEN_ELSE)
10310 cc = XEXP (XEXP (ite, 0), 0);
10311 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10312 || GET_MODE (cc) != CCRAWmode
10313 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10316 if (bb->succs->length () != 2)
10319 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10320 if (GET_CODE (XEXP (ite, 0)) == NE)
10321 mask ^= 0xf;
10323 if (mask == CC0)
10324 target = XEXP (ite, 1);
10325 else if (mask == (CC0 ^ 0xf))
10326 target = XEXP (ite, 2);
10334 ei = ei_start (bb->succs);
10335 e1 = ei_safe_edge (ei);
10337 e2 = ei_safe_edge (ei);
10339 if (e2->flags & EDGE_FALLTHRU)
10342 e1 = ei_safe_edge (ei);
10345 if (!(e1->flags & EDGE_FALLTHRU))
10348 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10350 if (tmp == BB_END (bb))
10355 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10357 if (tend_bb != NULL)
10364 /* Either we successfully remove the FPR clobbers here or we are not
10365 able to do anything for this TX. Both cases don't qualify for
10366 another look so we are done here. */
10367 cfun->machine->tbegin_p = false;
10369 if (tbegin_bb == NULL || tend_bb == NULL)
10372 calculate_dominance_info (CDI_POST_DOMINATORS);
10373 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10374 free_dominance_info (CDI_POST_DOMINATORS);
10379 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10381 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10382 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10383 INSN_CODE (tbegin_insn) = -1;
10384 df_insn_rescan (tbegin_insn);
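/* Usage sketch (illustrative, not part of GCC): the single-BB shape
   this optimization recognizes, written with the HTM builtins (compile
   with -mhtm; _HTM_TBEGIN_STARTED is provided by htmintrin.h).  */
#if 0 /* example only */
#include <htmintrin.h>

static int
update_counter (int *counter)
{
  if (__builtin_tbegin ((void *) 0) == _HTM_TBEGIN_STARTED)
    {
      ++*counter;         /* transactional body without FP use */
      __builtin_tend ();  /* the tend post-dominates the CC0 path */
      return 1;
    }
  return 0;               /* transaction aborted or not started */
}
#endif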
10389 /* Return true if it is legal to put a value with MODE into REGNO. */
10392 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10394 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10397 switch (REGNO_REG_CLASS (regno))
10400 return ((GET_MODE_CLASS (mode) == MODE_INT
10401 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10403 || (TARGET_VXE && mode == SFmode)
10404 || s390_vector_mode_supported_p (mode));
10408 && ((GET_MODE_CLASS (mode) == MODE_INT
10409 && s390_class_max_nregs (FP_REGS, mode) == 1)
10411 || s390_vector_mode_supported_p (mode)))
10414 if (REGNO_PAIR_OK (regno, mode))
10416 if (mode == SImode || mode == DImode)
10419 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10424 if (FRAME_REGNO_P (regno) && mode == Pmode)
10429 if (REGNO_PAIR_OK (regno, mode))
10432 || (mode != TFmode && mode != TCmode && mode != TDmode))
10437 if (GET_MODE_CLASS (mode) == MODE_CC)
10441 if (REGNO_PAIR_OK (regno, mode))
10443 if (mode == SImode || mode == Pmode)
10454 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10457 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10459 /* Once we've decided upon a register to use as base register, it must
10460 no longer be used for any other purpose. */
10461 if (cfun->machine->base_reg)
10462 if (REGNO (cfun->machine->base_reg) == old_reg
10463 || REGNO (cfun->machine->base_reg) == new_reg)
10466 /* Prevent regrename from using call-saved regs which haven't
10467 actually been saved. This is necessary since regrename assumes
10468 the backend save/restore decisions are based on
10469 df_regs_ever_live. Since we have our own routine we have to tell
10470 regrename manually about it. */
10471 if (GENERAL_REGNO_P (new_reg)
10472 && !call_really_used_regs[new_reg]
10473 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10479 /* Return nonzero if register REGNO can be used as a scratch register
10483 s390_hard_regno_scratch_ok (unsigned int regno)
10485 /* See s390_hard_regno_rename_ok. */
10486 if (GENERAL_REGNO_P (regno)
10487 && !call_really_used_regs[regno]
10488 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10494 /* Maximum number of registers to represent a value of mode MODE
10495 in a register of class RCLASS. */
10498 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10501 bool reg_pair_required_p = false;
10507 reg_size = TARGET_VX ? 16 : 8;
10509 /* TF and TD modes would fit into a VR but we put them into a
10510 register pair since we do not have 128bit FP instructions on
10511 full VRs. */
10512 if (TARGET_VX
10513 && SCALAR_FLOAT_MODE_P (mode)
10514 && GET_MODE_SIZE (mode) >= 16)
10515 reg_pair_required_p = true;
10517 /* Even if complex types would fit into a single FPR/VR we force
10518 them into a register pair to deal with the parts more easily.
10519 (FIXME: What about complex ints?) */
10520 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10521 reg_pair_required_p = true;
10527 reg_size = UNITS_PER_WORD;
10531 if (reg_pair_required_p)
10532 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10534 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
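/* Worked example (illustrative): TFmode (16 bytes) in FP_REGS with
   8-byte registers takes the pair path above:
     2 * ((16/2 + 8 - 1) / 8) == 2 registers,
   while a 16-byte vector mode in VEC_REGS (reg_size 16) needs
     (16 + 16 - 1) / 16 == 1 register.  */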
10537 /* Return TRUE if changing mode from FROM to TO should not be allowed
10538 for register class CLASS. */
10541 s390_cannot_change_mode_class (machine_mode from_mode,
10542 machine_mode to_mode,
10543 enum reg_class rclass)
10545 machine_mode small_mode;
10546 machine_mode big_mode;
10548 /* V1TF and TF have different representations in vector
10549 registers. */
10550 if (reg_classes_intersect_p (VEC_REGS, rclass)
10551 && ((from_mode == V1TFmode && to_mode == TFmode)
10552 || (from_mode == TFmode && to_mode == V1TFmode)))
10555 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10558 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10560 small_mode = from_mode;
10561 big_mode = to_mode;
10565 small_mode = to_mode;
10566 big_mode = from_mode;
10569 /* Values residing in VRs are little-endian style. All modes are
10570 placed left-aligned in an VR. This means that we cannot allow
10571 switching between modes with differing sizes. Also if the vector
10572 facility is available we still place TFmode values in VR register
10573 pairs, since the only instructions we have operating on TFmodes
10574 only deal with register pairs. Therefore we have to allow DFmode
10575 subregs of TFmodes to enable the TFmode splitters. */
10576 if (reg_classes_intersect_p (VEC_REGS, rclass)
10577 && (GET_MODE_SIZE (small_mode) < 8
10578 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10581 /* Likewise for access registers, since they have only half the
10582 word size on 64-bit. */
10583 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10589 /* Return true if we use LRA instead of reload pass. */
10591 s390_lra_p (void)
10593 return s390_lra_flag;
10596 /* Return true if register FROM can be eliminated via register TO. */
10599 s390_can_eliminate (const int from, const int to)
10601 /* On zSeries machines, we have not marked the base register as fixed.
10602 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10603 If a function requires the base register, we say here that this
10604 elimination cannot be performed. This will cause reload to free
10605 up the base register (as if it were fixed). On the other hand,
10606 if the current function does *not* require the base register, we
10607 say here the elimination succeeds, which in turn allows reload
10608 to allocate the base register for any other purpose. */
10609 if (from == BASE_REGNUM && to == BASE_REGNUM)
10611 if (TARGET_CPU_ZARCH)
10613 s390_init_frame_layout ();
10614 return cfun->machine->base_reg == NULL_RTX;
10620 /* Everything else must point into the stack frame. */
10621 gcc_assert (to == STACK_POINTER_REGNUM
10622 || to == HARD_FRAME_POINTER_REGNUM);
10624 gcc_assert (from == FRAME_POINTER_REGNUM
10625 || from == ARG_POINTER_REGNUM
10626 || from == RETURN_ADDRESS_POINTER_REGNUM);
10628 /* Make sure we actually saved the return address. */
10629 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10630 if (!crtl->calls_eh_return
10632 && !cfun_frame_layout.save_return_addr_p)
10638 /* Return offset between register FROM and TO initially after prolog. */
10641 s390_initial_elimination_offset (int from, int to)
10643 HOST_WIDE_INT offset;
10645 /* ??? Why are we called for non-eliminable pairs? */
10646 if (!s390_can_eliminate (from, to))
10651 case FRAME_POINTER_REGNUM:
10652 offset = (get_frame_size()
10653 + STACK_POINTER_OFFSET
10654 + crtl->outgoing_args_size);
10657 case ARG_POINTER_REGNUM:
10658 s390_init_frame_layout ();
10659 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10662 case RETURN_ADDRESS_POINTER_REGNUM:
10663 s390_init_frame_layout ();
10665 if (cfun_frame_layout.first_save_gpr_slot == -1)
10667 /* If it turns out that for stdarg nothing went into the reg
10668 save area we also do not need the return address
10670 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10673 gcc_unreachable ();
10676 /* In order to make the following work it is not necessary for
10677 r14 to have a save slot. It is sufficient if one other GPR
10678 got one. Since the GPRs are always stored without gaps we
10679 are able to calculate where the r14 save slot would
10680 reside. */
10681 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10682 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10691 gcc_unreachable ();
10697 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10698 to register BASE. Return generated insn. */
10701 save_fpr (rtx base, int offset, int regnum)
10704 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10706 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10707 set_mem_alias_set (addr, get_varargs_alias_set ());
10709 set_mem_alias_set (addr, get_frame_alias_set ());
10711 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10714 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10715 to register BASE. Return generated insn. */
10718 restore_fpr (rtx base, int offset, int regnum)
10721 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10722 set_mem_alias_set (addr, get_frame_alias_set ());
10724 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10727 /* Return true if REGNO is a global register, but not one
10728 of the special ones that need to be saved/restored in any case. */
10731 global_not_special_regno_p (int regno)
10733 return (global_regs[regno]
10734 /* These registers are special and need to be
10735 restored in any case. */
10736 && !(regno == STACK_POINTER_REGNUM
10737 || regno == RETURN_REGNUM
10738 || regno == BASE_REGNUM
10739 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10742 /* Generate insn to save registers FIRST to LAST into
10743 the register save area located at offset OFFSET
10744 relative to register BASE. */
10747 save_gprs (rtx base, int offset, int first, int last)
10749 rtx addr, insn, note;
10752 addr = plus_constant (Pmode, base, offset);
10753 addr = gen_rtx_MEM (Pmode, addr);
10755 set_mem_alias_set (addr, get_frame_alias_set ());
10757 /* Special-case single register. */
10761 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10763 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10765 if (!global_not_special_regno_p (first))
10766 RTX_FRAME_RELATED_P (insn) = 1;
10771 insn = gen_store_multiple (addr,
10772 gen_rtx_REG (Pmode, first),
10773 GEN_INT (last - first + 1));
10775 if (first <= 6 && cfun->stdarg)
10776 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10778 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10780 if (first + i <= 6)
10781 set_mem_alias_set (mem, get_varargs_alias_set ());
10784 /* We need to set the FRAME_RELATED flag on all SETs
10785 inside the store-multiple pattern.
10787 However, we must not emit DWARF records for registers 2..5
10788 if they are stored for use by variable arguments ...
10790 ??? Unfortunately, it is not enough to simply not set the
10791 FRAME_RELATED flags for those SETs, because the first SET
10792 of the PARALLEL is always treated as if it had the flag
10793 set, even if it does not. Therefore we emit a new pattern
10794 without those registers as REG_FRAME_RELATED_EXPR note. */
10796 if (first >= 6 && !global_not_special_regno_p (first))
10798 rtx pat = PATTERN (insn);
10800 for (i = 0; i < XVECLEN (pat, 0); i++)
10801 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10802 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10804 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10806 RTX_FRAME_RELATED_P (insn) = 1;
10808 else if (last >= 6)
10812 for (start = first >= 6 ? first : 6; start <= last; start++)
10813 if (!global_not_special_regno_p (start))
10819 addr = plus_constant (Pmode, base,
10820 offset + (start - first) * UNITS_PER_LONG);
10825 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10826 gen_rtx_REG (Pmode, start));
10828 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10829 gen_rtx_REG (Pmode, start));
10830 note = PATTERN (note);
10832 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10833 RTX_FRAME_RELATED_P (insn) = 1;
10838 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10839 gen_rtx_REG (Pmode, start),
10840 GEN_INT (last - start + 1));
10841 note = PATTERN (note);
10843 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10845 for (i = 0; i < XVECLEN (note, 0); i++)
10846 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10847 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10849 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10851 RTX_FRAME_RELATED_P (insn) = 1;
10857 /* Generate insn to restore registers FIRST to LAST from
10858 the register save area located at offset OFFSET
10859 relative to register BASE. */
10862 restore_gprs (rtx base, int offset, int first, int last)
10866 addr = plus_constant (Pmode, base, offset);
10867 addr = gen_rtx_MEM (Pmode, addr);
10868 set_mem_alias_set (addr, get_frame_alias_set ());
10870 /* Special-case single register. */
10874 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10876 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10878 RTX_FRAME_RELATED_P (insn) = 1;
10882 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10884 GEN_INT (last - first + 1));
10885 RTX_FRAME_RELATED_P (insn) = 1;
10889 /* Return insn sequence to load the GOT register. */
10892 s390_load_got (void)
10896 /* We cannot use pic_offset_table_rtx here since we use this
10897 function also for non-pic if __tls_get_offset is called and in
10898 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10900 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10904 if (TARGET_CPU_ZARCH)
10906 emit_move_insn (got_rtx, s390_got_symbol ());
10912 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, s390_got_symbol ()),
10913 UNSPEC_LTREL_OFFSET);
10914 offset = gen_rtx_CONST (Pmode, offset);
10915 offset = force_const_mem (Pmode, offset);
10917 emit_move_insn (got_rtx, offset);
10919 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10920 UNSPEC_LTREL_BASE);
10921 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10923 emit_move_insn (got_rtx, offset);
10926 insns = get_insns ();
10931 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10932 and the change to the stack pointer. */
10935 s390_emit_stack_tie (void)
10937 rtx mem = gen_frame_mem (BLKmode,
10938 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10940 emit_insn (gen_stack_tie (mem));
10943 /* Copy GPRS into FPR save slots. */
10946 s390_save_gprs_to_fprs (void)
10950 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10953 for (i = 6; i < 16; i++)
10955 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10958 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10959 gen_rtx_REG (DImode, i));
10960 RTX_FRAME_RELATED_P (insn) = 1;
10961 /* This prevents dwarf2cfi from interpreting the set. Doing
10962 so it might emit def_cfa_register infos setting an FPR as
10964 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
10969 /* Restore GPRs from FPR save slots. */
10972 s390_restore_gprs_from_fprs (void)
10976 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10979 for (i = 6; i < 16; i++)
10983 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
10986 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
10988 if (i == STACK_POINTER_REGNUM)
10989 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
10991 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
10993 df_set_regs_ever_live (i, true);
10994 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10995 if (i == STACK_POINTER_REGNUM)
10996 add_reg_note (insn, REG_CFA_DEF_CFA,
10997 plus_constant (Pmode, stack_pointer_rtx,
10998 STACK_POINTER_OFFSET));
10999 RTX_FRAME_RELATED_P (insn) = 1;
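/* Illustrative note (not part of GCC): on z10 and later these
   save/restore pairs become pure register moves, e.g.

       ldgr  %f8,%r14      # prologue: save r14 into f8
       lgdr  %r14,%f8      # epilogue: restore it

   avoiding the memory round-trip in leaf functions.  */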
11004 /* A pass run immediately before shrink-wrapping and prologue and epilogue
11009 const pass_data pass_data_s390_early_mach =
11011 RTL_PASS, /* type */
11012 "early_mach", /* name */
11013 OPTGROUP_NONE, /* optinfo_flags */
11014 TV_MACH_DEP, /* tv_id */
11015 0, /* properties_required */
11016 0, /* properties_provided */
11017 0, /* properties_destroyed */
11018 0, /* todo_flags_start */
11019 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
11022 class pass_s390_early_mach : public rtl_opt_pass
11025 pass_s390_early_mach (gcc::context *ctxt)
11026 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
11029 /* opt_pass methods: */
11030 virtual unsigned int execute (function *);
11032 }; // class pass_s390_early_mach
11035 pass_s390_early_mach::execute (function *fun)
11039 /* Try to get rid of the FPR clobbers. */
11040 s390_optimize_nonescaping_tx ();
11042 /* Re-compute register info. */
11043 s390_register_info ();
11045 /* If we're using a base register, ensure that it is always valid for
11046 the first non-prologue instruction. */
11047 if (fun->machine->base_reg)
11048 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
11050 /* Annotate all constant pool references to let the scheduler know
11051 they implicitly use the base register. */
11052 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11055 annotate_constant_pool_refs (&PATTERN (insn));
11056 df_insn_rescan (insn);
11061 } // anon namespace
11063 /* Expand the prologue into a bunch of separate insns. */
11066 s390_emit_prologue (void)
11074 /* Choose best register to use for temp use within prologue.
11075 TPF with profiling must avoid the register 14 - the tracing function
11076 needs the original contents of r14 to be preserved. */
11078 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
11080 && !TARGET_TPF_PROFILING)
11081 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11082 else if (flag_split_stack && cfun->stdarg)
11083 temp_reg = gen_rtx_REG (Pmode, 12);
11085 temp_reg = gen_rtx_REG (Pmode, 1);
11087 s390_save_gprs_to_fprs ();
11089 /* Save call saved gprs. */
11090 if (cfun_frame_layout.first_save_gpr != -1)
11092 insn = save_gprs (stack_pointer_rtx,
11093 cfun_frame_layout.gprs_offset +
11094 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11095 - cfun_frame_layout.first_save_gpr_slot),
11096 cfun_frame_layout.first_save_gpr,
11097 cfun_frame_layout.last_save_gpr);
11101 /* Dummy insn to mark literal pool slot. */
11103 if (cfun->machine->base_reg)
11104 emit_insn (gen_main_pool (cfun->machine->base_reg));
11106 offset = cfun_frame_layout.f0_offset;
11108 /* Save f0 and f2. */
11109 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
11111 if (cfun_fpr_save_p (i))
11113 save_fpr (stack_pointer_rtx, offset, i);
11116 else if (!TARGET_PACKED_STACK || cfun->stdarg)
11120 /* Save f4 and f6. */
11121 offset = cfun_frame_layout.f4_offset;
11122 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11124 if (cfun_fpr_save_p (i))
11126 insn = save_fpr (stack_pointer_rtx, offset, i);
11129 /* If f4 and f6 are call clobbered they are saved due to
11130 stdargs and therefore are not frame related. */
11131 if (!call_really_used_regs[i])
11132 RTX_FRAME_RELATED_P (insn) = 1;
11134 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
11138 if (TARGET_PACKED_STACK
11139 && cfun_save_high_fprs_p
11140 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
11142 offset = (cfun_frame_layout.f8_offset
11143 + (cfun_frame_layout.high_fprs - 1) * 8);
11145 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
11146 if (cfun_fpr_save_p (i))
11148 insn = save_fpr (stack_pointer_rtx, offset, i);
11150 RTX_FRAME_RELATED_P (insn) = 1;
11153 if (offset >= cfun_frame_layout.f8_offset)
11157 if (!TARGET_PACKED_STACK)
11158 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
11160 if (flag_stack_usage_info)
11161 current_function_static_stack_size = cfun_frame_layout.frame_size;
11163 /* Decrement stack pointer. */
11165 if (cfun_frame_layout.frame_size > 0)
11167 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11168 rtx real_frame_off;
11170 if (s390_stack_size)
11172 HOST_WIDE_INT stack_guard;
11174 if (s390_stack_guard)
11175 stack_guard = s390_stack_guard;
11178 /* If no value for stack guard is provided the smallest power of 2
11179 larger than the current frame size is chosen. */
11181 while (stack_guard < cfun_frame_layout.frame_size)
11185 if (cfun_frame_layout.frame_size >= s390_stack_size)
11187 warning (0, "frame size of function %qs is %wd"
11188 " bytes exceeding user provided stack limit of "
11190 "An unconditional trap is added.",
11191 current_function_name(), cfun_frame_layout.frame_size,
11193 emit_insn (gen_trap ());
11198 /* stack_guard has to be smaller than s390_stack_size.
11199 Otherwise we would emit an AND with zero which would
11200 not match the test under mask pattern. */
11201 if (stack_guard >= s390_stack_size)
11203 warning (0, "frame size of function %qs is %wd"
11204 " bytes which is more than half the stack size. "
11205 "The dynamic check would not be reliable. "
11206 "No check emitted for this function.",
11207 current_function_name(),
11208 cfun_frame_layout.frame_size);
11212 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
11213 & ~(stack_guard - 1));
11215 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
11216 GEN_INT (stack_check_mask));
11218 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
11220 t, const0_rtx, const0_rtx));
11222 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
11224 t, const0_rtx, const0_rtx));
11229 if (s390_warn_framesize > 0
11230 && cfun_frame_layout.frame_size >= s390_warn_framesize)
11231 warning (0, "frame size of %qs is %wd bytes",
11232 current_function_name (), cfun_frame_layout.frame_size);
11234 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
11235 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
11237 /* Save incoming stack pointer into temp reg. */
11238 if (TARGET_BACKCHAIN || next_fpr)
11239 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
11241 /* Subtract frame size from stack pointer. */
11243 if (DISP_IN_RANGE (INTVAL (frame_off)))
11245 insn = gen_rtx_SET (stack_pointer_rtx,
11246 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11248 insn = emit_insn (insn);
11252 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11253 frame_off = force_const_mem (Pmode, frame_off);
11255 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
11256 annotate_constant_pool_refs (&PATTERN (insn));
11259 RTX_FRAME_RELATED_P (insn) = 1;
11260 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11261 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11262 gen_rtx_SET (stack_pointer_rtx,
11263 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11266 /* Set backchain. */
11268 if (TARGET_BACKCHAIN)
11270 if (cfun_frame_layout.backchain_offset)
11271 addr = gen_rtx_MEM (Pmode,
11272 plus_constant (Pmode, stack_pointer_rtx,
11273 cfun_frame_layout.backchain_offset));
11275 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
11276 set_mem_alias_set (addr, get_frame_alias_set ());
11277 insn = emit_insn (gen_move_insn (addr, temp_reg));
11280 /* If we support non-call exceptions (e.g. for Java),
11281 we need to make sure the backchain pointer is set up
11282 before any possibly trapping memory access. */
11283 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
11285 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
11286 emit_clobber (addr);
11290 /* Save fprs 8 - 15 (64 bit ABI). */
11292 if (cfun_save_high_fprs_p && next_fpr)
11294 /* If the stack might be accessed through a different register
11295 we have to make sure that the stack pointer decrement is not
11296 moved below the use of the stack slots. */
11297 s390_emit_stack_tie ();
11299 insn = emit_insn (gen_add2_insn (temp_reg,
11300 GEN_INT (cfun_frame_layout.f8_offset)));
11304 for (i = FPR8_REGNUM; i <= next_fpr; i++)
11305 if (cfun_fpr_save_p (i))
11307 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
11308 cfun_frame_layout.frame_size
11309 + cfun_frame_layout.f8_offset
11312 insn = save_fpr (temp_reg, offset, i);
11314 RTX_FRAME_RELATED_P (insn) = 1;
11315 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11316 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
11317 gen_rtx_REG (DFmode, i)));
11321 /* Set frame pointer, if needed. */
11323 if (frame_pointer_needed)
11325 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11326 RTX_FRAME_RELATED_P (insn) = 1;
11329 /* Set up got pointer, if needed. */
11331 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
11333 rtx_insn *insns = s390_load_got ();
11335 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
11336 annotate_constant_pool_refs (&PATTERN (insn));
11341 if (TARGET_TPF_PROFILING)
11343 /* Generate a BAS instruction to serve as a function
11344 entry intercept to facilitate the use of tracing
11345 algorithms located at the branch target. */
11346 emit_insn (gen_prologue_tpf ());
11348 /* Emit a blockage here so that all code
11349 lies between the profiling mechanisms. */
11350 emit_insn (gen_blockage ());
11354 /* Expand the epilogue into a bunch of separate insns. */
11357 s390_emit_epilogue (bool sibcall)
11359 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
11360 int area_bottom, area_top, offset = 0;
11365 if (TARGET_TPF_PROFILING)
11368 /* Generate a BAS instruction to serve as a function
11369 entry intercept to facilitate the use of tracing
11370 algorithms located at the branch target. */
11372 /* Emit a blockage here so that all code
11373 lies between the profiling mechanisms. */
11374 emit_insn (gen_blockage ());
11376 emit_insn (gen_epilogue_tpf ());
11379 /* Check whether to use frame or stack pointer for restore. */
11381 frame_pointer = (frame_pointer_needed
11382 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11384 s390_frame_area (&area_bottom, &area_top);
11386 /* Check whether we can access the register save area.
11387 If not, increment the frame pointer as required. */
11389 if (area_top <= area_bottom)
11391 /* Nothing to restore. */
11393 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11394 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11396 /* Area is in range. */
11397 offset = cfun_frame_layout.frame_size;
11401 rtx insn, frame_off, cfa;
11403 offset = area_bottom < 0 ? -area_bottom : 0;
11404 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11406 cfa = gen_rtx_SET (frame_pointer,
11407 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11408 if (DISP_IN_RANGE (INTVAL (frame_off)))
11410 insn = gen_rtx_SET (frame_pointer,
11411 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11412 insn = emit_insn (insn);
11416 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11417 frame_off = force_const_mem (Pmode, frame_off);
11419 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11420 annotate_constant_pool_refs (&PATTERN (insn));
11422 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11423 RTX_FRAME_RELATED_P (insn) = 1;
11426 /* Restore call saved fprs. */
11430 if (cfun_save_high_fprs_p)
11432 next_offset = cfun_frame_layout.f8_offset;
11433 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11435 if (cfun_fpr_save_p (i))
11437 restore_fpr (frame_pointer,
11438 offset + next_offset, i);
11440 = alloc_reg_note (REG_CFA_RESTORE,
11441 gen_rtx_REG (DFmode, i), cfa_restores);
11450 next_offset = cfun_frame_layout.f4_offset;
11452 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11454 if (cfun_fpr_save_p (i))
11456 restore_fpr (frame_pointer,
11457 offset + next_offset, i);
11459 = alloc_reg_note (REG_CFA_RESTORE,
11460 gen_rtx_REG (DFmode, i), cfa_restores);
11463 else if (!TARGET_PACKED_STACK)
11469 /* Return register. */
11471 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11473 /* Restore call saved gprs. */
11475 if (cfun_frame_layout.first_restore_gpr != -1)
11480 /* Check for global register and save them
11481 to stack location from where they get restored. */
11483 for (i = cfun_frame_layout.first_restore_gpr;
11484 i <= cfun_frame_layout.last_restore_gpr;
11487 if (global_not_special_regno_p (i))
11489 addr = plus_constant (Pmode, frame_pointer,
11490 offset + cfun_frame_layout.gprs_offset
11491 + (i - cfun_frame_layout.first_save_gpr_slot)
11493 addr = gen_rtx_MEM (Pmode, addr);
11494 set_mem_alias_set (addr, get_frame_alias_set ());
11495 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11499 = alloc_reg_note (REG_CFA_RESTORE,
11500 gen_rtx_REG (Pmode, i), cfa_restores);
11503 /* Fetch return address from stack before load multiple,
11504 this helps scheduling.
11506 Only do this if we already decided that r14 needs to be
11507 saved to a stack slot. (And not just because r14 happens to
11508 be in between two GPRs which need saving.) Otherwise it
11509 would be difficult to take that decision back in
11510 s390_optimize_prologue.
11512 This optimization is only helpful on in-order machines. */
11514 && cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK
11515 && s390_tune <= PROCESSOR_2097_Z10)
int return_regnum = find_unused_clobbered_reg ();
11518 if (!return_regnum)
11520 return_reg = gen_rtx_REG (Pmode, return_regnum);
11522 addr = plus_constant (Pmode, frame_pointer,
offset + cfun_frame_layout.gprs_offset
+ (RETURN_REGNUM
   - cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_LONG);
11527 addr = gen_rtx_MEM (Pmode, addr);
11528 set_mem_alias_set (addr, get_frame_alias_set ());
11529 emit_move_insn (return_reg, addr);
11531 /* Once we did that optimization we have to make sure
11532 s390_optimize_prologue does not try to remove the store
of r14 since we will not be able to find the load issued
later on for that optimization.  */
11535 cfun_frame_layout.save_return_addr_p = true;
11538 insn = restore_gprs (frame_pointer,
11539 offset + cfun_frame_layout.gprs_offset
11540 + (cfun_frame_layout.first_restore_gpr
- cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_LONG,
11543 cfun_frame_layout.first_restore_gpr,
11544 cfun_frame_layout.last_restore_gpr);
11545 insn = emit_insn (insn);
11546 REG_NOTES (insn) = cfa_restores;
11547 add_reg_note (insn, REG_CFA_DEF_CFA,
11548 plus_constant (Pmode, stack_pointer_rtx,
11549 STACK_POINTER_OFFSET));
11550 RTX_FRAME_RELATED_P (insn) = 1;
11553 s390_restore_gprs_from_fprs ();
11558 /* Return to caller. */
11560 p = rtvec_alloc (2);
11562 RTVEC_ELT (p, 0) = ret_rtx;
11563 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11564 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11568 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
s390_set_up_by_prologue (hard_reg_set_container *regs)
11573 if (cfun->machine->base_reg
11574 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11575 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11578 /* -fsplit-stack support. */
11580 /* A SYMBOL_REF for __morestack. */
11581 static GTY(()) rtx morestack_ref;
11583 /* When using -fsplit-stack, the allocation routines set a field in
11584 the TCB to the bottom of the stack plus this much space, measured
11587 #define SPLIT_STACK_AVAILABLE 1024
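/* Editorial sketch (not part of the original source): in plain C the
   check emitted below amounts to

     char *guard = *(char **) (tcb + psso);    // TCB field __private_ss
     if (frame_size > SPLIT_STACK_AVAILABLE)
       guard += frame_size;                    // large frame: adjust
     if (sp < guard)
       __morestack (...);                      // grow into a new segment

   where "tcb" and "sp" stand for the thread pointer and the stack
   pointer, and the __morestack arguments for the parameter block set
   up below.  */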
11589 /* Emit -fsplit-stack prologue, which goes before the regular function
11593 s390_expand_split_stack_prologue (void)
11595 rtx r1, guard, cc = NULL;
11597 /* Offset from thread pointer to __private_ss. */
11598 int psso = TARGET_64BIT ? 0x38 : 0x20;
11599 /* Pointer size in bytes. */
11600 /* Frame size and argument size - the two parameters to __morestack. */
11601 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11602 /* Align argument size to 8 bytes - simplifies __morestack code. */
HOST_WIDE_INT args_size = crtl->args.size >= 0
                          ? ((crtl->args.size + 7) & ~7)
                          : 0;
11606 /* Label to be called by __morestack. */
11607 rtx_code_label *call_done = NULL;
11608 rtx_code_label *parm_base = NULL;
11611 gcc_assert (flag_split_stack && reload_completed);
11612 if (!TARGET_CPU_ZARCH)
11614 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11618 r1 = gen_rtx_REG (Pmode, 1);
11620 /* If no stack frame will be allocated, don't do anything. */
11623 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11625 /* If va_start is used, just use r15. */
11626 emit_move_insn (r1,
11627 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11628 GEN_INT (STACK_POINTER_OFFSET)));
11634 if (morestack_ref == NULL_RTX)
11636 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11637 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11638 | SYMBOL_FLAG_FUNCTION);
11641 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11643 /* If frame_size will fit in an add instruction, do a stack space
11644 check, and only call __morestack if there's not enough space. */
11646 /* Get thread pointer. r1 is the only register we can always destroy - r0
11647 could contain a static chain (and cannot be used to address memory
11648 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11649 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11650 /* Aim at __private_ss. */
11651 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
/* If less than 1 KiB is used, skip the addition and compare the
   stack pointer directly against the guard.  */
11655 if (frame_size > SPLIT_STACK_AVAILABLE)
11657 emit_move_insn (r1, guard);
11659 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11661 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11665 /* Compare the (maybe adjusted) guard with the stack pointer. */
11666 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11669 call_done = gen_label_rtx ();
11670 parm_base = gen_label_rtx ();
11672 /* Emit the parameter block. */
11673 tmp = gen_split_stack_data (parm_base, call_done,
11674 GEN_INT (frame_size),
11675 GEN_INT (args_size));
11676 insn = emit_insn (tmp);
11677 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11678 LABEL_NUSES (call_done)++;
11679 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11680 LABEL_NUSES (parm_base)++;
11682 /* %r1 = litbase. */
11683 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11684 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11685 LABEL_NUSES (parm_base)++;
/* Now we need to call __morestack.  It has very special calling
   conventions: it preserves the param/return/static chain registers
   for the main function body, and looks for its own parameters at %r1.  */
11693 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11695 insn = emit_jump_insn (tmp);
11696 JUMP_LABEL (insn) = call_done;
11697 LABEL_NUSES (call_done)++;
11699 /* Mark the jump as very unlikely to be taken. */
11700 add_reg_br_prob_note (insn,
11701 profile_probability::very_unlikely ());
11703 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11705 /* If va_start is used, and __morestack was not called, just use
11707 emit_move_insn (r1,
11708 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11709 GEN_INT (STACK_POINTER_OFFSET)));
11714 tmp = gen_split_stack_call (morestack_ref, call_done);
11715 insn = emit_jump_insn (tmp);
11716 JUMP_LABEL (insn) = call_done;
11717 LABEL_NUSES (call_done)++;
11721 /* __morestack will call us here. */
11723 emit_label (call_done);
11726 /* We may have to tell the dataflow pass that the split stack prologue
11727 is initializing a register. */
11730 s390_live_on_entry (bitmap regs)
11732 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11734 gcc_assert (flag_split_stack);
11735 bitmap_set_bit (regs, 1);
/* Return true if the function can use simple_return to return outside
   of a shrink-wrapped region.  At present shrink-wrapping is supported
   in all cases.  */
11744 s390_can_use_simple_return_insn (void)
11749 /* Return true if the epilogue is guaranteed to contain only a return
11750 instruction and if a direct return can therefore be used instead.
11751 One of the main advantages of using direct return instructions
11752 is that we can then use conditional returns. */
11755 s390_can_use_return_insn (void)
11759 if (!reload_completed)
11765 if (TARGET_TPF_PROFILING)
11768 for (i = 0; i < 16; i++)
11769 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11772 /* For 31 bit this is not covered by the frame_size check below
11773 since f4, f6 are saved in the register save area without needing
11774 additional stack space. */
11776 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11779 if (cfun->machine->base_reg
11780 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11783 return cfun_frame_layout.frame_size == 0;
11786 /* The VX ABI differs for vararg functions. Therefore we need the
11787 prototype of the callee to be available when passing vector type
11789 static const char *
11790 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11792 return ((TARGET_VX_ABI
11794 && VECTOR_TYPE_P (TREE_TYPE (val))
11795 && (funcdecl == NULL_TREE
11796 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11797 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11798 ? N_("vector argument passed to unprototyped function")
11803 /* Return the size in bytes of a function argument of
11804 type TYPE and/or mode MODE. At least one of TYPE or
11805 MODE must be specified. */
11808 s390_function_arg_size (machine_mode mode, const_tree type)
11811 return int_size_in_bytes (type);
11813 /* No type info available for some library calls ... */
11814 if (mode != BLKmode)
11815 return GET_MODE_SIZE (mode);
11817 /* If we have neither type nor mode, abort */
11818 gcc_unreachable ();
11821 /* Return true if a function argument of type TYPE and mode MODE
11822 is to be passed in a vector register, if available. */
11825 s390_function_arg_vector (machine_mode mode, const_tree type)
11827 if (!TARGET_VX_ABI)
11830 if (s390_function_arg_size (mode, type) > 16)
11833 /* No type info available for some library calls ... */
11835 return VECTOR_MODE_P (mode);
11837 /* The ABI says that record types with a single member are treated
11838 just like that member would be. */
11839 while (TREE_CODE (type) == RECORD_TYPE)
11841 tree field, single = NULL_TREE;
11843 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11845 if (TREE_CODE (field) != FIELD_DECL)
11848 if (single == NULL_TREE)
11849 single = TREE_TYPE (field);
11854 if (single == NULL_TREE)
/* If the field declaration adds extra bytes due to e.g. padding,
   this is not accepted as a vector type.  */
11860 if (int_size_in_bytes (single) <= 0
11861 || int_size_in_bytes (single) != int_size_in_bytes (type))
11867 return VECTOR_TYPE_P (type);
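/* Editorial example of the single-member record rule above (type
   names made up for illustration):

     typedef int v4si __attribute__ ((vector_size (16)));
     struct s1 { v4si x; };        // treated like a plain v4si
     struct s2 { struct s1 i; };   // unwrapped recursively: also v4si
*/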
11870 /* Return true if a function argument of type TYPE and mode MODE
11871 is to be passed in a floating-point register, if available. */
11874 s390_function_arg_float (machine_mode mode, const_tree type)
11876 if (s390_function_arg_size (mode, type) > 8)
11879 /* Soft-float changes the ABI: no floating-point registers are used. */
11880 if (TARGET_SOFT_FLOAT)
11883 /* No type info available for some library calls ... */
11885 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11887 /* The ABI says that record types with a single member are treated
11888 just like that member would be. */
11889 while (TREE_CODE (type) == RECORD_TYPE)
11891 tree field, single = NULL_TREE;
11893 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11895 if (TREE_CODE (field) != FIELD_DECL)
11898 if (single == NULL_TREE)
11899 single = TREE_TYPE (field);
11904 if (single == NULL_TREE)
11910 return TREE_CODE (type) == REAL_TYPE;
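/* Editorial example for the rule above:

     struct f1 { float f; };          // passed in an FPR, like float
     struct fi { float f; int i; };   // two members: takes the
                                      // integer/aggregate route instead
*/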
11913 /* Return true if a function argument of type TYPE and mode MODE
11914 is to be passed in an integer register, or a pair of integer
11915 registers, if available. */
11918 s390_function_arg_integer (machine_mode mode, const_tree type)
11920 int size = s390_function_arg_size (mode, type);
11924 /* No type info available for some library calls ... */
11926 return GET_MODE_CLASS (mode) == MODE_INT
11927 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11929 /* We accept small integral (and similar) types. */
11930 if (INTEGRAL_TYPE_P (type)
11931 || POINTER_TYPE_P (type)
11932 || TREE_CODE (type) == NULLPTR_TYPE
11933 || TREE_CODE (type) == OFFSET_TYPE
11934 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11937 /* We also accept structs of size 1, 2, 4, 8 that are not
11938 passed in floating-point registers. */
11939 if (AGGREGATE_TYPE_P (type)
11940 && exact_log2 (size) >= 0
11941 && !s390_function_arg_float (mode, type))
11947 /* Return 1 if a function argument of type TYPE and mode MODE
11948 is to be passed by reference. The ABI specifies that only
11949 structures of size 1, 2, 4, or 8 bytes are passed by value,
all other structures (and complex numbers) are passed by
reference.  */
11954 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11955 machine_mode mode, const_tree type,
11956 bool named ATTRIBUTE_UNUSED)
11958 int size = s390_function_arg_size (mode, type);
11960 if (s390_function_arg_vector (mode, type))
11968 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11971 if (TREE_CODE (type) == COMPLEX_TYPE
11972 || TREE_CODE (type) == VECTOR_TYPE)
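/* Editorial examples for the by-value/by-reference split described
   above:

     struct s2  { short a; };      // size 2: by value, in a GPR
     struct s8  { int a, b; };     // size 8: by value
     struct s3  { char a[3]; };    // size 3, not a power of two:
                                   //   by reference
     struct s12 { int a[3]; };     // size 12 > 8: by reference
*/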
11979 /* Update the data in CUM to advance over an argument of mode MODE and
11980 data type TYPE. (TYPE is null for libcalls where that information
may not be available.)  The boolean NAMED specifies whether the
11982 argument is a named argument (as opposed to an unnamed argument
11983 matching an ellipsis). */
11986 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11987 const_tree type, bool named)
11989 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11991 if (s390_function_arg_vector (mode, type))
11993 /* We are called for unnamed vector stdarg arguments which are
11994 passed on the stack. In this case this hook does not have to
do anything since stack arguments are tracked by common
code.  */
12001 else if (s390_function_arg_float (mode, type))
12005 else if (s390_function_arg_integer (mode, type))
12007 int size = s390_function_arg_size (mode, type);
12008 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
12011 gcc_unreachable ();
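/* Editorial note: the GPR advance above rounds up to whole register
   slots; e.g. with UNITS_PER_LONG == 4 (31 bit) an 8 byte argument
   consumes (8 + 3) / 4 = 2 slots, while on 64 bit it takes
   (8 + 7) / 8 = 1.  */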
12014 /* Define where to put the arguments to a function.
12015 Value is zero to push the argument on the stack,
12016 or a hard register in which to store the argument.
12018 MODE is the argument's machine mode.
12019 TYPE is the data type of the argument (as a tree).
12020 This is null for libcalls where that information may
12022 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12023 the preceding args and about the function being called.
12024 NAMED is nonzero if this argument is a named parameter
12025 (otherwise it is an extra parameter matching an ellipsis).
12027 On S/390, we use general purpose registers 2 through 6 to
12028 pass integer, pointer, and certain structure arguments, and
12029 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
12030 to pass floating point arguments. All remaining arguments
12031 are pushed to the stack. */
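/* Editorial illustration of the scheme above on 64 bit (the register
   assignments are added here for clarity):

     void f (int a, double x, long b, double y, long c, long d, long e);
     //         r2      f0       r3      f2       r4      r5      r6
     // a further integer argument would already go on the stack
*/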
12034 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
12035 const_tree type, bool named)
12037 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12040 s390_check_type_for_vector_abi (type, true, false);
12042 if (s390_function_arg_vector (mode, type))
/* Vector arguments being part of the ellipsis are passed on the
   stack.  */
12046 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
12049 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
12051 else if (s390_function_arg_float (mode, type))
12053 if (cum->fprs + 1 > FP_ARG_NUM_REG)
12056 return gen_rtx_REG (mode, cum->fprs + 16);
12058 else if (s390_function_arg_integer (mode, type))
12060 int size = s390_function_arg_size (mode, type);
12061 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12063 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
12065 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
12066 return gen_rtx_REG (mode, cum->gprs + 2);
12067 else if (n_gprs == 2)
12069 rtvec p = rtvec_alloc (2);
12072 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
12075 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
12078 return gen_rtx_PARALLEL (mode, p);
12082 /* After the real arguments, expand_call calls us once again
12083 with a void_type_node type. Whatever we return here is
12084 passed as operand 2 to the call expanders.
12086 We don't need this feature ... */
12087 else if (type == void_type_node)
12090 gcc_unreachable ();
12093 /* Return true if return values of type TYPE should be returned
12094 in a memory buffer whose address is passed by the caller as
12095 hidden first argument. */
12098 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
12100 /* We accept small integral (and similar) types. */
12101 if (INTEGRAL_TYPE_P (type)
12102 || POINTER_TYPE_P (type)
12103 || TREE_CODE (type) == OFFSET_TYPE
12104 || TREE_CODE (type) == REAL_TYPE)
12105 return int_size_in_bytes (type) > 8;
12107 /* vector types which fit into a VR. */
12109 && VECTOR_TYPE_P (type)
12110 && int_size_in_bytes (type) <= 16)
12113 /* Aggregates and similar constructs are always returned
12115 if (AGGREGATE_TYPE_P (type)
12116 || TREE_CODE (type) == COMPLEX_TYPE
12117 || VECTOR_TYPE_P (type))
12120 /* ??? We get called on all sorts of random stuff from
12121 aggregate_value_p. We can't abort, but it's not clear
12122 what's safe to return. Pretend it's a struct I guess. */
12126 /* Function arguments and return values are promoted to word size. */
12128 static machine_mode
12129 s390_promote_function_mode (const_tree type, machine_mode mode,
12131 const_tree fntype ATTRIBUTE_UNUSED,
12132 int for_return ATTRIBUTE_UNUSED)
12134 if (INTEGRAL_MODE_P (mode)
12135 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
12137 if (type != NULL_TREE && POINTER_TYPE_P (type))
12138 *punsignedp = POINTERS_EXTEND_UNSIGNED;
12145 /* Define where to return a (scalar) value of type RET_TYPE.
12146 If RET_TYPE is null, define where to return a (scalar)
12147 value of mode MODE from a libcall. */
12150 s390_function_and_libcall_value (machine_mode mode,
12151 const_tree ret_type,
12152 const_tree fntype_or_decl,
12153 bool outgoing ATTRIBUTE_UNUSED)
12155 /* For vector return types it is important to use the RET_TYPE
12156 argument whenever available since the middle-end might have
12157 changed the mode to a scalar mode. */
12158 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
12159 || (!ret_type && VECTOR_MODE_P (mode)));
12161 /* For normal functions perform the promotion as
12162 promote_function_mode would do. */
12165 int unsignedp = TYPE_UNSIGNED (ret_type);
12166 mode = promote_function_mode (ret_type, mode, &unsignedp,
12167 fntype_or_decl, 1);
12170 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
12171 || SCALAR_FLOAT_MODE_P (mode)
12172 || (TARGET_VX_ABI && vector_ret_type_p));
12173 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
12175 if (TARGET_VX_ABI && vector_ret_type_p)
12176 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
12177 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
12178 return gen_rtx_REG (mode, 16);
12179 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
12180 || UNITS_PER_LONG == UNITS_PER_WORD)
12181 return gen_rtx_REG (mode, 2);
12182 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
12184 /* This case is triggered when returning a 64 bit value with
12185 -m31 -mzarch. Although the value would fit into a single
12186 register it has to be forced into a 32 bit register pair in
12187 order to match the ABI. */
12188 rtvec p = rtvec_alloc (2);
12191 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
12193 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
12195 return gen_rtx_PARALLEL (mode, p);
12198 gcc_unreachable ();
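/* Editorial sketch of the register-pair case above: with -m31 -mzarch

     long long h (void);

   returns its value in r2 (upper word, offset 0) and r3 (lower word,
   offset 4), matching the two EXPR_LIST entries.  */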
12201 /* Define where to return a scalar return value of type RET_TYPE. */
12204 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
12207 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
12208 fn_decl_or_type, outgoing);
12211 /* Define where to return a scalar libcall return value of mode
12215 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
12217 return s390_function_and_libcall_value (mode, NULL_TREE,
12222 /* Create and return the va_list datatype.
12224 On S/390, va_list is an array type equivalent to
typedef struct __va_list_tag
  {
    long __gpr;
    long __fpr;
    void *__overflow_arg_area;
    void *__reg_save_area;
  } va_list[1];
12234 where __gpr and __fpr hold the number of general purpose
12235 or floating point arguments used up to now, respectively,
12236 __overflow_arg_area points to the stack location of the
12237 next argument passed on the stack, and __reg_save_area
12238 always points to the start of the register area in the
12239 call frame of the current function. The function prologue
12240 saves all registers used for argument passing into this
12241 area if the function uses variable arguments. */
12244 s390_build_builtin_va_list (void)
12246 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12248 record = lang_hooks.types.make_type (RECORD_TYPE);
12251 build_decl (BUILTINS_LOCATION,
12252 TYPE_DECL, get_identifier ("__va_list_tag"), record);
12254 f_gpr = build_decl (BUILTINS_LOCATION,
12255 FIELD_DECL, get_identifier ("__gpr"),
12256 long_integer_type_node);
12257 f_fpr = build_decl (BUILTINS_LOCATION,
12258 FIELD_DECL, get_identifier ("__fpr"),
12259 long_integer_type_node);
12260 f_ovf = build_decl (BUILTINS_LOCATION,
12261 FIELD_DECL, get_identifier ("__overflow_arg_area"),
12263 f_sav = build_decl (BUILTINS_LOCATION,
12264 FIELD_DECL, get_identifier ("__reg_save_area"),
12267 va_list_gpr_counter_field = f_gpr;
12268 va_list_fpr_counter_field = f_fpr;
12270 DECL_FIELD_CONTEXT (f_gpr) = record;
12271 DECL_FIELD_CONTEXT (f_fpr) = record;
12272 DECL_FIELD_CONTEXT (f_ovf) = record;
12273 DECL_FIELD_CONTEXT (f_sav) = record;
12275 TYPE_STUB_DECL (record) = type_decl;
12276 TYPE_NAME (record) = type_decl;
12277 TYPE_FIELDS (record) = f_gpr;
12278 DECL_CHAIN (f_gpr) = f_fpr;
12279 DECL_CHAIN (f_fpr) = f_ovf;
12280 DECL_CHAIN (f_ovf) = f_sav;
12282 layout_type (record);
12284 /* The correct type is an array type of one element. */
12285 return build_array_type (record, build_index_type (size_zero_node));
12288 /* Implement va_start by filling the va_list structure VALIST.
12289 STDARG_P is always true, and ignored.
12290 NEXTARG points to the first anonymous stack argument.
12292 The following global variables are used to initialize
12293 the va_list structure:
12296 holds number of gprs and fprs used for named arguments.
12297 crtl->args.arg_offset_rtx:
12298 holds the offset of the first anonymous stack argument
12299 (relative to the virtual arg pointer). */
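/* Editorial sketch (plain C, field names from __va_list_tag above) of
   the assignments the expander below emits:

     ap->__gpr = crtl->args.info.gprs;      // named GPR args used
     ap->__fpr = crtl->args.info.fprs;      // named FPR args used
     ap->__overflow_arg_area = incoming_args + max (arg_offset, 0);
     ap->__reg_save_area = return_address_pointer
                           - RETURN_REGNUM * UNITS_PER_LONG;

   "incoming_args" and "arg_offset" stand for virtual_incoming_args_rtx
   and crtl->args.arg_offset_rtx.  */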
12302 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
12304 HOST_WIDE_INT n_gpr, n_fpr;
12306 tree f_gpr, f_fpr, f_ovf, f_sav;
12307 tree gpr, fpr, ovf, sav, t;
12309 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12310 f_fpr = DECL_CHAIN (f_gpr);
12311 f_ovf = DECL_CHAIN (f_fpr);
12312 f_sav = DECL_CHAIN (f_ovf);
12314 valist = build_simple_mem_ref (valist);
12315 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12316 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12317 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12318 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12320 /* Count number of gp and fp argument registers used. */
12322 n_gpr = crtl->args.info.gprs;
12323 n_fpr = crtl->args.info.fprs;
12325 if (cfun->va_list_gpr_size)
12327 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12328 build_int_cst (NULL_TREE, n_gpr));
12329 TREE_SIDE_EFFECTS (t) = 1;
12330 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12333 if (cfun->va_list_fpr_size)
12335 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12336 build_int_cst (NULL_TREE, n_fpr));
12337 TREE_SIDE_EFFECTS (t) = 1;
12338 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12341 if (flag_split_stack
12342 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12344 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12349 reg = gen_reg_rtx (Pmode);
12350 cfun->machine->split_stack_varargs_pointer = reg;
12353 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12354 seq = get_insns ();
12357 push_topmost_sequence ();
12358 emit_insn_after (seq, entry_of_function ());
12359 pop_topmost_sequence ();
12362 /* Find the overflow area.
12363 FIXME: This currently is too pessimistic when the vector ABI is
enabled.  In that case we *always* set up the overflow area
pointer.  */
12366 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
12367 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12370 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12371 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12373 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12375 off = INTVAL (crtl->args.arg_offset_rtx);
12376 off = off < 0 ? 0 : off;
12377 if (TARGET_DEBUG_ARG)
12378 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12379 (int)n_gpr, (int)n_fpr, off);
12381 t = fold_build_pointer_plus_hwi (t, off);
12383 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12384 TREE_SIDE_EFFECTS (t) = 1;
12385 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12388 /* Find the register save area. */
12389 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12390 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12392 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12393 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12395 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12396 TREE_SIDE_EFFECTS (t) = 1;
12397 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12401 /* Implement va_arg by updating the va_list structure
12402 VALIST as required to retrieve an argument of type
12403 TYPE, and returning that argument.
12405 Generates code equivalent to:
if (integral value) {
  if (size <= 4 && args.gpr < 5 ||
      size > 4 && args.gpr < 4)
    ret = args.reg_save_area[args.gpr+8]
  else
    ret = *args.overflow_arg_area++;
} else if (vector value) {
  ret = *args.overflow_arg_area;
  args.overflow_arg_area += size / 8;
} else if (float value) {
  if (args.fpr < 2)
    ret = args.reg_save_area[args.fpr+64]
  else
    ret = *args.overflow_arg_area++;
} else if (aggregate value) {
  if (args.gpr < 5)
    ret = *args.reg_save_area[args.gpr]
  else
    ret = **args.overflow_arg_area++;
}  */
12429 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12430 gimple_seq *post_p ATTRIBUTE_UNUSED)
12432 tree f_gpr, f_fpr, f_ovf, f_sav;
12433 tree gpr, fpr, ovf, sav, reg, t, u;
12434 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12435 tree lab_false, lab_over = NULL_TREE;
12436 tree addr = create_tmp_var (ptr_type_node, "addr");
bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
                      its stack slot.  */
12440 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12441 f_fpr = DECL_CHAIN (f_gpr);
12442 f_ovf = DECL_CHAIN (f_fpr);
12443 f_sav = DECL_CHAIN (f_ovf);
12445 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12446 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12447 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12449 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12450 both appear on a lhs. */
12451 valist = unshare_expr (valist);
12452 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12454 size = int_size_in_bytes (type);
12456 s390_check_type_for_vector_abi (type, true, false);
12458 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12460 if (TARGET_DEBUG_ARG)
12462 fprintf (stderr, "va_arg: aggregate type");
12466 /* Aggregates are passed by reference. */
/* Kernel stack layout on 31 bit: it is assumed here that no padding
   will be added by s390_frame_info because for va_args an even number
   of GPRs always has to be saved (r15-r2 = 14 regs).  */
12474 sav_ofs = 2 * UNITS_PER_LONG;
12475 sav_scale = UNITS_PER_LONG;
12476 size = UNITS_PER_LONG;
12477 max_reg = GP_ARG_NUM_REG - n_reg;
12478 left_align_p = false;
12480 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12482 if (TARGET_DEBUG_ARG)
12484 fprintf (stderr, "va_arg: vector type");
12494 left_align_p = true;
12496 else if (s390_function_arg_float (TYPE_MODE (type), type))
12498 if (TARGET_DEBUG_ARG)
12500 fprintf (stderr, "va_arg: float type");
12504 /* FP args go in FP registers, if present. */
12508 sav_ofs = 16 * UNITS_PER_LONG;
12510 max_reg = FP_ARG_NUM_REG - n_reg;
12511 left_align_p = false;
12515 if (TARGET_DEBUG_ARG)
12517 fprintf (stderr, "va_arg: other type");
12521 /* Otherwise into GP registers. */
12524 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
/* Kernel stack layout on 31 bit: it is assumed here that no padding
   will be added by s390_frame_info because for va_args an even number
   of GPRs always has to be saved (r15-r2 = 14 regs).  */
12529 sav_ofs = 2 * UNITS_PER_LONG;
12531 if (size < UNITS_PER_LONG)
12532 sav_ofs += UNITS_PER_LONG - size;
12534 sav_scale = UNITS_PER_LONG;
12535 max_reg = GP_ARG_NUM_REG - n_reg;
12536 left_align_p = false;
12539 /* Pull the value out of the saved registers ... */
12541 if (reg != NULL_TREE)
/* if (reg > ((typeof (reg))max_reg))
     goto lab_false;

   addr = sav + sav_ofs + reg * sav_scale;

   goto lab_over;

   lab_false:  */
12554 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12555 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12557 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12558 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12559 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12560 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12561 gimplify_and_add (t, pre_p);
12563 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12564 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12565 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12566 t = fold_build_pointer_plus (t, u);
12568 gimplify_assign (addr, t, pre_p);
12570 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12572 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12575 /* ... Otherwise out of the overflow area. */
12578 if (size < UNITS_PER_LONG && !left_align_p)
12579 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12581 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12583 gimplify_assign (addr, t, pre_p);
12585 if (size < UNITS_PER_LONG && left_align_p)
12586 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12588 t = fold_build_pointer_plus_hwi (t, size);
12590 gimplify_assign (ovf, t, pre_p);
12592 if (reg != NULL_TREE)
12593 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12596 /* Increment register save count. */
12600 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12601 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12602 gimplify_and_add (u, pre_p);
t = build_pointer_type_for_mode (build_pointer_type (type),
                                 ptr_mode, true);
12609 addr = fold_convert (t, addr);
12610 addr = build_va_arg_indirect_ref (addr);
12614 t = build_pointer_type_for_mode (type, ptr_mode, true);
12615 addr = fold_convert (t, addr);
12618 return build_va_arg_indirect_ref (addr);
12621 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12623 DEST - Register location where CC will be stored.
TDB - Pointer to a 256 byte area where to store the transaction
diagnostic block.  NULL if TDB is not needed.
RETRY - Retry count value.  If non-NULL a retry loop for CC2
is generated.
12628 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12629 of the tbegin instruction pattern. */
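/* Editorial sketch of the retry loop built below, in C terms:

     unsigned int n = retry + 1;          // retry_reg
     do
       {
         cc = tbegin ();
         if (cc != 2)                     // CC0/CC1/CC3: leave
           break;
         tx_assist ((retry + 2) - n);     // ppa hint: 1, 2, 3, ...
       }
     while (--n != 0);                    // doloop on retry_reg
*/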
12632 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12634 rtx retry_plus_two = gen_reg_rtx (SImode);
12635 rtx retry_reg = gen_reg_rtx (SImode);
12636 rtx_code_label *retry_label = NULL;
12638 if (retry != NULL_RTX)
12640 emit_move_insn (retry_reg, retry);
12641 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12642 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12643 retry_label = gen_label_rtx ();
12644 emit_label (retry_label);
12647 if (clobber_fprs_p)
12650 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12653 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12657 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12660 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12661 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12663 UNSPEC_CC_TO_INT));
12664 if (retry != NULL_RTX)
12666 const int CC0 = 1 << 3;
12667 const int CC1 = 1 << 2;
12668 const int CC3 = 1 << 0;
12670 rtx count = gen_reg_rtx (SImode);
12671 rtx_code_label *leave_label = gen_label_rtx ();
12673 /* Exit for success and permanent failures. */
12674 jump = s390_emit_jump (leave_label,
12675 gen_rtx_EQ (VOIDmode,
12676 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12677 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12678 LABEL_NUSES (leave_label) = 1;
12680 /* CC2 - transient failure. Perform retry with ppa. */
12681 emit_move_insn (count, retry_plus_two);
12682 emit_insn (gen_subsi3 (count, count, retry_reg));
12683 emit_insn (gen_tx_assist (count));
jump = emit_jump_insn (gen_doloop_si64 (retry_label,
                                        retry_reg,
                                        retry_reg));
12687 JUMP_LABEL (jump) = retry_label;
12688 LABEL_NUSES (retry_label) = 1;
12689 emit_label (leave_label);
12694 /* Return the decl for the target specific builtin with the function
12698 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12700 if (fcode >= S390_BUILTIN_MAX)
12701 return error_mark_node;
12703 return s390_builtin_decls[fcode];
12706 /* We call mcount before the function prologue. So a profiled leaf
12707 function should stay a leaf function. */
12710 s390_keep_leaf_when_profiled ()
12715 /* Output assembly code for the trampoline template to
12718 On S/390, we use gpr 1 internally in the trampoline code;
12719 gpr 0 is used to hold the static chain. */
12722 s390_asm_trampoline_template (FILE *file)
12725 op[0] = gen_rtx_REG (Pmode, 0);
12726 op[1] = gen_rtx_REG (Pmode, 1);
12730 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12731 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12732 output_asm_insn ("br\t%1", op); /* 2 byte */
12733 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12737 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12738 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12739 output_asm_insn ("br\t%1", op); /* 2 byte */
12740 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
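/* Editorial view of the finished 64 bit trampoline block (byte
   offsets; the 31 bit variant is analogous with 4 byte slots):

      0: basr %r1,0            ; r1 = address of the next insn
      2: lmg  %r0,%r1,14(%r1)  ; r0 = chain (offset 16), r1 = target (24)
      8: br   %r1              ; jump to target
     16: <static chain>        ; stored by s390_trampoline_init below
     24: <function address>
*/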
12744 /* Emit RTL insns to initialize the variable parts of a trampoline.
12745 FNADDR is an RTX for the address of the function's pure code.
12746 CXT is an RTX for the static chain value for the function. */
12749 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12751 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12754 emit_block_move (m_tramp, assemble_trampoline_template (),
12755 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12757 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12758 emit_move_insn (mem, cxt);
12759 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12760 emit_move_insn (mem, fnaddr);
12763 /* Output assembler code to FILE to increment profiler label # LABELNO
12764 for profiling a function entry. */
12767 s390_function_profiler (FILE *file, int labelno)
12772 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12774 fprintf (file, "# function profiler \n");
12776 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12777 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12778 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12780 op[2] = gen_rtx_REG (Pmode, 1);
12781 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12782 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12784 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12787 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12788 op[4] = gen_rtx_CONST (Pmode, op[4]);
12793 output_asm_insn ("stg\t%0,%1", op);
12794 output_asm_insn ("larl\t%2,%3", op);
12795 output_asm_insn ("brasl\t%0,%4", op);
12796 output_asm_insn ("lg\t%0,%1", op);
12798 else if (TARGET_CPU_ZARCH)
12800 output_asm_insn ("st\t%0,%1", op);
12801 output_asm_insn ("larl\t%2,%3", op);
12802 output_asm_insn ("brasl\t%0,%4", op);
12803 output_asm_insn ("l\t%0,%1", op);
12805 else if (!flag_pic)
12807 op[6] = gen_label_rtx ();
12809 output_asm_insn ("st\t%0,%1", op);
12810 output_asm_insn ("bras\t%2,%l6", op);
12811 output_asm_insn (".long\t%4", op);
12812 output_asm_insn (".long\t%3", op);
12813 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12814 output_asm_insn ("l\t%0,0(%2)", op);
12815 output_asm_insn ("l\t%2,4(%2)", op);
12816 output_asm_insn ("basr\t%0,%0", op);
12817 output_asm_insn ("l\t%0,%1", op);
12821 op[5] = gen_label_rtx ();
12822 op[6] = gen_label_rtx ();
12824 output_asm_insn ("st\t%0,%1", op);
12825 output_asm_insn ("bras\t%2,%l6", op);
12826 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12827 output_asm_insn (".long\t%4-%l5", op);
12828 output_asm_insn (".long\t%3-%l5", op);
12829 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12830 output_asm_insn ("lr\t%0,%2", op);
12831 output_asm_insn ("a\t%0,0(%2)", op);
12832 output_asm_insn ("a\t%2,4(%2)", op);
12833 output_asm_insn ("basr\t%0,%0", op);
12834 output_asm_insn ("l\t%0,%1", op);
12838 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12839 into its SYMBOL_REF_FLAGS. */
12842 s390_encode_section_info (tree decl, rtx rtl, int first)
12844 default_encode_section_info (decl, rtl, first);
12846 if (TREE_CODE (decl) == VAR_DECL)
12848 /* Store the alignment to be able to check if we can use
12849 a larl/load-relative instruction. We only handle the cases
12850 that can go wrong (i.e. no FUNC_DECLs). */
12851 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12852 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12853 else if (DECL_ALIGN (decl) % 32)
12854 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12855 else if (DECL_ALIGN (decl) % 64)
12856 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12859 /* Literal pool references don't have a decl so they are handled
12860 differently here. We rely on the information in the MEM_ALIGN
12861 entry to decide upon the alignment. */
12863 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12864 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
12866 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12867 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12868 else if (MEM_ALIGN (rtl) % 32)
12869 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12870 else if (MEM_ALIGN (rtl) % 64)
12871 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
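/* Editorial example: a global "char buf[3]" aligned to 1 byte gets
   SYMBOL_FLAG_NOTALIGN2 set here, so the backend will not address it
   via larl, which can only form even addresses.  */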
12875 /* Output thunk to FILE that implements a C++ virtual function call (with
12876 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12877 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12878 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12879 relative to the resulting this pointer. */
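/* Editorial sketch of the thunk's effect, in C-like pseudocode:

     this += delta;
     if (vcall_offset != 0)
       this += *(long *) (*(char **) this + vcall_offset);
     goto FUNCTION;    // tail jump, argument registers untouched
*/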
12882 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12883 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12889 /* Make sure unwind info is emitted for the thunk if needed. */
12890 final_start_function (emit_barrier (), file, 1);
12892 /* Operand 0 is the target function. */
12893 op[0] = XEXP (DECL_RTL (function), 0);
12894 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12897 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12898 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12899 op[0] = gen_rtx_CONST (Pmode, op[0]);
12902 /* Operand 1 is the 'this' pointer. */
12903 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12904 op[1] = gen_rtx_REG (Pmode, 3);
12906 op[1] = gen_rtx_REG (Pmode, 2);
12908 /* Operand 2 is the delta. */
12909 op[2] = GEN_INT (delta);
12911 /* Operand 3 is the vcall_offset. */
12912 op[3] = GEN_INT (vcall_offset);
12914 /* Operand 4 is the temporary register. */
12915 op[4] = gen_rtx_REG (Pmode, 1);
12917 /* Operands 5 to 8 can be used as labels. */
12923 /* Operand 9 can be used for temporary register. */
12926 /* Generate code. */
12929 /* Setup literal pool pointer if required. */
12930 if ((!DISP_IN_RANGE (delta)
12931 && !CONST_OK_FOR_K (delta)
12932 && !CONST_OK_FOR_Os (delta))
12933 || (!DISP_IN_RANGE (vcall_offset)
12934 && !CONST_OK_FOR_K (vcall_offset)
12935 && !CONST_OK_FOR_Os (vcall_offset)))
12937 op[5] = gen_label_rtx ();
12938 output_asm_insn ("larl\t%4,%5", op);
12941 /* Add DELTA to this pointer. */
12944 if (CONST_OK_FOR_J (delta))
12945 output_asm_insn ("la\t%1,%2(%1)", op);
12946 else if (DISP_IN_RANGE (delta))
12947 output_asm_insn ("lay\t%1,%2(%1)", op);
12948 else if (CONST_OK_FOR_K (delta))
12949 output_asm_insn ("aghi\t%1,%2", op);
12950 else if (CONST_OK_FOR_Os (delta))
12951 output_asm_insn ("agfi\t%1,%2", op);
12954 op[6] = gen_label_rtx ();
12955 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12959 /* Perform vcall adjustment. */
12962 if (DISP_IN_RANGE (vcall_offset))
12964 output_asm_insn ("lg\t%4,0(%1)", op);
12965 output_asm_insn ("ag\t%1,%3(%4)", op);
12967 else if (CONST_OK_FOR_K (vcall_offset))
12969 output_asm_insn ("lghi\t%4,%3", op);
12970 output_asm_insn ("ag\t%4,0(%1)", op);
12971 output_asm_insn ("ag\t%1,0(%4)", op);
12973 else if (CONST_OK_FOR_Os (vcall_offset))
12975 output_asm_insn ("lgfi\t%4,%3", op);
12976 output_asm_insn ("ag\t%4,0(%1)", op);
12977 output_asm_insn ("ag\t%1,0(%4)", op);
12981 op[7] = gen_label_rtx ();
12982 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12983 output_asm_insn ("ag\t%4,0(%1)", op);
12984 output_asm_insn ("ag\t%1,0(%4)", op);
12988 /* Jump to target. */
12989 output_asm_insn ("jg\t%0", op);
12991 /* Output literal pool if required. */
12994 output_asm_insn (".align\t4", op);
12995 targetm.asm_out.internal_label (file, "L",
12996 CODE_LABEL_NUMBER (op[5]));
13000 targetm.asm_out.internal_label (file, "L",
13001 CODE_LABEL_NUMBER (op[6]));
13002 output_asm_insn (".long\t%2", op);
13006 targetm.asm_out.internal_label (file, "L",
13007 CODE_LABEL_NUMBER (op[7]));
13008 output_asm_insn (".long\t%3", op);
13013 /* Setup base pointer if required. */
13015 || (!DISP_IN_RANGE (delta)
13016 && !CONST_OK_FOR_K (delta)
13017 && !CONST_OK_FOR_Os (delta))
|| (!DISP_IN_RANGE (vcall_offset)
13019 && !CONST_OK_FOR_K (vcall_offset)
13020 && !CONST_OK_FOR_Os (vcall_offset)))
13022 op[5] = gen_label_rtx ();
13023 output_asm_insn ("basr\t%4,0", op);
13024 targetm.asm_out.internal_label (file, "L",
13025 CODE_LABEL_NUMBER (op[5]));
13028 /* Add DELTA to this pointer. */
13031 if (CONST_OK_FOR_J (delta))
13032 output_asm_insn ("la\t%1,%2(%1)", op);
13033 else if (DISP_IN_RANGE (delta))
13034 output_asm_insn ("lay\t%1,%2(%1)", op);
13035 else if (CONST_OK_FOR_K (delta))
13036 output_asm_insn ("ahi\t%1,%2", op);
13037 else if (CONST_OK_FOR_Os (delta))
13038 output_asm_insn ("afi\t%1,%2", op);
13041 op[6] = gen_label_rtx ();
13042 output_asm_insn ("a\t%1,%6-%5(%4)", op);
13046 /* Perform vcall adjustment. */
13049 if (CONST_OK_FOR_J (vcall_offset))
13051 output_asm_insn ("l\t%4,0(%1)", op);
13052 output_asm_insn ("a\t%1,%3(%4)", op);
13054 else if (DISP_IN_RANGE (vcall_offset))
13056 output_asm_insn ("l\t%4,0(%1)", op);
13057 output_asm_insn ("ay\t%1,%3(%4)", op);
13059 else if (CONST_OK_FOR_K (vcall_offset))
13061 output_asm_insn ("lhi\t%4,%3", op);
13062 output_asm_insn ("a\t%4,0(%1)", op);
13063 output_asm_insn ("a\t%1,0(%4)", op);
13065 else if (CONST_OK_FOR_Os (vcall_offset))
13067 output_asm_insn ("iilf\t%4,%3", op);
13068 output_asm_insn ("a\t%4,0(%1)", op);
13069 output_asm_insn ("a\t%1,0(%4)", op);
13073 op[7] = gen_label_rtx ();
13074 output_asm_insn ("l\t%4,%7-%5(%4)", op);
13075 output_asm_insn ("a\t%4,0(%1)", op);
13076 output_asm_insn ("a\t%1,0(%4)", op);
13079 /* We had to clobber the base pointer register.
13080 Re-setup the base pointer (with a different base). */
13081 op[5] = gen_label_rtx ();
13082 output_asm_insn ("basr\t%4,0", op);
13083 targetm.asm_out.internal_label (file, "L",
13084 CODE_LABEL_NUMBER (op[5]));
13087 /* Jump to target. */
13088 op[8] = gen_label_rtx ();
13091 output_asm_insn ("l\t%4,%8-%5(%4)", op);
13092 else if (!nonlocal)
13093 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13094 /* We cannot call through .plt, since .plt requires %r12 loaded. */
13095 else if (flag_pic == 1)
13097 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13098 output_asm_insn ("l\t%4,%0(%4)", op);
13100 else if (flag_pic == 2)
13102 op[9] = gen_rtx_REG (Pmode, 0);
13103 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
13104 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13105 output_asm_insn ("ar\t%4,%9", op);
13106 output_asm_insn ("l\t%4,0(%4)", op);
13109 output_asm_insn ("br\t%4", op);
13111 /* Output literal pool. */
13112 output_asm_insn (".align\t4", op);
13114 if (nonlocal && flag_pic == 2)
13115 output_asm_insn (".long\t%0", op);
13118 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
13119 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
13122 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
13124 output_asm_insn (".long\t%0", op);
13126 output_asm_insn (".long\t%0-%5", op);
13130 targetm.asm_out.internal_label (file, "L",
13131 CODE_LABEL_NUMBER (op[6]));
13132 output_asm_insn (".long\t%2", op);
13136 targetm.asm_out.internal_label (file, "L",
13137 CODE_LABEL_NUMBER (op[7]));
13138 output_asm_insn (".long\t%3", op);
13141 final_end_function ();
13145 s390_valid_pointer_mode (scalar_int_mode mode)
13147 return (mode == SImode || (TARGET_64BIT && mode == DImode));
13150 /* Checks whether the given CALL_EXPR would use a caller
13151 saved register. This is used to decide whether sibling call
13152 optimization could be performed on the respective function
13156 s390_call_saved_register_used (tree call_expr)
13158 CUMULATIVE_ARGS cum_v;
13159 cumulative_args_t cum;
13166 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
13167 cum = pack_cumulative_args (&cum_v);
13169 for (i = 0; i < call_expr_nargs (call_expr); i++)
13171 parameter = CALL_EXPR_ARG (call_expr, i);
13172 gcc_assert (parameter);
13174 /* For an undeclared variable passed as parameter we will get
13175 an ERROR_MARK node here. */
13176 if (TREE_CODE (parameter) == ERROR_MARK)
13179 type = TREE_TYPE (parameter);
13182 mode = TYPE_MODE (type);
13185 /* We assume that in the target function all parameters are
13186 named. This only has an impact on vector argument register
usage, none of which is call-saved.  */
13188 if (pass_by_reference (&cum_v, mode, type, true))
13191 type = build_pointer_type (type);
13194 parm_rtx = s390_function_arg (cum, mode, type, true);
13196 s390_function_arg_advance (cum, mode, type, true);
13201 if (REG_P (parm_rtx))
13204 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
13206 if (!call_used_regs[reg + REGNO (parm_rtx)])
13210 if (GET_CODE (parm_rtx) == PARALLEL)
13214 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
13216 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
13218 gcc_assert (REG_P (r));
13221 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
13223 if (!call_used_regs[reg + REGNO (r)])
13232 /* Return true if the given call expression can be
13233 turned into a sibling call.
13234 DECL holds the declaration of the function to be called whereas
13235 EXP is the call expression itself. */
13238 s390_function_ok_for_sibcall (tree decl, tree exp)
13240 /* The TPF epilogue uses register 1. */
13241 if (TARGET_TPF_PROFILING)
13244 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
13245 which would have to be restored before the sibcall. */
13246 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
/* Register 6 on s390 is available as an argument register but is at
   the same time call-saved, so the callee would have to restore it.
   This makes functions that need this register for argument passing
   unsuitable for sibcalls.  */
13252 return !s390_call_saved_register_used (exp);
13255 /* Return the fixed registers used for condition codes. */
13258 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13261 *p2 = INVALID_REGNUM;
13266 /* This function is used by the call expanders of the machine description.
13267 It emits the call insn itself together with the necessary operations
13268 to adjust the target address and returns the emitted insn.
13269 ADDR_LOCATION is the target address rtx
13270 TLS_CALL the location of the thread-local symbol
13271 RESULT_REG the register where the result of the call should be stored
13272 RETADDR_REG the register where the return address should be stored
13273 If this parameter is NULL_RTX the call is considered
13274 to be a sibling call. */
13277 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
13280 bool plt_call = false;
13286 /* Direct function calls need special treatment. */
13287 if (GET_CODE (addr_location) == SYMBOL_REF)
13289 /* When calling a global routine in PIC mode, we must
13290 replace the symbol itself with the PLT stub. */
13291 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13293 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
13295 addr_location = gen_rtx_UNSPEC (Pmode,
13296 gen_rtvec (1, addr_location),
13298 addr_location = gen_rtx_CONST (Pmode, addr_location);
13302 /* For -fpic code the PLT entries might use r12 which is
13303 call-saved. Therefore we cannot do a sibcall when
13304 calling directly using a symbol ref. When reaching
13305 this point we decided (in s390_function_ok_for_sibcall)
13306 to do a sibcall for a function pointer but one of the
13307 optimizers was able to get rid of the function pointer
13308 by propagating the symbol ref into the call. This
13309 optimization is illegal for S/390 so we turn the direct
call into an indirect call again.  */
13311 addr_location = force_reg (Pmode, addr_location);
13314 /* Unless we can use the bras(l) insn, force the
13315 routine address into a register. */
13316 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13319 addr_location = legitimize_pic_address (addr_location, 0);
13321 addr_location = force_reg (Pmode, addr_location);
13325 /* If it is already an indirect call or the code above moved the
SYMBOL_REF to somewhere else, make sure the address can be found in
register 1.  */
13328 if (retaddr_reg == NULL_RTX
13329 && GET_CODE (addr_location) != SYMBOL_REF
13332 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13333 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13336 addr_location = gen_rtx_MEM (QImode, addr_location);
13337 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13339 if (result_reg != NULL_RTX)
13340 call = gen_rtx_SET (result_reg, call);
13342 if (retaddr_reg != NULL_RTX)
13344 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13346 if (tls_call != NULL_RTX)
13347 vec = gen_rtvec (3, call, clobber,
13348 gen_rtx_USE (VOIDmode, tls_call));
13350 vec = gen_rtvec (2, call, clobber);
13352 call = gen_rtx_PARALLEL (VOIDmode, vec);
13355 insn = emit_call_insn (call);
13357 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13358 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13360 /* s390_function_ok_for_sibcall should
13361 have denied sibcalls in this case. */
13362 gcc_assert (retaddr_reg != NULL_RTX);
13363 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
13368 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
13371 s390_conditional_register_usage (void)
13377 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13378 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13380 if (TARGET_CPU_ZARCH)
13382 fixed_regs[BASE_REGNUM] = 0;
13383 call_used_regs[BASE_REGNUM] = 0;
13384 fixed_regs[RETURN_REGNUM] = 0;
13385 call_used_regs[RETURN_REGNUM] = 0;
13389 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13390 call_used_regs[i] = call_really_used_regs[i] = 0;
13394 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13395 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13398 if (TARGET_SOFT_FLOAT)
13400 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13401 call_used_regs[i] = fixed_regs[i] = 1;
13404 /* Disable v16 - v31 for non-vector target. */
13407 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13408 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13412 /* Corresponding function to eh_return expander. */
13414 static GTY(()) rtx s390_tpf_eh_return_symbol;
13416 s390_emit_tpf_eh_return (rtx target)
13421 if (!s390_tpf_eh_return_symbol)
13422 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13424 reg = gen_rtx_REG (Pmode, 2);
13425 orig_ra = gen_rtx_REG (Pmode, 3);
13427 emit_move_insn (reg, target);
13428 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13429 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13430 gen_rtx_REG (Pmode, RETURN_REGNUM));
13431 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13432 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13434 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13437 /* Rework the prologue/epilogue to avoid saving/restoring
13438 registers unnecessarily. */
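/* Editorial example of the effect: if register allocation left only
   r14/r15 in need of saving, a prologue store such as

     stmg  %r6,%r15,48(%r15)

   is re-emitted below as the narrower

     stmg  %r14,%r15,112(%r15)

   (offsets as in the 64 bit ELF ABI register save area).  */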
13441 s390_optimize_prologue (void)
13443 rtx_insn *insn, *new_insn, *next_insn;
13445 /* Do a final recompute of the frame-related data. */
13446 s390_optimize_register_info ();
13448 /* If all special registers are in fact used, there's nothing we
13449 can do, so no point in walking the insn list. */
13451 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13452 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13453 && (TARGET_CPU_ZARCH
13454 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13455 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13458 /* Search for prologue/epilogue insns and replace them. */
13460 for (insn = get_insns (); insn; insn = next_insn)
13462 int first, last, off;
13463 rtx set, base, offset;
13466 next_insn = NEXT_INSN (insn);
13468 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13471 pat = PATTERN (insn);
/* Remove ldgr/lgdr instructions used for saving and restoring
   GPRs if possible.  */
13479 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13480 tmp_pat = XVECEXP (pat, 0, 0);
13482 if (GET_CODE (tmp_pat) == SET
13483 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13484 && REG_P (SET_SRC (tmp_pat))
13485 && REG_P (SET_DEST (tmp_pat)))
13487 int src_regno = REGNO (SET_SRC (tmp_pat));
13488 int dest_regno = REGNO (SET_DEST (tmp_pat));
13492 if (!((GENERAL_REGNO_P (src_regno)
13493 && FP_REGNO_P (dest_regno))
13494 || (FP_REGNO_P (src_regno)
13495 && GENERAL_REGNO_P (dest_regno))))
13498 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13499 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13501 /* GPR must be call-saved, FPR must be call-clobbered. */
13502 if (!call_really_used_regs[fpr_regno]
13503 || call_really_used_regs[gpr_regno])
13506 /* It must not happen that what we once saved in an FPR now
13507 needs a stack slot. */
13508 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13510 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13512 remove_insn (insn);
13518 if (GET_CODE (pat) == PARALLEL
13519 && store_multiple_operation (pat, VOIDmode))
13521 set = XVECEXP (pat, 0, 0);
13522 first = REGNO (SET_SRC (set));
13523 last = first + XVECLEN (pat, 0) - 1;
13524 offset = const0_rtx;
13525 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13526 off = INTVAL (offset);
13528 if (GET_CODE (base) != REG || off < 0)
13530 if (cfun_frame_layout.first_save_gpr != -1
13531 && (cfun_frame_layout.first_save_gpr < first
13532 || cfun_frame_layout.last_save_gpr > last))
13534 if (REGNO (base) != STACK_POINTER_REGNUM
13535 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13537 if (first > BASE_REGNUM || last < BASE_REGNUM)
13540 if (cfun_frame_layout.first_save_gpr != -1)
13542 rtx s_pat = save_gprs (base,
13543 off + (cfun_frame_layout.first_save_gpr
13544 - first) * UNITS_PER_LONG,
13545 cfun_frame_layout.first_save_gpr,
13546 cfun_frame_layout.last_save_gpr);
13547 new_insn = emit_insn_before (s_pat, insn);
13548 INSN_ADDRESSES_NEW (new_insn, -1);
13551 remove_insn (insn);
13555 if (cfun_frame_layout.first_save_gpr == -1
13556 && GET_CODE (pat) == SET
13557 && GENERAL_REG_P (SET_SRC (pat))
13558 && GET_CODE (SET_DEST (pat)) == MEM)
13561 first = REGNO (SET_SRC (set));
13562 offset = const0_rtx;
13563 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13564 off = INTVAL (offset);
13566 if (GET_CODE (base) != REG || off < 0)
13568 if (REGNO (base) != STACK_POINTER_REGNUM
13569 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13572 remove_insn (insn);
13576 if (GET_CODE (pat) == PARALLEL
13577 && load_multiple_operation (pat, VOIDmode))
13579 set = XVECEXP (pat, 0, 0);
13580 first = REGNO (SET_DEST (set));
13581 last = first + XVECLEN (pat, 0) - 1;
13582 offset = const0_rtx;
13583 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13584 off = INTVAL (offset);
13586 if (GET_CODE (base) != REG || off < 0)
13589 if (cfun_frame_layout.first_restore_gpr != -1
13590 && (cfun_frame_layout.first_restore_gpr < first
13591 || cfun_frame_layout.last_restore_gpr > last))
13593 if (REGNO (base) != STACK_POINTER_REGNUM
13594 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13596 if (first > BASE_REGNUM || last < BASE_REGNUM)
13599 if (cfun_frame_layout.first_restore_gpr != -1)
13601 rtx rpat = restore_gprs (base,
13602 off + (cfun_frame_layout.first_restore_gpr
13603 - first) * UNITS_PER_LONG,
13604 cfun_frame_layout.first_restore_gpr,
13605 cfun_frame_layout.last_restore_gpr);
13607 /* Remove REG_CFA_RESTOREs for registers that we no
13608 longer need to restore. */
13609 REG_NOTES (rpat) = REG_NOTES (insn);
13610 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13611 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13612 && ((int) REGNO (XEXP (*ptr, 0))
13613 < cfun_frame_layout.first_restore_gpr))
13614 *ptr = XEXP (*ptr, 1);
13616 ptr = &XEXP (*ptr, 1);
13617 new_insn = emit_insn_before (rpat, insn);
13618 RTX_FRAME_RELATED_P (new_insn) = 1;
13619 INSN_ADDRESSES_NEW (new_insn, -1);
13622 remove_insn (insn);
13626 if (cfun_frame_layout.first_restore_gpr == -1
13627 && GET_CODE (pat) == SET
13628 && GENERAL_REG_P (SET_DEST (pat))
13629 && GET_CODE (SET_SRC (pat)) == MEM)
13632 first = REGNO (SET_DEST (set));
13633 offset = const0_rtx;
13634 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13635 off = INTVAL (offset);
13637 if (GET_CODE (base) != REG || off < 0)
13640 if (REGNO (base) != STACK_POINTER_REGNUM
13641 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13644 remove_insn (insn);
13650 /* On z10 and later the dynamic branch prediction must see the
13651 backward jump within a certain window. If it does not, it falls back to
13652 the static prediction. This function rearranges the loop backward
13653 branch in a way which makes the static prediction always correct.
13654 The function returns true if it added an instruction. */
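/* Editorial sketch (not from the original source; assembly is purely
   illustrative): a loop whose backward conditional jump is farther away
   than PREDICT_DISTANCE

       .Lloop:
          ...                more than PREDICT_DISTANCE bytes of body
          jne  .Lloop        backward conditional jump, mispredicted

   is rewritten by the function below into

       .Lloop:
          ...
          je   .Lskip        inverted condition, short forward jump
          j    .Lloop        unconditional backward jump, always
                             predicted correctly by static prediction
       .Lskip:
*/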
13656 s390_fix_long_loop_prediction (rtx_insn *insn)
13658 rtx set = single_set (insn);
13659 rtx code_label, label_ref;
13660 rtx_insn *uncond_jump;
13661 rtx_insn *cur_insn;
13665 /* This will exclude branch on count and branch on index patterns
13666 since these are correctly statically predicted. */
13668 || SET_DEST (set) != pc_rtx
13669 || GET_CODE (SET_SRC (set)) != IF_THEN_ELSE)
13672 /* Skip conditional returns. */
13673 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13674 && XEXP (SET_SRC (set), 2) == pc_rtx)
13677 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13678 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13680 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13682 code_label = XEXP (label_ref, 0);
13684 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13685 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13686 || (INSN_ADDRESSES (INSN_UID (insn))
13687 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13690 for (distance = 0, cur_insn = PREV_INSN (insn);
13691 distance < PREDICT_DISTANCE - 6;
13692 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13693 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13696 rtx_code_label *new_label = gen_label_rtx ();
13697 uncond_jump = emit_jump_insn_after (
13698 gen_rtx_SET (pc_rtx,
13699 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13701 emit_label_after (new_label, uncond_jump);
13703 tmp = XEXP (SET_SRC (set), 1);
13704 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13705 XEXP (SET_SRC (set), 2) = tmp;
13706 INSN_CODE (insn) = -1;
13708 XEXP (label_ref, 0) = new_label;
13709 JUMP_LABEL (insn) = new_label;
13710 JUMP_LABEL (uncond_jump) = code_label;
13715 /* Returns 1 if INSN reads the value of REG for purposes not related
13716 to addressing of memory, and 0 otherwise. */
13718 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13720 return reg_referenced_p (reg, PATTERN (insn))
13721 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13724 /* Starting from INSN find_cond_jump looks downwards in the insn
13725 stream for a single jump insn which is the last user of the
13726 condition code set in INSN. */
13728 find_cond_jump (rtx_insn *insn)
13730 for (; insn; insn = NEXT_INSN (insn))
13734 if (LABEL_P (insn))
13737 if (!JUMP_P (insn))
13739 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13744 /* This will be triggered by a return. */
13745 if (GET_CODE (PATTERN (insn)) != SET)
13748 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13749 ite = SET_SRC (PATTERN (insn));
13751 if (GET_CODE (ite) != IF_THEN_ELSE)
13754 cc = XEXP (XEXP (ite, 0), 0);
13755 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13758 if (find_reg_note (insn, REG_DEAD, cc))
13766 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13767 the semantics does not change. If NULL_RTX is passed as COND the
13768 function tries to find the conditional jump starting with INSN. */
13770 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13774 if (cond == NULL_RTX)
13776 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13777 rtx set = jump ? single_set (jump) : NULL_RTX;
13779 if (set == NULL_RTX)
13782 cond = XEXP (SET_SRC (set), 0);
13787 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13790 /* On z10, instructions of the compare-and-branch family have the
13791 property of accessing the register occurring as the second operand with
13792 its bits complemented. If such a compare is grouped with a second
13793 instruction that accesses the same register non-complemented, and
13794 if that register's value is delivered via a bypass, then the
13795 pipeline recycles, thereby causing significant performance decline.
13796 This function locates such situations and exchanges the two
13797 operands of the compare. The function returns true whenever it
13800 s390_z10_optimize_cmp (rtx_insn *insn)
13802 rtx_insn *prev_insn, *next_insn;
13803 bool insn_added_p = false;
13804 rtx cond, *op0, *op1;
13806 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13808 /* Handle compare and branch and branch on count
13810 rtx pattern = single_set (insn);
13813 || SET_DEST (pattern) != pc_rtx
13814 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13817 cond = XEXP (SET_SRC (pattern), 0);
13818 op0 = &XEXP (cond, 0);
13819 op1 = &XEXP (cond, 1);
13821 else if (GET_CODE (PATTERN (insn)) == SET)
13825 /* Handle normal compare instructions. */
13826 src = SET_SRC (PATTERN (insn));
13827 dest = SET_DEST (PATTERN (insn));
13830 || !CC_REGNO_P (REGNO (dest))
13831 || GET_CODE (src) != COMPARE)
13834 /* s390_swap_cmp will try to find the conditional
13835 jump when passing NULL_RTX as condition. */
13837 op0 = &XEXP (src, 0);
13838 op1 = &XEXP (src, 1);
13843 if (!REG_P (*op0) || !REG_P (*op1))
13846 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13849 /* Swap the COMPARE arguments and its mask if there is a
13850 conflicting access in the previous insn. */
13851 prev_insn = prev_active_insn (insn);
13852 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13853 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13854 s390_swap_cmp (cond, op0, op1, insn);
13856 /* Check if there is a conflict with the next insn. If there
13857 was no conflict with the previous insn, then swap the
13858 COMPARE arguments and its mask. If we already swapped
13859 the operands, or if swapping them would cause a conflict
13860 with the previous insn, issue a NOP after the COMPARE in
13861 order to separate the two instructions. */
13862 next_insn = next_active_insn (insn);
13863 if (next_insn != NULL_RTX && INSN_P (next_insn)
13864 && s390_non_addr_reg_read_p (*op1, next_insn))
13866 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13867 && s390_non_addr_reg_read_p (*op0, prev_insn))
13869 if (REGNO (*op1) == 0)
13870 emit_insn_after (gen_nop1 (), insn);
13872 emit_insn_after (gen_nop (), insn);
13873 insn_added_p = true;
13876 s390_swap_cmp (cond, op0, op1, insn);
13878 return insn_added_p;
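/* Editorial sketch of the hazard handled above (mnemonics and register
   numbers are illustrative):

       crj  %r1,%r2,...     compare and branch: %r2 read complemented
       lr   %r3,%r2         %r2 read non-complemented in the same group

   If %r2 is delivered via a bypass, the pipeline recycles.  The code
   above either swaps the compare into crj %r2,%r1 with the condition
   reversed, or, when swapping would just move the conflict, separates
   the two instructions with a NOP.  */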
13881 /* Number of INSNs to be scanned backward in the last BB of the loop
13882 and forward in the first BB of the loop. This usually should be a
13883 bit more than the number of INSNs which could go into one group. */
13885 #define S390_OSC_SCAN_INSN_NUM 5
13887 /* Scan LOOP for static OSC collisions and return true if an osc_break
13888 should be issued for this loop. */
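/* Editorial example of a loop the scan below is meant to catch (the C
   source is illustrative): an accumulator kept in memory,

       for (i = 0; i < n; i++)
         *sum += x[i];

   stores to *sum at the bottom of each iteration and reloads the very
   same base/index/displacement address at the top of the next one, a
   classic operand-store-compare (OSC) situation that an osc_break can
   defuse.  */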
13890 s390_adjust_loop_scan_osc (struct loop* loop)
13893 HARD_REG_SET modregs, newregs;
13894 rtx_insn *insn, *store_insn = NULL;
13896 struct s390_address addr_store, addr_load;
13897 subrtx_iterator::array_type array;
13900 CLEAR_HARD_REG_SET (modregs);
13903 FOR_BB_INSNS_REVERSE (loop->latch, insn)
13905 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13909 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13912 find_all_hard_reg_sets (insn, &newregs, true);
13913 IOR_HARD_REG_SET (modregs, newregs);
13915 set = single_set (insn);
13919 if (MEM_P (SET_DEST (set))
13920 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
13927 if (store_insn == NULL_RTX)
13931 FOR_BB_INSNS (loop->header, insn)
13933 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13936 if (insn == store_insn)
13940 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13943 find_all_hard_reg_sets (insn, &newregs, true);
13944 IOR_HARD_REG_SET (modregs, newregs);
13946 set = single_set (insn);
13950 /* An intermediate store disrupts static OSC checking
13952 if (MEM_P (SET_DEST (set))
13953 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
13956 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
13958 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
13959 && rtx_equal_p (addr_load.base, addr_store.base)
13960 && rtx_equal_p (addr_load.indx, addr_store.indx)
13961 && rtx_equal_p (addr_load.disp, addr_store.disp))
13963 if ((addr_load.base != NULL_RTX
13964 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
13965 || (addr_load.indx != NULL_RTX
13966 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
13973 /* Look for adjustments which can be done on simple innermost loops. */
13976 s390_adjust_loops ()
13978 struct loop *loop = NULL;
13981 compute_bb_for_insn ();
13983 /* Find the loops. */
13984 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
13986 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
13990 flow_loop_dump (loop, dump_file, NULL, 0);
13991 fprintf (dump_file, ";; OSC loop scan Loop: ");
13993 if (loop->latch == NULL
13994 || pc_set (BB_END (loop->latch)) == NULL_RTX
13995 || !s390_adjust_loop_scan_osc (loop))
13999 if (loop->latch == NULL)
14000 fprintf (dump_file, " multiple backward jumps\n");
14003 fprintf (dump_file, " header insn: %d latch insn: %d ",
14004 INSN_UID (BB_HEAD (loop->header)),
14005 INSN_UID (BB_END (loop->latch)));
14006 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
14007 fprintf (dump_file, " loop does not end with jump\n");
14009 fprintf (dump_file, " not instrumented\n");
14015 rtx_insn *new_insn;
14018 fprintf (dump_file, " adding OSC break insn: ");
14019 new_insn = emit_insn_before (gen_osc_break (),
14020 BB_END (loop->latch));
14021 INSN_ADDRESSES_NEW (new_insn, -1);
14025 loop_optimizer_finalize ();
14027 df_finish_pass (false);
14030 /* Perform machine-dependent processing. */
14035 bool pool_overflow = false;
14036 int hw_before, hw_after;
14038 if (s390_tune == PROCESSOR_2964_Z13)
14039 s390_adjust_loops ();
14041 /* Make sure all splits have been performed; splits after
14042 machine_dependent_reorg might confuse insn length counts. */
14043 split_all_insns_noflow ();
14045 /* Install the main literal pool and the associated base
14046 register load insns.
14048 In addition, there are two problematic situations we need
14051 - the literal pool might be > 4096 bytes in size, so that
14052 some of its elements cannot be directly accessed
14054 - a branch target might be > 64K away from the branch, so that
14055 it is not possible to use a PC-relative instruction.
14057 To fix those, we split the single literal pool into multiple
14058 pool chunks, reloading the pool base register at various
14059 points throughout the function to ensure it always points to
14060 the pool chunk the following code expects, and / or replace
14061 PC-relative branches by absolute branches.
14063 However, the two problems are interdependent: splitting the
14064 literal pool can move a branch further away from its target,
14065 causing the 64K limit to overflow, and on the other hand,
14066 replacing a PC-relative branch by an absolute branch means
14067 we need to put the branch target address into the literal
14068 pool, possibly causing it to overflow.
14070 So, we loop trying to fix up both problems until we manage
14071 to satisfy both conditions at the same time. Note that the
14072 loop is guaranteed to terminate as every pass of the loop
14073 strictly decreases the total number of PC-relative branches
14074 in the function. (This is not completely true as there
14075 might be branch-over-pool insns introduced by chunkify_start.
14076 Those never need to be split however.) */
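/* Editorial note: the 4096-byte limit comes from the 12-bit displacement
   field of the classic RX/RS instruction formats, e.g.

       l %r1,4092(%r13)     encodable
       l %r1,4096(%r13)     displacement does not fit into 12 bits

   (assembly for illustration only), which is why an overflowing pool has
   to be split into chunks with the base register reloaded in between.  */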
14080 struct constant_pool *pool = NULL;
14082 /* Collect the literal pool. */
14083 if (!pool_overflow)
14085 pool = s390_mainpool_start ();
14087 pool_overflow = true;
14090 /* If literal pool overflowed, start to chunkify it. */
14092 pool = s390_chunkify_start ();
14094 /* Split out-of-range branches. If this has created new
14095 literal pool entries, cancel current chunk list and
14096 recompute it. zSeries machines have large branch
14097 instructions, so we never need to split a branch. */
14098 if (!TARGET_CPU_ZARCH && s390_split_branches ())
14101 s390_chunkify_cancel (pool);
14103 s390_mainpool_cancel (pool);
14108 /* If we made it up to here, both conditions are satisfied.
14109 Finish up literal pool related changes. */
14111 s390_chunkify_finish (pool);
14113 s390_mainpool_finish (pool);
14115 /* We're done splitting branches. */
14116 cfun->machine->split_branches_pending_p = false;
14120 /* Generate out-of-pool execute target insns. */
14121 if (TARGET_CPU_ZARCH)
14123 rtx_insn *insn, *target;
14126 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14128 label = s390_execute_label (insn);
14132 gcc_assert (label != const0_rtx);
14134 target = emit_label (XEXP (label, 0));
14135 INSN_ADDRESSES_NEW (target, -1);
14137 target = emit_insn (s390_execute_target (insn));
14138 INSN_ADDRESSES_NEW (target, -1);
14142 /* Try to optimize prologue and epilogue further. */
14143 s390_optimize_prologue ();
14145 /* Walk over the insns and do some >=z10 specific changes. */
14146 if (s390_tune >= PROCESSOR_2097_Z10)
14149 bool insn_added_p = false;
14151 /* The insn lengths and addresses have to be up to date for the
14152 following manipulations. */
14153 shorten_branches (get_insns ());
14155 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14157 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14161 insn_added_p |= s390_fix_long_loop_prediction (insn);
14163 if ((GET_CODE (PATTERN (insn)) == PARALLEL
14164 || GET_CODE (PATTERN (insn)) == SET)
14165 && s390_tune == PROCESSOR_2097_Z10)
14166 insn_added_p |= s390_z10_optimize_cmp (insn);
14169 /* Adjust branches if we added new instructions. */
14171 shorten_branches (get_insns ());
14174 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
14179 /* Insert NOPs for hotpatching. */
14180 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14182 /* Emit NOPs 1. inside the area covered by debug information to allow setting
14183 breakpoints at the NOPs,
14184 2. before any insn which results in an asm instruction,
14185 3. before in-function labels to avoid jumping to the NOPs, for
14186 example as part of a loop,
14187 4. before any barrier in case the function is completely empty
14188 (__builtin_unreachable ()) and has neither internal labels nor
14191 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
14193 /* Output a series of NOPs before the first active insn. */
14194 while (insn && hw_after > 0)
14196 if (hw_after >= 3 && TARGET_CPU_ZARCH)
14198 emit_insn_before (gen_nop_6_byte (), insn);
14201 else if (hw_after >= 2)
14203 emit_insn_before (gen_nop_4_byte (), insn);
14208 emit_insn_before (gen_nop_2_byte (), insn);
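/* Editorial worked example (assuming each emitted NOP consumes its size
   in halfwords from hw_after): hw_after == 5 on a zarch target yields one
   6-byte NOP (3 halfwords) followed by one 4-byte NOP (2 halfwords),
   i.e. 10 bytes of hotpatch padding after the function label.  */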
14215 /* Return true if INSN is an fp load insn writing register REGNO. */
14217 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
14220 enum attr_type flag = s390_safe_attr_type (insn);
14222 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
14225 set = single_set (insn);
14227 if (set == NULL_RTX)
14230 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
14233 if (REGNO (SET_DEST (set)) != regno)
14239 /* This value describes the distance to be avoided between an
14240 arithmetic fp instruction and an fp load writing the same register.
14241 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
14242 fine, but the exact value has to be avoided. Otherwise the FP
14243 pipeline will throw an exception causing a major penalty. */
14244 #define Z10_EARLYLOAD_DISTANCE 7
14246 /* Rearrange the ready list in order to avoid the situation described
14247 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14248 moved to the very end of the ready list. */
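/* Editorial sketch: if the insn issued Z10_EARLYLOAD_DISTANCE - 1 active
   insns ago was an arithmetic FP instruction writing, say, %f0, an FP
   load of %f0 taken from the ready list next would land at exactly the
   forbidden distance of 7.  Deferring the load lets some other insn issue
   first, so the load ends up at distance 8 instead (register number
   chosen for illustration).  */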
14250 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
14252 unsigned int regno;
14253 int nready = *nready_p;
14258 enum attr_type flag;
14261 /* Skip DISTANCE - 1 active insns. */
14262 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14263 distance > 0 && insn != NULL_RTX;
14264 distance--, insn = prev_active_insn (insn))
14265 if (CALL_P (insn) || JUMP_P (insn))
14268 if (insn == NULL_RTX)
14271 set = single_set (insn);
14273 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14274 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14277 flag = s390_safe_attr_type (insn);
14279 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14282 regno = REGNO (SET_DEST (set));
14285 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14292 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
14297 /* The s390_sched_state variable tracks the state of the current or
14298 the last instruction group.
14300 0,1,2 number of instructions scheduled in the current group
14301 3 the last group is complete - normal insns
14302 4 the last group was a cracked/expanded insn */
14304 static int s390_sched_state;
14306 #define S390_SCHED_STATE_NORMAL 3
14307 #define S390_SCHED_STATE_CRACKED 4
14309 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14310 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14311 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14312 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
14314 static unsigned int
14315 s390_get_sched_attrmask (rtx_insn *insn)
14317 unsigned int mask = 0;
14321 case PROCESSOR_2827_ZEC12:
14322 if (get_attr_zEC12_cracked (insn))
14323 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14324 if (get_attr_zEC12_expanded (insn))
14325 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14326 if (get_attr_zEC12_endgroup (insn))
14327 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14328 if (get_attr_zEC12_groupalone (insn))
14329 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14331 case PROCESSOR_2964_Z13:
14332 case PROCESSOR_3906_Z14:
14333 if (get_attr_z13_cracked (insn))
14334 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14335 if (get_attr_z13_expanded (insn))
14336 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14337 if (get_attr_z13_endgroup (insn))
14338 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14339 if (get_attr_z13_groupalone (insn))
14340 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14343 gcc_unreachable ();
14348 static unsigned int
14349 s390_get_unit_mask (rtx_insn *insn, int *units)
14351 unsigned int mask = 0;
14355 case PROCESSOR_2964_Z13:
14356 case PROCESSOR_3906_Z14:
14358 if (get_attr_z13_unit_lsu (insn))
14360 if (get_attr_z13_unit_fxu (insn))
14362 if (get_attr_z13_unit_vfu (insn))
14366 gcc_unreachable ();
14371 /* Return the scheduling score for INSN. The higher the score the
14372 better. The score is calculated from the OOO scheduling attributes
14373 of INSN and the scheduling state s390_sched_state. */
14375 s390_sched_score (rtx_insn *insn)
14377 unsigned int mask = s390_get_sched_attrmask (insn);
14380 switch (s390_sched_state)
14383 /* Try to put insns into the first slot which would otherwise
14385 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14386 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14388 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14392 /* Prefer not cracked insns while trying to put together a
14394 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14395 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14396 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14398 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14402 /* Prefer not cracked insns while trying to put together a
14404 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14405 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14406 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14408 /* Prefer endgroup insns in the last slot. */
14409 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14412 case S390_SCHED_STATE_NORMAL:
14413 /* Prefer not cracked insns if the last was not cracked. */
14414 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14415 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14417 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14420 case S390_SCHED_STATE_CRACKED:
14421 /* Try to keep cracked insns together to prevent them from
14422 interrupting groups. */
14423 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14424 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14429 if (s390_tune >= PROCESSOR_2964_Z13)
14432 unsigned unit_mask, m = 1;
14434 unit_mask = s390_get_unit_mask (insn, &units);
14435 gcc_assert (units <= MAX_SCHED_UNITS);
14437 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14438 ago the last insn of this unit type got scheduled. This is
14439 supposed to help provide a proper instruction mix to the
14441 for (i = 0; i < units; i++, m <<= 1)
14443 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14444 MAX_SCHED_MIX_DISTANCE);
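/* Editorial worked example, with illustrative values
   MAX_SCHED_MIX_SCORE == 8 and MAX_SCHED_MIX_DISTANCE == 100 assumed for
   the two macros: a unit whose last use was 50 insns ago contributes
   50 * 8 / 100 == 4 to the score, so insns using long-idle units are
   preferred and the instruction mix evens out.  */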
14449 /* This function is called via hook TARGET_SCHED_REORDER before
14450 issuing one insn from list READY which contains *NREADYP entries.
14451 For target z10 it reorders load instructions to avoid early load
14452 conflicts in the floating point pipeline */
14454 s390_sched_reorder (FILE *file, int verbose,
14455 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
14457 if (s390_tune == PROCESSOR_2097_Z10
14458 && reload_completed
14460 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
14462 if (s390_tune >= PROCESSOR_2827_ZEC12
14463 && reload_completed
14467 int last_index = *nreadyp - 1;
14468 int max_index = -1;
14469 int max_score = -1;
14472 /* Just move the insn with the highest score to the top (the
14473 end) of the list. A full sort is not needed since a conflict
14474 in the hazard recognition cannot happen. So the top insn in
14475 the ready list will always be taken. */
14476 for (i = last_index; i >= 0; i--)
14480 if (recog_memoized (ready[i]) < 0)
14483 score = s390_sched_score (ready[i]);
14484 if (score > max_score)
14491 if (max_index != -1)
14493 if (max_index != last_index)
14495 tmp = ready[max_index];
14496 ready[max_index] = ready[last_index];
14497 ready[last_index] = tmp;
14501 ";;\t\tBACKEND: move insn %d to the top of list\n",
14502 INSN_UID (ready[last_index]));
14504 else if (verbose > 5)
14506 ";;\t\tBACKEND: best insn %d already on top\n",
14507 INSN_UID (ready[last_index]));
14512 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14515 for (i = last_index; i >= 0; i--)
14517 unsigned int sched_mask;
14518 rtx_insn *insn = ready[i];
14520 if (recog_memoized (insn) < 0)
14523 sched_mask = s390_get_sched_attrmask (insn);
14524 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14526 s390_sched_score (insn));
14527 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14528 ((M) & sched_mask) ? #ATTR : "");
14529 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14530 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14531 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14532 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14533 #undef PRINT_SCHED_ATTR
14534 if (s390_tune >= PROCESSOR_2964_Z13)
14536 unsigned int unit_mask, m = 1;
14539 unit_mask = s390_get_unit_mask (insn, &units);
14540 fprintf (file, "(units:");
14541 for (j = 0; j < units; j++, m <<= 1)
14543 fprintf (file, " u%d", j);
14544 fprintf (file, ")");
14546 fprintf (file, "\n");
14551 return s390_issue_rate ();
14555 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14556 the scheduler has issued INSN. It stores the last issued insn into
14557 last_scheduled_insn in order to make it available for
14558 s390_sched_reorder. */
14560 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14562 last_scheduled_insn = insn;
14564 if (s390_tune >= PROCESSOR_2827_ZEC12
14565 && reload_completed
14566 && recog_memoized (insn) >= 0)
14568 unsigned int mask = s390_get_sched_attrmask (insn);
14570 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14571 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14572 s390_sched_state = S390_SCHED_STATE_CRACKED;
14573 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14574 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14575 s390_sched_state = S390_SCHED_STATE_NORMAL;
14578 /* Only normal insns are left (mask == 0). */
14579 switch (s390_sched_state)
14584 case S390_SCHED_STATE_NORMAL:
14585 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
14586 s390_sched_state = 1;
14588 s390_sched_state++;
14591 case S390_SCHED_STATE_CRACKED:
14592 s390_sched_state = S390_SCHED_STATE_NORMAL;
14597 if (s390_tune >= PROCESSOR_2964_Z13)
14600 unsigned unit_mask, m = 1;
14602 unit_mask = s390_get_unit_mask (insn, &units);
14603 gcc_assert (units <= MAX_SCHED_UNITS);
14605 for (i = 0; i < units; i++, m <<= 1)
14607 last_scheduled_unit_distance[i] = 0;
14608 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14609 last_scheduled_unit_distance[i]++;
14614 unsigned int sched_mask;
14616 sched_mask = s390_get_sched_attrmask (insn);
14618 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14619 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14620 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14621 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14622 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14623 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14624 #undef PRINT_SCHED_ATTR
14626 if (s390_tune >= PROCESSOR_2964_Z13)
14628 unsigned int unit_mask, m = 1;
14631 unit_mask = s390_get_unit_mask (insn, &units);
14632 fprintf (file, "(units:");
14633 for (j = 0; j < units; j++, m <<= 1)
14635 fprintf (file, " %d", j);
14636 fprintf (file, ")");
14638 fprintf (file, " sched state: %d\n", s390_sched_state);
14640 if (s390_tune >= PROCESSOR_2964_Z13)
14644 s390_get_unit_mask (insn, &units);
14646 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14647 for (j = 0; j < units; j++)
14648 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14649 fprintf (file, "\n");
14654 if (GET_CODE (PATTERN (insn)) != USE
14655 && GET_CODE (PATTERN (insn)) != CLOBBER)
14662 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14663 int verbose ATTRIBUTE_UNUSED,
14664 int max_ready ATTRIBUTE_UNUSED)
14666 last_scheduled_insn = NULL;
14667 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14668 s390_sched_state = 0;
14671 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14672 the number of times struct loop *loop should be unrolled when tuning for
14673 cpus with a built-in stride prefetcher.
14674 The loop is analyzed for memory accesses by walking over all rtxs
14675 of the loop body. Depending on the loop_depth and the amount of
14676 memory accesses a new number <=nunroll is returned to improve the
14677 behavior of the hardware prefetch unit. */
14679 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14684 unsigned mem_count = 0;
14686 if (s390_tune < PROCESSOR_2097_Z10)
14689 /* Count the number of memory references within the loop body. */
14690 bbs = get_loop_body (loop);
14691 subrtx_iterator::array_type array;
14692 for (i = 0; i < loop->num_nodes; i++)
14693 FOR_BB_INSNS (bbs[i], insn)
14694 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14695 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14700 /* Prevent division by zero; nunroll needs no adjustment in that case. */
14701 if (mem_count == 0)
14704 switch (loop_depth(loop))
14707 return MIN (nunroll, 28 / mem_count);
14709 return MIN (nunroll, 22 / mem_count);
14711 return MIN (nunroll, 16 / mem_count);
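/* Editorial worked example (assuming the elided case labels select loop
   depths 1, 2 and the default): a depth-1 loop containing 4 memory
   references with nunroll == 8 yields MIN (8, 28 / 4) == 7, while the
   same loop with 14 references yields MIN (8, 2) == 2.  */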
14715 /* Restore the current options. This is a hook function and also called
14719 s390_function_specific_restore (struct gcc_options *opts,
14720 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14722 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14726 s390_option_override_internal (bool main_args_p,
14727 struct gcc_options *opts,
14728 const struct gcc_options *opts_set)
14730 const char *prefix;
14731 const char *suffix;
14733 /* Set up prefix/suffix so the error messages refer to either the command
14734 line argument, or the attribute(target). */
14742 prefix = "option(\"";
14747 /* Architecture mode defaults according to ABI. */
14748 if (!(opts_set->x_target_flags & MASK_ZARCH))
14751 opts->x_target_flags |= MASK_ZARCH;
14753 opts->x_target_flags &= ~MASK_ZARCH;
14756 /* Set the march default in case it hasn't been specified on cmdline. */
14757 if (!opts_set->x_s390_arch)
14758 opts->x_s390_arch = PROCESSOR_2064_Z900;
14759 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14760 || opts->x_s390_arch == PROCESSOR_9672_G6)
14761 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14762 "in future releases; use at least %sarch=z900%s",
14763 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14764 suffix, prefix, suffix);
14766 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14768 /* Determine processor to tune for. */
14769 if (!opts_set->x_s390_tune)
14770 opts->x_s390_tune = opts->x_s390_arch;
14771 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14772 || opts->x_s390_tune == PROCESSOR_9672_G6)
14773 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14774 "in future releases; use at least %stune=z900%s",
14775 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14776 suffix, prefix, suffix);
14778 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14780 /* Sanity checks. */
14781 if (opts->x_s390_arch == PROCESSOR_NATIVE
14782 || opts->x_s390_tune == PROCESSOR_NATIVE)
14783 gcc_unreachable ();
14784 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14785 error ("z/Architecture mode not supported on %s",
14786 processor_table[(int)opts->x_s390_arch].name);
14787 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14788 error ("64-bit ABI not supported in ESA/390 mode");
14790 /* Enable hardware transactions if available and not explicitly
14791 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14792 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14794 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14795 opts->x_target_flags |= MASK_OPT_HTM;
14797 opts->x_target_flags &= ~MASK_OPT_HTM;
14800 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14802 if (TARGET_OPT_VX_P (opts->x_target_flags))
14804 if (!TARGET_CPU_VX_P (opts))
14805 error ("hardware vector support not available on %s",
14806 processor_table[(int)opts->x_s390_arch].name);
14807 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14808 error ("hardware vector support not available with -msoft-float");
14813 if (TARGET_CPU_VX_P (opts))
14814 /* Enable vector support if available and not explicitly disabled
14815 by user. E.g. with -m31 -march=z13 -mzarch */
14816 opts->x_target_flags |= MASK_OPT_VX;
14818 opts->x_target_flags &= ~MASK_OPT_VX;
14821 /* Use hardware DFP if available and not explicitly disabled by
14822 user. E.g. with -m31 -march=z10 -mzarch */
14823 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14825 if (TARGET_DFP_P (opts))
14826 opts->x_target_flags |= MASK_HARD_DFP;
14828 opts->x_target_flags &= ~MASK_HARD_DFP;
14831 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14833 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14835 if (!TARGET_CPU_DFP_P (opts))
14836 error ("hardware decimal floating point instructions"
14837 " not available on %s",
14838 processor_table[(int)opts->x_s390_arch].name);
14839 if (!TARGET_ZARCH_P (opts->x_target_flags))
14840 error ("hardware decimal floating point instructions"
14841 " not available in ESA/390 mode");
14844 opts->x_target_flags &= ~MASK_HARD_DFP;
14847 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14848 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14850 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14851 && TARGET_HARD_DFP_P (opts->x_target_flags))
14852 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14854 opts->x_target_flags &= ~MASK_HARD_DFP;
14857 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14858 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14859 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14860 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14863 if (opts->x_s390_stack_size)
14865 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14866 error ("stack size must be greater than the stack guard value");
14867 else if (opts->x_s390_stack_size > 1 << 16)
14868 error ("stack size must not be greater than 64k");
14870 else if (opts->x_s390_stack_guard)
14871 error ("-mstack-guard implies use of -mstack-size");
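/* Editorial example: -mstack-size=32768 -mstack-guard=4096 passes the
   checks above, -mstack-size=131072 is rejected (larger than 64k), and
   -mstack-guard=4096 without an accompanying -mstack-size is rejected
   as well.  */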
14873 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14874 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14875 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14878 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14880 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14881 opts->x_param_values,
14882 opts_set->x_param_values);
14883 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14884 opts->x_param_values,
14885 opts_set->x_param_values);
14886 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14887 opts->x_param_values,
14888 opts_set->x_param_values);
14889 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14890 opts->x_param_values,
14891 opts_set->x_param_values);
14894 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14895 opts->x_param_values,
14896 opts_set->x_param_values);
14897 /* values for loop prefetching */
14898 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14899 opts->x_param_values,
14900 opts_set->x_param_values);
14901 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14902 opts->x_param_values,
14903 opts_set->x_param_values);
14904 /* s390 has more than 2 levels and the size is much larger. Since
14905 we are always running virtualized, assume that we only get a small
14906 part of the caches above L1. */
14907 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14908 opts->x_param_values,
14909 opts_set->x_param_values);
14910 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14911 opts->x_param_values,
14912 opts_set->x_param_values);
14913 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14914 opts->x_param_values,
14915 opts_set->x_param_values);
14917 /* Use the alternative scheduling-pressure algorithm by default. */
14918 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14919 opts->x_param_values,
14920 opts_set->x_param_values);
14922 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
14923 opts->x_param_values,
14924 opts_set->x_param_values);
14926 /* Call target specific restore function to do post-init work. At the moment,
14927 this just sets opts->x_s390_cost_pointer. */
14928 s390_function_specific_restore (opts, NULL);
14932 s390_option_override (void)
14935 cl_deferred_option *opt;
14936 vec<cl_deferred_option> *v =
14937 (vec<cl_deferred_option> *) s390_deferred_options;
14940 FOR_EACH_VEC_ELT (*v, i, opt)
14942 switch (opt->opt_index)
14944 case OPT_mhotpatch_:
14951 strncpy (s, opt->arg, 256);
14953 t = strchr (s, ',');
14958 val1 = integral_argument (s);
14959 val2 = integral_argument (t);
14966 if (val1 == -1 || val2 == -1)
14968 /* argument is not a plain number */
14969 error ("arguments to %qs should be non-negative integers",
14973 else if (val1 > s390_hotpatch_hw_max
14974 || val2 > s390_hotpatch_hw_max)
14976 error ("argument to %qs is too large (max. %d)",
14977 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14980 s390_hotpatch_hw_before_label = val1;
14981 s390_hotpatch_hw_after_label = val2;
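/* Editorial example: -mhotpatch=1,2 yields val1 == 1 and val2 == 2,
   i.e. one halfword of padding before each function label and two
   halfwords of NOPs after it.  */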
14985 gcc_unreachable ();
14989 /* Set up function hooks. */
14990 init_machine_status = s390_init_machine_status;
14992 s390_option_override_internal (true, &global_options, &global_options_set);
14994 /* Save the initial options in case the user does function specific
14996 target_option_default_node = build_target_option_node (&global_options);
14997 target_option_current_node = target_option_default_node;
14999 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
15000 requires the arch flags to be evaluated already. Since prefetching
15001 is beneficial on s390, we enable it if available. */
15002 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
15003 flag_prefetch_loop_arrays = 1;
15005 if (!s390_pic_data_is_text_relative && !flag_pic)
15006 error ("-mno-pic-data-is-text-relative cannot be used without -fpic/-fPIC");
15010 /* Don't emit DWARF3/4 unless specifically selected. The TPF
15011 debuggers do not yet support DWARF 3/4. */
15012 if (!global_options_set.x_dwarf_strict)
15014 if (!global_options_set.x_dwarf_version)
15018 /* Register a target-specific optimization-and-lowering pass
15019 to run immediately before prologue and epilogue generation.
15021 Registering the pass must be done at start up. It's
15022 convenient to do it here. */
15023 opt_pass *new_pass = new pass_s390_early_mach (g);
15024 struct register_pass_info insert_pass_s390_early_mach =
15026 new_pass, /* pass */
15027 "pro_and_epilogue", /* reference_pass_name */
15028 1, /* ref_pass_instance_number */
15029 PASS_POS_INSERT_BEFORE /* po_op */
15031 register_pass (&insert_pass_s390_early_mach);
15034 #if S390_USE_TARGET_ATTRIBUTE
15035 /* Inner function to process the attribute((target(...))): take an argument and
15036 set the current options from the argument. If we have a list, recursively go
15040 s390_valid_target_attribute_inner_p (tree args,
15041 struct gcc_options *opts,
15042 struct gcc_options *new_opts_set,
15048 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
15049 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
15050 static const struct
15052 const char *string;
15056 int only_as_pragma;
15059 S390_ATTRIB ("arch=", OPT_march_, 1),
15060 S390_ATTRIB ("tune=", OPT_mtune_, 1),
15061 /* uinteger options */
15062 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
15063 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
15064 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
15065 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
15067 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
15068 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
15069 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
15070 S390_ATTRIB ("htm", OPT_mhtm, 0),
15071 S390_ATTRIB ("vx", OPT_mvx, 0),
15072 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
15073 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
15074 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
15075 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
15076 S390_PRAGMA ("zvector", OPT_mzvector, 0),
15077 /* boolean options */
15078 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
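/* Editorial usage example for the table above:

       void f (void) __attribute__ ((target ("arch=z13,no-vx")));

   is accepted; the "no-" prefix is stripped by the negation handling
   below, while "zvector" is valid only via #pragma GCC target because it
   is declared with S390_PRAGMA (only_as_pragma).  */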
15083 /* If this is a list, recurse to get the options. */
15084 if (TREE_CODE (args) == TREE_LIST)
15087 int num_pragma_values;
15090 /* Note: attribs.c:decl_attributes prepends the values from
15091 current_target_pragma to the list of target attributes. To determine
15092 whether we're looking at a value of the attribute or the pragma we
15093 assume that the first [list_length (current_target_pragma)] values in
15094 the list are the values from the pragma. */
15095 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
15096 ? list_length (current_target_pragma) : 0;
15097 for (i = 0; args; args = TREE_CHAIN (args), i++)
15101 is_pragma = (force_pragma || i < num_pragma_values);
15102 if (TREE_VALUE (args)
15103 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
15104 opts, new_opts_set,
15113 else if (TREE_CODE (args) != STRING_CST)
15115 error ("attribute %<target%> argument not a string");
15119 /* Handle multiple arguments separated by commas. */
15120 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
15122 while (next_optstr && *next_optstr != '\0')
15124 char *p = next_optstr;
15126 char *comma = strchr (next_optstr, ',');
15127 size_t len, opt_len;
15133 enum cl_var_type var_type;
15139 len = comma - next_optstr;
15140 next_optstr = comma + 1;
15145 next_optstr = NULL;
15148 /* Recognize no-xxx. */
15149 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
15158 /* Find the option. */
15161 for (i = 0; i < ARRAY_SIZE (attrs); i++)
15163 opt_len = attrs[i].len;
15164 if (ch == attrs[i].string[0]
15165 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
15166 && memcmp (p, attrs[i].string, opt_len) == 0)
15168 opt = attrs[i].opt;
15169 if (!opt_set_p && cl_options[opt].cl_reject_negative)
15171 mask = cl_options[opt].var_value;
15172 var_type = cl_options[opt].var_type;
15178 /* Process the option. */
15181 error ("attribute(target(\"%s\")) is unknown", orig_p);
15184 else if (attrs[i].only_as_pragma && !force_pragma)
15186 /* Value is not allowed for the target attribute. */
15187 error ("value %qs is not supported by attribute %<target%>",
15192 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
15194 if (var_type == CLVC_BIT_CLEAR)
15195 opt_set_p = !opt_set_p;
15198 opts->x_target_flags |= mask;
15200 opts->x_target_flags &= ~mask;
15201 new_opts_set->x_target_flags |= mask;
15204 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
15208 if (cl_options[opt].cl_uinteger)
15210 /* Unsigned integer argument. Code based on the function
15211 decode_cmdline_option () in opts-common.c. */
15212 value = integral_argument (p + opt_len);
15215 value = (opt_set_p) ? 1 : 0;
15219 struct cl_decoded_option decoded;
15221 /* Value range check; only implemented for numeric and boolean
15222 options at the moment. */
15223 generate_option (opt, NULL, value, CL_TARGET, &decoded);
15224 s390_handle_option (opts, new_opts_set, &decoded, input_location);
15225 set_option (opts, new_opts_set, opt, value,
15226 p + opt_len, DK_UNSPECIFIED, input_location,
15231 error ("attribute(target(\"%s\")) is unknown", orig_p);
15236 else if (cl_options[opt].var_type == CLVC_ENUM)
15241 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
15243 set_option (opts, new_opts_set, opt, value,
15244 p + opt_len, DK_UNSPECIFIED, input_location,
15248 error ("attribute(target(\"%s\")) is unknown", orig_p);
15254 gcc_unreachable ();
15259 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15262 s390_valid_target_attribute_tree (tree args,
15263 struct gcc_options *opts,
15264 const struct gcc_options *opts_set,
15267 tree t = NULL_TREE;
15268 struct gcc_options new_opts_set;
15270 memset (&new_opts_set, 0, sizeof (new_opts_set));
15272 /* Process each of the options on the chain. */
15273 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
15275 return error_mark_node;
15277 /* If some option was set (even if it has not changed), rerun
15278 s390_option_override_internal, and then save the options away. */
15279 if (new_opts_set.x_target_flags
15280 || new_opts_set.x_s390_arch
15281 || new_opts_set.x_s390_tune
15282 || new_opts_set.x_s390_stack_guard
15283 || new_opts_set.x_s390_stack_size
15284 || new_opts_set.x_s390_branch_cost
15285 || new_opts_set.x_s390_warn_framesize
15286 || new_opts_set.x_s390_warn_dynamicstack_p)
15288 const unsigned char *src = (const unsigned char *)opts_set;
15289 unsigned char *dest = (unsigned char *)&new_opts_set;
15292 /* Merge the original option flags into the new ones. */
15293 for (i = 0; i < sizeof(*opts_set); i++)
15296 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
15297 s390_option_override_internal (false, opts, &new_opts_set);
15298 /* Save the current options unless we are validating options for
15300 t = build_target_option_node (opts);
15305 /* Hook to validate attribute((target("string"))). */
15308 s390_valid_target_attribute_p (tree fndecl,
15309 tree ARG_UNUSED (name),
15311 int ARG_UNUSED (flags))
15313 struct gcc_options func_options;
15314 tree new_target, new_optimize;
15317 /* attribute((target("default"))) does nothing, beyond
15318 affecting multi-versioning. */
15319 if (TREE_VALUE (args)
15320 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
15321 && TREE_CHAIN (args) == NULL_TREE
15322 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
15325 tree old_optimize = build_optimization_node (&global_options);
15327 /* Get the optimization options of the current function. */
15328 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
15330 if (!func_optimize)
15331 func_optimize = old_optimize;
15333 /* Init func_options. */
15334 memset (&func_options, 0, sizeof (func_options));
15335 init_options_struct (&func_options, NULL);
15336 lang_hooks.init_options_struct (&func_options);
15338 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
15340 /* Initialize func_options to the default before its target options can
15342 cl_target_option_restore (&func_options,
15343 TREE_TARGET_OPTION (target_option_default_node));
15345 new_target = s390_valid_target_attribute_tree (args, &func_options,
15346 &global_options_set,
15348 current_target_pragma));
15349 new_optimize = build_optimization_node (&func_options);
15350 if (new_target == error_mark_node)
15352 else if (fndecl && new_target)
15354 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
15355 if (old_optimize != new_optimize)
15356 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
15361 /* Hook to determine if one function can safely inline another. */
15364 s390_can_inline_p (tree caller, tree callee)
15366 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
15367 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
15370 callee_tree = target_option_default_node;
15372 caller_tree = target_option_default_node;
15373 if (callee_tree == caller_tree)
15376 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
15377 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
15380 if ((caller_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP))
15381 != (callee_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP)))
15384 /* Don't inline functions to be compiled for a more recent arch into a
15385 function for an older arch. */
15386 else if (caller_opts->x_s390_arch < callee_opts->x_s390_arch)
15389 /* Inlining a hard float function into a soft float function is only
15390 allowed if the hard float function doesn't actually make use of
15393 We are called from FEs for multi-versioning call optimization, so
15394 beware that ipa_fn_summaries may not be available. */
15395 else if (((TARGET_SOFT_FLOAT_P (caller_opts->x_target_flags)
15396 && !TARGET_SOFT_FLOAT_P (callee_opts->x_target_flags))
15397 || (!TARGET_HARD_DFP_P (caller_opts->x_target_flags)
15398 && TARGET_HARD_DFP_P (callee_opts->x_target_flags)))
15399 && (! ipa_fn_summaries
15400 || ipa_fn_summaries->get
15401 (cgraph_node::get (callee))->fp_expressions))
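/* Editorial example: a callee declared with
   __attribute__ ((target ("arch=z13"))) is not inlined into a caller
   built with -march=zEC12 (the callee arch is newer), and a hard-DFP
   callee is inlined into a soft-float caller only when the IPA summary
   proves it contains no FP expressions.  */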
15407 /* Restore target globals from NEW_TREE and invalidate s390_previous_fndecl
15411 s390_activate_target_options (tree new_tree)
15413 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
15414 if (TREE_TARGET_GLOBALS (new_tree))
15415 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
15416 else if (new_tree == target_option_default_node)
15417 restore_target_globals (&default_target_globals);
15419 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
15420 s390_previous_fndecl = NULL_TREE;
15423 /* Establish appropriate back-end context for processing the function
15424 FNDECL. The argument might be NULL to indicate processing at top
15425 level, outside of any function scope. */
15427 s390_set_current_function (tree fndecl)
15429 /* Only change the context if the function changes. This hook is called
15430 several times in the course of compiling a function, and we don't want to
15431 slow things down too much or call target_reinit when it isn't safe. */
15432 if (fndecl == s390_previous_fndecl)
15436 if (s390_previous_fndecl == NULL_TREE)
15437 old_tree = target_option_current_node;
15438 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
15439 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
15441 old_tree = target_option_default_node;
15443 if (fndecl == NULL_TREE)
15445 if (old_tree != target_option_current_node)
15446 s390_activate_target_options (target_option_current_node);
15450 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
15451 if (new_tree == NULL_TREE)
15452 new_tree = target_option_default_node;
15454 if (old_tree != new_tree)
15455 s390_activate_target_options (new_tree);
15456 s390_previous_fndecl = fndecl;
15460 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
15463 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
15464 unsigned int align ATTRIBUTE_UNUSED,
15465 enum by_pieces_operation op ATTRIBUTE_UNUSED,
15466 bool speed_p ATTRIBUTE_UNUSED)
15468 return (size == 1 || size == 2
15469 || size == 4 || (TARGET_ZARCH && size == 8));
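/* Editorial example: a constant-size 8-byte block copy may be expanded
   piecewise inline on z/Architecture, whereas 3- or 16-byte blocks fall
   back to the target's block-move expanders.  */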
15472 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
15475 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
15477 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
15478 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
15479 tree call_efpc = build_call_expr (efpc, 0);
15480 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
15482 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
15483 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
15484 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
15485 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
15486 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
15487 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
15489 /* Generates the equivalent of feholdexcept (&fenv_var)
15491 fenv_var = __builtin_s390_efpc ();
15492 __builtin_s390_sfpc (fenv_var & mask) */
15493 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
15495 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
15496 build_int_cst (unsigned_type_node,
15497 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
15498 FPC_EXCEPTION_MASK)));
15499 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
15500 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
15502 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
15504 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
15505 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
15506 build_int_cst (unsigned_type_node,
15507 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
15508 *clear = build_call_expr (sfpc, 1, new_fpc);
15510 /* Generates the equivalent of feupdateenv (fenv_var)
15512 old_fpc = __builtin_s390_efpc ();
15513 __builtin_s390_sfpc (fenv_var);
15514 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
15516 old_fpc = create_tmp_var_raw (unsigned_type_node);
15517 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
15518 old_fpc, call_efpc);
15520 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
15522 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
15523 build_int_cst (unsigned_type_node,
15525 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
15526 build_int_cst (unsigned_type_node,
15528 tree atomic_feraiseexcept
15529 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
15530 raise_old_except = build_call_expr (atomic_feraiseexcept,
15531 1, raise_old_except);
15533 *update = build2 (COMPOUND_EXPR, void_type_node,
15534 build2 (COMPOUND_EXPR, void_type_node,
15535 store_old_fpc, set_new_fpc),
15538 #undef FPC_EXCEPTION_MASK
15539 #undef FPC_FLAGS_MASK
15540 #undef FPC_DXC_MASK
15541 #undef FPC_EXCEPTION_MASK_SHIFT
15542 #undef FPC_FLAGS_SHIFT
15543 #undef FPC_DXC_SHIFT
15546 /* Return the vector mode to be used for inner mode MODE when doing vectorization. */
15548 static machine_mode
15549 s390_preferred_simd_mode (scalar_mode mode)
15569 /* Our hardware does not require vectors to be strictly aligned. */
15571 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
15572 const_tree type ATTRIBUTE_UNUSED,
15573 int misalignment ATTRIBUTE_UNUSED,
15574 bool is_packed ATTRIBUTE_UNUSED)
15579 return default_builtin_support_vector_misalignment (mode, type, misalignment,
15583 /* The vector ABI requires vector types to be aligned on an 8 byte
15584 boundary (our stack alignment). However, we allow this to be
15585 overridden by the user, although this definitely breaks the ABI. */
15586 static HOST_WIDE_INT
15587 s390_vector_alignment (const_tree type)
15589 if (!TARGET_VX_ABI)
15590 return default_vector_alignment (type);
15592 if (TYPE_USER_ALIGN (type))
15593 return TYPE_ALIGN (type);
15595 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
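/* Editorial worked example: TYPE_SIZE is measured in bits, so a 16-byte
   vector (128 bits) gets MIN (64, 128) == 64, i.e. the 8-byte stack
   alignment, while a 4-byte vector keeps its natural 32-bit alignment.  */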
15598 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15599 /* Implement TARGET_ASM_FILE_START. */
15601 s390_asm_file_start (void)
15603 default_file_start ();
15604 s390_asm_output_machine_for_arch (asm_out_file);
15608 /* Implement TARGET_ASM_FILE_END. */
15610 s390_asm_file_end (void)
15612 #ifdef HAVE_AS_GNU_ATTRIBUTE
15613 varpool_node *vnode;
15614 cgraph_node *cnode;
15616 FOR_EACH_VARIABLE (vnode)
15617 if (TREE_PUBLIC (vnode->decl))
15618 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
15620 FOR_EACH_FUNCTION (cnode)
15621 if (TREE_PUBLIC (cnode->decl))
15622 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
15625 if (s390_vector_abi != 0)
15626 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
15629 file_end_indicate_exec_stack ();
15631 if (flag_split_stack)
15632 file_end_indicate_split_stack ();
15635 /* Return true if TYPE is a vector bool type. */
15637 s390_vector_bool_type_p (const_tree type)
15639 return TYPE_VECTOR_OPAQUE (type);
15642 /* Return the diagnostic message string if the binary operation OP is
15643 not permitted on TYPE1 and TYPE2, NULL otherwise. */
15645 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15647 bool bool1_p, bool2_p;
15651 machine_mode mode1, mode2;
15653 if (!TARGET_ZVECTOR)
15656 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15659 bool1_p = s390_vector_bool_type_p (type1);
15660 bool2_p = s390_vector_bool_type_p (type2);
15662 /* Mixing signed and unsigned types is forbidden for all
15664 if (!bool1_p && !bool2_p
15665 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15666 return N_("types differ in signedness");
15668 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15669 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15670 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15671 || op == ROUND_DIV_EXPR);
15672 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15673 || op == EQ_EXPR || op == NE_EXPR);
15675 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15676 return N_("binary operator does not support two vector bool operands");
15678 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15679 return N_("binary operator does not support vector bool operand");
15681 mode1 = TYPE_MODE (type1);
15682 mode2 = TYPE_MODE (type2);
15684 if (bool1_p != bool2_p && plusminus_p
15685 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15686 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15687 return N_("binary operator does not support mixing vector "
15688 "bool with floating point vector operands");
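/* Editorial examples of the checks above (using the z/Vector language
   extension enabled by -mzvector):

       vector bool int b1, b2;
       vector signed int s;
       vector unsigned int u;

       b1 + b2;   <- rejected: two vector bool operands for plus/minus
       s * u;     <- rejected: operands differ in signedness
       s + b1;    <- accepted: bool may mix with integer plus/minus
*/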
/* Implement TARGET_C_EXCESS_PRECISION.

   FIXME: For historical reasons, float_t and double_t are typedef'ed to
   double on s390, causing operations on float_t to operate in a higher
   precision than is necessary.  However, it is not the case that SFmode
   operations have implicit excess precision, and we generate better
   code if we let the compiler know no implicit extra precision is added.

   That means when we are compiling with -fexcess-precision=fast, the value
   we set for FLT_EVAL_METHOD will be out of line with the actual precision of
   float_t (though they would be correct for -fexcess-precision=standard).

   A complete fix would modify glibc to remove the unnecessary typedef
   of float_t to double.  */

static enum flt_eval_method
s390_excess_precision (enum excess_precision_type type)
{
  switch (type)
    {
    case EXCESS_PRECISION_TYPE_IMPLICIT:
    case EXCESS_PRECISION_TYPE_FAST:
      /* The fastest type to promote to will always be the native type,
	 whether that occurs with implicit excess precision or
	 otherwise.  */
      return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
    case EXCESS_PRECISION_TYPE_STANDARD:
      /* Otherwise, when we are in a standards compliant mode, to
	 ensure consistency with the implementation in glibc, report that
	 float is evaluated to the range and precision of double.  */
      return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
    default:
      gcc_unreachable ();
    }
  return FLT_EVAL_METHOD_UNPREDICTABLE;
}
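
/* Illustrative example of the effect: with -fexcess-precision=standard
   the hook reports FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE, so in

       float a, b, c;
       c = a * b + c;

   the intermediate results are evaluated to the range and precision of
   double and only the final assignment narrows back to float, matching
   glibc's float_t/double_t typedefs.  With -fexcess-precision=fast the
   same expression is evaluated in single precision (SFmode)
   throughout.  */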
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
s390_asan_shadow_offset (void)
{
  return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
}
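
/* Illustrative note: with AddressSanitizer's default shadow scale of 3,
   an access to ADDR is checked through the shadow byte at

       (ADDR >> 3) + s390_asan_shadow_offset ()

   i.e. (ADDR >> 3) + (1UL << 52) in 64-bit code.  The 31-bit offset
   0x20000000 is chosen so that the shadow region still fits into the
   2 GB address space.  */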
/* Initialize GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER s390_assemble_integer

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""

#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE s390_option_override

#ifdef TARGET_THREAD_SSP_OFFSET
#undef TARGET_STACK_PROTECT_GUARD
#define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
#endif
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL s390_builtin_decl

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_C_EXCESS_PRECISION
#define TARGET_C_EXCESS_PRECISION s390_excess_precision

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  s390_builtin_vectorization_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_ASAN_SHADOW_OFFSET
#define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true

#undef TARGET_KEEP_LEAF_WHEN_PROFILED
#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif

#undef TARGET_DWARF_FRAME_REG_MODE
#define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE s390_attribute_table

#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  s390_use_by_pieces_infrastructure_p

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode

#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment

#undef TARGET_VECTOR_ALIGNMENT
#define TARGET_VECTOR_ALIGNMENT s390_vector_alignment

#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
#ifdef HAVE_AS_MACHINE_MACHINEMODE
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START s390_asm_file_start
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END s390_asm_file_end
#if S390_USE_TARGET_ATTRIBUTE
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION s390_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P s390_can_inline_p
#endif

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE s390_function_specific_restore
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"