1 /* Subroutines used for code generation on IBM S/390 and zSeries
2 Copyright (C) 1999-2017 Free Software Foundation, Inc.
3 Contributed by Hartmut Penner (hpenner@de.ibm.com) and
4 Ulrich Weigand (uweigand@de.ibm.com) and
5 Andreas Krebbel (Andreas.Krebbel@de.ibm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
28 #include "target-globals.h"
37 #include "stringpool.h"
45 #include "diagnostic-core.h"
46 #include "diagnostic.h"
48 #include "fold-const.h"
49 #include "print-tree.h"
50 #include "stor-layout.h"
53 #include "conditions.h"
55 #include "insn-attr.h"
67 #include "cfgcleanup.h"
69 #include "langhooks.h"
70 #include "internal-fn.h"
71 #include "gimple-fold.h"
76 #include "tree-pass.h"
81 #include "tm-constrs.h"
83 /* This file should be included last. */
84 #include "target-def.h"
86 /* Remember the last target of s390_set_current_function. */
87 static GTY(()) tree s390_previous_fndecl;
89 /* Define the specific costs for a given cpu. */
91 struct processor_costs
94 const int m; /* cost of an M instruction. */
95 const int mghi; /* cost of an MGHI instruction. */
96 const int mh; /* cost of an MH instruction. */
97 const int mhi; /* cost of an MHI instruction. */
98 const int ml; /* cost of an ML instruction. */
99 const int mr; /* cost of an MR instruction. */
100 const int ms; /* cost of an MS instruction. */
101 const int msg; /* cost of an MSG instruction. */
102 const int msgf; /* cost of an MSGF instruction. */
103 const int msgfr; /* cost of an MSGFR instruction. */
104 const int msgr; /* cost of an MSGR instruction. */
105 const int msr; /* cost of an MSR instruction. */
106 const int mult_df; /* cost of multiplication in DFmode. */
109 const int sqxbr; /* cost of square root in TFmode. */
110 const int sqdbr; /* cost of square root in DFmode. */
111 const int sqebr; /* cost of square root in SFmode. */
112 /* multiply and add */
113 const int madbr; /* cost of multiply and add in DFmode. */
114 const int maebr; /* cost of multiply and add in SFmode. */
126 #define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
129 struct processor_costs z900_cost =
131 COSTS_N_INSNS (5), /* M */
132 COSTS_N_INSNS (10), /* MGHI */
133 COSTS_N_INSNS (5), /* MH */
134 COSTS_N_INSNS (4), /* MHI */
135 COSTS_N_INSNS (5), /* ML */
136 COSTS_N_INSNS (5), /* MR */
137 COSTS_N_INSNS (4), /* MS */
138 COSTS_N_INSNS (15), /* MSG */
139 COSTS_N_INSNS (7), /* MSGF */
140 COSTS_N_INSNS (7), /* MSGFR */
141 COSTS_N_INSNS (10), /* MSGR */
142 COSTS_N_INSNS (4), /* MSR */
143 COSTS_N_INSNS (7), /* multiplication in DFmode */
144 COSTS_N_INSNS (13), /* MXBR */
145 COSTS_N_INSNS (136), /* SQXBR */
146 COSTS_N_INSNS (44), /* SQDBR */
147 COSTS_N_INSNS (35), /* SQEBR */
148 COSTS_N_INSNS (18), /* MADBR */
149 COSTS_N_INSNS (13), /* MAEBR */
150 COSTS_N_INSNS (134), /* DXBR */
151 COSTS_N_INSNS (30), /* DDBR */
152 COSTS_N_INSNS (27), /* DEBR */
153 COSTS_N_INSNS (220), /* DLGR */
154 COSTS_N_INSNS (34), /* DLR */
155 COSTS_N_INSNS (34), /* DR */
156 COSTS_N_INSNS (32), /* DSGFR */
157 COSTS_N_INSNS (32), /* DSGR */
161 struct processor_costs z990_cost =
163 COSTS_N_INSNS (4), /* M */
164 COSTS_N_INSNS (2), /* MGHI */
165 COSTS_N_INSNS (2), /* MH */
166 COSTS_N_INSNS (2), /* MHI */
167 COSTS_N_INSNS (4), /* ML */
168 COSTS_N_INSNS (4), /* MR */
169 COSTS_N_INSNS (5), /* MS */
170 COSTS_N_INSNS (6), /* MSG */
171 COSTS_N_INSNS (4), /* MSGF */
172 COSTS_N_INSNS (4), /* MSGFR */
173 COSTS_N_INSNS (4), /* MSGR */
174 COSTS_N_INSNS (4), /* MSR */
175 COSTS_N_INSNS (1), /* multiplication in DFmode */
176 COSTS_N_INSNS (28), /* MXBR */
177 COSTS_N_INSNS (130), /* SQXBR */
178 COSTS_N_INSNS (66), /* SQDBR */
179 COSTS_N_INSNS (38), /* SQEBR */
180 COSTS_N_INSNS (1), /* MADBR */
181 COSTS_N_INSNS (1), /* MAEBR */
182 COSTS_N_INSNS (60), /* DXBR */
183 COSTS_N_INSNS (40), /* DDBR */
184 COSTS_N_INSNS (26), /* DEBR */
185 COSTS_N_INSNS (176), /* DLGR */
186 COSTS_N_INSNS (31), /* DLR */
187 COSTS_N_INSNS (31), /* DR */
188 COSTS_N_INSNS (31), /* DSGFR */
189 COSTS_N_INSNS (31), /* DSGR */
193 struct processor_costs z9_109_cost =
195 COSTS_N_INSNS (4), /* M */
196 COSTS_N_INSNS (2), /* MGHI */
197 COSTS_N_INSNS (2), /* MH */
198 COSTS_N_INSNS (2), /* MHI */
199 COSTS_N_INSNS (4), /* ML */
200 COSTS_N_INSNS (4), /* MR */
201 COSTS_N_INSNS (5), /* MS */
202 COSTS_N_INSNS (6), /* MSG */
203 COSTS_N_INSNS (4), /* MSGF */
204 COSTS_N_INSNS (4), /* MSGFR */
205 COSTS_N_INSNS (4), /* MSGR */
206 COSTS_N_INSNS (4), /* MSR */
207 COSTS_N_INSNS (1), /* multiplication in DFmode */
208 COSTS_N_INSNS (28), /* MXBR */
209 COSTS_N_INSNS (130), /* SQXBR */
210 COSTS_N_INSNS (66), /* SQDBR */
211 COSTS_N_INSNS (38), /* SQEBR */
212 COSTS_N_INSNS (1), /* MADBR */
213 COSTS_N_INSNS (1), /* MAEBR */
214 COSTS_N_INSNS (60), /* DXBR */
215 COSTS_N_INSNS (40), /* DDBR */
216 COSTS_N_INSNS (26), /* DEBR */
217 COSTS_N_INSNS (30), /* DLGR */
218 COSTS_N_INSNS (23), /* DLR */
219 COSTS_N_INSNS (23), /* DR */
220 COSTS_N_INSNS (24), /* DSGFR */
221 COSTS_N_INSNS (24), /* DSGR */
225 struct processor_costs z10_cost =
227 COSTS_N_INSNS (10), /* M */
228 COSTS_N_INSNS (10), /* MGHI */
229 COSTS_N_INSNS (10), /* MH */
230 COSTS_N_INSNS (10), /* MHI */
231 COSTS_N_INSNS (10), /* ML */
232 COSTS_N_INSNS (10), /* MR */
233 COSTS_N_INSNS (10), /* MS */
234 COSTS_N_INSNS (10), /* MSG */
235 COSTS_N_INSNS (10), /* MSGF */
236 COSTS_N_INSNS (10), /* MSGFR */
237 COSTS_N_INSNS (10), /* MSGR */
238 COSTS_N_INSNS (10), /* MSR */
239 COSTS_N_INSNS (1) , /* multiplication in DFmode */
240 COSTS_N_INSNS (50), /* MXBR */
241 COSTS_N_INSNS (120), /* SQXBR */
242 COSTS_N_INSNS (52), /* SQDBR */
243 COSTS_N_INSNS (38), /* SQEBR */
244 COSTS_N_INSNS (1), /* MADBR */
245 COSTS_N_INSNS (1), /* MAEBR */
246 COSTS_N_INSNS (111), /* DXBR */
247 COSTS_N_INSNS (39), /* DDBR */
248 COSTS_N_INSNS (32), /* DEBR */
249 COSTS_N_INSNS (160), /* DLGR */
250 COSTS_N_INSNS (71), /* DLR */
251 COSTS_N_INSNS (71), /* DR */
252 COSTS_N_INSNS (71), /* DSGFR */
253 COSTS_N_INSNS (71), /* DSGR */
257 struct processor_costs z196_cost =
259 COSTS_N_INSNS (7), /* M */
260 COSTS_N_INSNS (5), /* MGHI */
261 COSTS_N_INSNS (5), /* MH */
262 COSTS_N_INSNS (5), /* MHI */
263 COSTS_N_INSNS (7), /* ML */
264 COSTS_N_INSNS (7), /* MR */
265 COSTS_N_INSNS (6), /* MS */
266 COSTS_N_INSNS (8), /* MSG */
267 COSTS_N_INSNS (6), /* MSGF */
268 COSTS_N_INSNS (6), /* MSGFR */
269 COSTS_N_INSNS (8), /* MSGR */
270 COSTS_N_INSNS (6), /* MSR */
271 COSTS_N_INSNS (1) , /* multiplication in DFmode */
272 COSTS_N_INSNS (40), /* MXBR B+40 */
273 COSTS_N_INSNS (100), /* SQXBR B+100 */
274 COSTS_N_INSNS (42), /* SQDBR B+42 */
275 COSTS_N_INSNS (28), /* SQEBR B+28 */
276 COSTS_N_INSNS (1), /* MADBR B */
277 COSTS_N_INSNS (1), /* MAEBR B */
278 COSTS_N_INSNS (101), /* DXBR B+101 */
279 COSTS_N_INSNS (29), /* DDBR */
280 COSTS_N_INSNS (22), /* DEBR */
281 COSTS_N_INSNS (160), /* DLGR cracked */
282 COSTS_N_INSNS (160), /* DLR cracked */
283 COSTS_N_INSNS (160), /* DR expanded */
284 COSTS_N_INSNS (160), /* DSGFR cracked */
285 COSTS_N_INSNS (160), /* DSGR cracked */
289 struct processor_costs zEC12_cost =
291 COSTS_N_INSNS (7), /* M */
292 COSTS_N_INSNS (5), /* MGHI */
293 COSTS_N_INSNS (5), /* MH */
294 COSTS_N_INSNS (5), /* MHI */
295 COSTS_N_INSNS (7), /* ML */
296 COSTS_N_INSNS (7), /* MR */
297 COSTS_N_INSNS (6), /* MS */
298 COSTS_N_INSNS (8), /* MSG */
299 COSTS_N_INSNS (6), /* MSGF */
300 COSTS_N_INSNS (6), /* MSGFR */
301 COSTS_N_INSNS (8), /* MSGR */
302 COSTS_N_INSNS (6), /* MSR */
303 COSTS_N_INSNS (1) , /* multiplication in DFmode */
304 COSTS_N_INSNS (40), /* MXBR B+40 */
305 COSTS_N_INSNS (100), /* SQXBR B+100 */
306 COSTS_N_INSNS (42), /* SQDBR B+42 */
307 COSTS_N_INSNS (28), /* SQEBR B+28 */
308 COSTS_N_INSNS (1), /* MADBR B */
309 COSTS_N_INSNS (1), /* MAEBR B */
310 COSTS_N_INSNS (131), /* DXBR B+131 */
311 COSTS_N_INSNS (29), /* DDBR */
312 COSTS_N_INSNS (22), /* DEBR */
313 COSTS_N_INSNS (160), /* DLGR cracked */
314 COSTS_N_INSNS (160), /* DLR cracked */
315 COSTS_N_INSNS (160), /* DR expanded */
316 COSTS_N_INSNS (160), /* DSGFR cracked */
317 COSTS_N_INSNS (160), /* DSGR cracked */
322 /* The preferred name to be used in user visible output. */
323 const char *const name;
324 /* CPU name as it should be passed to Binutils via .machine */
325 const char *const binutils_name;
326 const enum processor_type processor;
327 const struct processor_costs *cost;
329 const processor_table[] =
331 { "g5", "g5", PROCESSOR_9672_G5, &z900_cost },
332 { "g6", "g6", PROCESSOR_9672_G6, &z900_cost },
333 { "z900", "z900", PROCESSOR_2064_Z900, &z900_cost },
334 { "z990", "z990", PROCESSOR_2084_Z990, &z990_cost },
335 { "z9-109", "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
336 { "z9-ec", "z9-ec", PROCESSOR_2094_Z9_EC, &z9_109_cost },
337 { "z10", "z10", PROCESSOR_2097_Z10, &z10_cost },
338 { "z196", "z196", PROCESSOR_2817_Z196, &z196_cost },
339 { "zEC12", "zEC12", PROCESSOR_2827_ZEC12, &zEC12_cost },
340 { "z13", "z13", PROCESSOR_2964_Z13, &zEC12_cost },
341 { "z14", "arch12", PROCESSOR_3906_Z14, &zEC12_cost },
342 { "native", "", PROCESSOR_NATIVE, NULL }
345 extern int reload_completed;
347 /* Kept up to date using the SCHED_VARIABLE_ISSUE hook. */
348 static rtx_insn *last_scheduled_insn;
349 #define MAX_SCHED_UNITS 3
350 static int last_scheduled_unit_distance[MAX_SCHED_UNITS];
352 /* The maximum score added for an instruction whose unit hasn't been
353 in use for MAX_SCHED_MIX_DISTANCE steps. Increase this value to
354 give instruction mix scheduling more priority over instruction
355 grouping. */
356 #define MAX_SCHED_MIX_SCORE 8
358 /* The maximum distance up to which individual scores will be
359 calculated. Everything beyond this gives MAX_SCHED_MIX_SCORE.
360 Increase this with the OOO windows size of the machine. */
361 #define MAX_SCHED_MIX_DISTANCE 100
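/* Illustrative sketch (an assumption about how the scheduling hooks
   combine these constants, not a quote from the original sources): a
   unit idle for DIST of the last MAX_SCHED_MIX_DISTANCE insns would
   contribute roughly
     MAX_SCHED_MIX_SCORE * DIST / MAX_SCHED_MIX_DISTANCE
   to an insn's mix score, e.g. 8 * 50 / 100 == 4 for a unit idle for
   50 steps, saturating at MAX_SCHED_MIX_SCORE beyond the window.  */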
363 /* Structure used to hold the components of an S/390 memory
364 address. A legitimate address on S/390 is of the general
365 form
366 base + index + displacement
367 where any of the components is optional.
369 base and index are registers of the class ADDR_REGS,
370 displacement is an unsigned 12-bit immediate constant. */
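/* Illustrative example (not from the original sources): the load
     l %r1,100(%r2,%r3)
   uses base register %r3, index register %r2 and displacement 100,
   i.e. it reads from the effective address %r3 + %r2 + 100.  Any of
   the three components may be omitted.  */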
381 /* The following structure is embedded in the machine
382 specific part of struct function. */
384 struct GTY (()) s390_frame_layout
386 /* Offset within stack frame. */
387 HOST_WIDE_INT gprs_offset;
388 HOST_WIDE_INT f0_offset;
389 HOST_WIDE_INT f4_offset;
390 HOST_WIDE_INT f8_offset;
391 HOST_WIDE_INT backchain_offset;
393 /* Number of the first and last gpr for which slots in the register
394 save area are reserved. */
395 int first_save_gpr_slot;
396 int last_save_gpr_slot;
398 /* Location (FP register number) where GPRs (r0-r15) should
399 be saved to:
400 0 - does not need to be saved at all
401 -1 - stack slot. */
402 #define SAVE_SLOT_NONE 0
403 #define SAVE_SLOT_STACK -1
404 signed char gpr_save_slots[16];
406 /* Number of first and last gpr to be saved, restored. */
407 int first_save_gpr;
408 int first_restore_gpr;
409 int last_save_gpr;
410 int last_restore_gpr;
412 /* Bits standing for floating point registers. Set, if the
413 respective register has to be saved. Starting with reg 16 (f0)
414 at the rightmost bit.
415 Bit 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
416 fpr 15 13 11 9 14 12 10 8 7 5 3 1 6 4 2 0
417 reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 */
418 unsigned int fpr_bitmap;
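/* Illustrative example derived from the table above: saving f0 and f4
   (hard regs 16 and 18) yields fpr_bitmap == (1 << 0) | (1 << 2) == 0x5,
   since the accessor macros below shift by REGNO - FPR0_REGNUM.  */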
420 /* Number of floating point registers f8-f15 which must be saved. */
421 int high_fprs;
423 /* Set if return address needs to be saved.
424 This flag is set by s390_return_addr_rtx if it could not use
425 the initial value of r14 and therefore depends on r14 saved
426 to the stack. */
427 bool save_return_addr_p;
429 /* Size of stack frame. */
430 HOST_WIDE_INT frame_size;
433 /* Define the structure for the machine field in struct function. */
435 struct GTY(()) machine_function
437 struct s390_frame_layout frame_layout;
439 /* Literal pool base register. */
440 rtx base_reg;
442 /* True if we may need to perform branch splitting. */
443 bool split_branches_pending_p;
445 bool has_landing_pad_p;
447 /* True if the current function may contain a tbegin clobbering
448 FPRs. */
449 bool tbegin_p;
451 /* For -fsplit-stack support: A stack local which holds a pointer to
452 the stack arguments for a function with a variable number of
453 arguments. This is set at the start of the function and is used
454 to initialize the overflow_arg_area field of the va_list
455 structure. */
456 rtx split_stack_varargs_pointer;
459 /* Few accessor macros for struct cfun->machine->s390_frame_layout. */
461 #define cfun_frame_layout (cfun->machine->frame_layout)
462 #define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
463 #define cfun_save_arg_fprs_p (!!(TARGET_64BIT \
464 ? cfun_frame_layout.fpr_bitmap & 0x0f \
465 : cfun_frame_layout.fpr_bitmap & 0x03))
466 #define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
467 cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
468 #define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
469 (1 << (REGNO - FPR0_REGNUM)))
470 #define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
471 (1 << (REGNO - FPR0_REGNUM))))
472 #define cfun_gpr_save_slot(REGNO) \
473 cfun->machine->frame_layout.gpr_save_slots[REGNO]
475 /* Number of GPRs and FPRs used for argument passing. */
476 #define GP_ARG_NUM_REG 5
477 #define FP_ARG_NUM_REG (TARGET_64BIT? 4 : 2)
478 #define VEC_ARG_NUM_REG 8
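/* Illustrative note (facts of the s390 ELF ABI, stated here as an
   assumption rather than taken from the original sources): the 5 GPR
   argument registers are r2-r6, the FPR argument registers are
   f0/f2/f4/f6 (f0/f2 in 31-bit mode), and vector arguments go in
   v24-v31.  */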
480 /* A couple of shortcuts. */
481 #define CONST_OK_FOR_J(x) \
482 CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
483 #define CONST_OK_FOR_K(x) \
484 CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
485 #define CONST_OK_FOR_Os(x) \
486 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
487 #define CONST_OK_FOR_Op(x) \
488 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
489 #define CONST_OK_FOR_On(x) \
490 CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")
492 #define REGNO_PAIR_OK(REGNO, MODE) \
493 (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
495 /* That's the read ahead of the dynamic branch prediction unit in
496 bytes on a z10 (or higher) CPU. */
497 #define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)
500 /* Indicate which ABI has been used for passing vector args.
501 0 - no vector type arguments have been passed where the ABI is relevant
502 1 - the old ABI has been used
503 2 - a vector type argument has been passed either in a vector register
504 or on the stack by value */
505 static int s390_vector_abi = 0;
507 /* Set the vector ABI marker if TYPE is subject to the vector ABI
508 switch. The vector ABI affects only vector data types. There are
509 two aspects of the vector ABI relevant here:
511 1. vectors >= 16 bytes have an alignment of 8 bytes with the new
512 ABI and natural alignment with the old.
514 2. vectors <= 16 bytes are passed in VRs or by value on the stack
515 with the new ABI but by reference on the stack with the old.
517 If ARG_P is true TYPE is used for a function argument or return
518 value. The ABI marker then is set for all vector data types. If
519 ARG_P is false only type 1 vectors are being checked. */
522 s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
524 static hash_set<const_tree> visited_types_hash;
529 if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
532 if (visited_types_hash.contains (type))
535 visited_types_hash.add (type);
537 if (VECTOR_TYPE_P (type))
539 int type_size = int_size_in_bytes (type);
541 /* Outside of arguments, only the alignment changes, and this
542 only happens for vector types >= 16 bytes. */
543 if (!arg_p && type_size < 16)
546 /* In arguments, vector types > 16 bytes are passed as before (GCC
547 never enforced the bigger alignment for arguments which was
548 required by the old vector ABI). However, it might still be
549 ABI relevant due to the changed alignment if it is a struct
550 member. */
551 if (arg_p && type_size > 16 && !in_struct_p)
554 s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
556 else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
558 /* ARRAY_TYPE: Since with neither of the ABIs we have more than
559 natural alignment there will never be ABI dependent padding
560 in an array type. That's why we do not set in_struct_p to
561 true here. */
562 s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
564 else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
568 /* Check the return type. */
569 s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);
571 for (arg_chain = TYPE_ARG_TYPES (type);
572 arg_chain;
573 arg_chain = TREE_CHAIN (arg_chain))
574 s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
576 else if (RECORD_OR_UNION_TYPE_P (type))
580 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
582 if (TREE_CODE (field) != FIELD_DECL)
585 s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
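/* Illustrative example (hypothetical types, not from the original
   sources): passing a 16-byte 'vector signed int' as a function
   argument makes s390_check_type_for_vector_abi set s390_vector_abi
   to 2 under the new (TARGET_VX_ABI) ABI and to 1 under the old one,
   while a function taking only scalar arguments leaves the marker
   at 0.  */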
591 /* System z builtins. */
593 #include "s390-builtins.h"
595 const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
600 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
602 #define OB_DEF_VAR(...)
603 #include "s390-builtins.def"
607 const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
612 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
614 #define OB_DEF_VAR(...)
615 #include "s390-builtins.def"
619 const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
625 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
626 #define OB_DEF_VAR(...)
627 #include "s390-builtins.def"
632 bflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
639 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) FLAGS,
640 #include "s390-builtins.def"
645 opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
652 #define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) OPFLAGS,
653 #include "s390-builtins.def"
657 tree s390_builtin_types[BT_MAX];
658 tree s390_builtin_fn_types[BT_FN_MAX];
659 tree s390_builtin_decls[S390_BUILTIN_MAX +
660 S390_OVERLOADED_BUILTIN_MAX +
661 S390_OVERLOADED_BUILTIN_VAR_MAX];
663 static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
667 #define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
669 #define OB_DEF_VAR(...)
671 #include "s390-builtins.def"
676 s390_init_builtins (void)
678 /* These definitions are being used in s390-builtins.def. */
679 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
681 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
682 tree c_uint64_type_node;
684 /* The uint64_type_node from tree.c is not compatible with the C99
685 uint64_t data type. What we want is c_uint64_type_node from
686 c-common.c. But since backend code is not supposed to interface
687 with the frontend we recreate it here. */
689 c_uint64_type_node = long_unsigned_type_node;
691 c_uint64_type_node = long_long_unsigned_type_node;
694 #define DEF_TYPE(INDEX, NODE, CONST_P) \
695 if (s390_builtin_types[INDEX] == NULL) \
696 s390_builtin_types[INDEX] = (!CONST_P) ? \
697 (NODE) : build_type_variant ((NODE), 1, 0);
699 #undef DEF_POINTER_TYPE
700 #define DEF_POINTER_TYPE(INDEX, INDEX_BASE) \
701 if (s390_builtin_types[INDEX] == NULL) \
702 s390_builtin_types[INDEX] = \
703 build_pointer_type (s390_builtin_types[INDEX_BASE]);
705 #undef DEF_DISTINCT_TYPE
706 #define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE) \
707 if (s390_builtin_types[INDEX] == NULL) \
708 s390_builtin_types[INDEX] = \
709 build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);
711 #undef DEF_VECTOR_TYPE
712 #define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
713 if (s390_builtin_types[INDEX] == NULL) \
714 s390_builtin_types[INDEX] = \
715 build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
717 #undef DEF_OPAQUE_VECTOR_TYPE
718 #define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS) \
719 if (s390_builtin_types[INDEX] == NULL) \
720 s390_builtin_types[INDEX] = \
721 build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);
724 #define DEF_FN_TYPE(INDEX, args...) \
725 if (s390_builtin_fn_types[INDEX] == NULL) \
726 s390_builtin_fn_types[INDEX] = \
727 build_function_type_list (args, NULL_TREE);
729 #define DEF_OV_TYPE(...)
730 #include "s390-builtin-types.def"
733 #define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE) \
734 if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL) \
735 s390_builtin_decls[S390_BUILTIN_##NAME] = \
736 add_builtin_function ("__builtin_" #NAME, \
737 s390_builtin_fn_types[FNTYPE], \
738 S390_BUILTIN_##NAME, \
743 #define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
744 if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
746 s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
747 add_builtin_function ("__builtin_" #NAME, \
748 s390_builtin_fn_types[FNTYPE], \
749 S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
754 #define OB_DEF_VAR(...)
755 #include "s390-builtins.def"
759 /* Return true if ARG is appropriate as argument number ARGNUM of
760 builtin DECL. The operand flags from s390-builtins.def have to be
761 passed as OP_FLAGS. */
763 s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
765 if (O_UIMM_P (op_flags))
767 int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
768 int bitwidth = bitwidths[op_flags - O_U1];
770 if (!tree_fits_uhwi_p (arg)
771 || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
773 error ("constant argument %d for builtin %qF is out of range (0.."
774 HOST_WIDE_INT_PRINT_UNSIGNED ")",
776 (HOST_WIDE_INT_1U << bitwidth) - 1);
781 if (O_SIMM_P (op_flags))
783 int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
784 int bitwidth = bitwidths[op_flags - O_S2];
786 if (!tree_fits_shwi_p (arg)
787 || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
788 || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
790 error ("constant argument %d for builtin %qF is out of range ("
791 HOST_WIDE_INT_PRINT_DEC ".."
792 HOST_WIDE_INT_PRINT_DEC ")",
794 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
795 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
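/* Illustrative examples derived from the bitwidths tables above: an
   O_U12 operand accepts the unsigned range 0..4095 and an O_S8 operand
   the signed range -128..127; values outside these ranges trigger the
   diagnostics above.  */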
802 /* Expand an expression EXP that calls a built-in function,
803 with result going to TARGET if that's convenient
804 (and in mode MODE if that's convenient).
805 SUBTARGET may be used as the target for computing one of EXP's operands.
806 IGNORE is nonzero if the value is to be ignored. */
809 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
810 machine_mode mode ATTRIBUTE_UNUSED,
811 int ignore ATTRIBUTE_UNUSED)
815 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
816 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
817 enum insn_code icode;
818 rtx op[MAX_ARGS], pat;
822 call_expr_arg_iterator iter;
823 unsigned int all_op_flags = opflags_for_builtin (fcode);
824 machine_mode last_vec_mode = VOIDmode;
826 if (TARGET_DEBUG_ARG)
829 "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
830 (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
831 bflags_for_builtin (fcode));
834 if (S390_USE_TARGET_ATTRIBUTE)
838 bflags = bflags_for_builtin (fcode);
839 if ((bflags & B_HTM) && !TARGET_HTM)
841 error ("builtin %qF is not supported without -mhtm "
842 "(default with -march=zEC12 and higher).", fndecl);
845 if (((bflags & B_VX) || (bflags & B_VXE)) && !TARGET_VX)
847 error ("builtin %qF requires -mvx "
848 "(default with -march=z13 and higher).", fndecl);
852 if ((bflags & B_VXE) && !TARGET_VXE)
854 error ("Builtin %qF requires z14 or higher.", fndecl);
858 if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
859 && fcode < S390_ALL_BUILTIN_MAX)
863 else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
865 icode = code_for_builtin[fcode];
866 /* Set a flag in the machine specific cfun part in order to support
867 saving/restoring of FPRs. */
868 if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
869 cfun->machine->tbegin_p = true;
871 else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
873 error ("unresolved overloaded builtin");
877 internal_error ("bad builtin fcode");
880 internal_error ("bad builtin icode");
882 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
886 machine_mode tmode = insn_data[icode].operand[0].mode;
887 if (!target
888 || GET_MODE (target) != tmode
889 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
890 target = gen_reg_rtx (tmode);
892 /* There are builtins (e.g. vec_promote) with no vector
893 arguments but an element selector. So we have to also look
894 at the vector return type when emitting the modulo
895 operation. */
896 if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
897 last_vec_mode = insn_data[icode].operand[0].mode;
901 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
904 const struct insn_operand_data *insn_op;
905 unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);
907 all_op_flags = all_op_flags >> O_SHIFT;
909 if (arg == error_mark_node)
911 if (arity >= MAX_ARGS)
914 if (O_IMM_P (op_flags)
915 && TREE_CODE (arg) != INTEGER_CST)
917 error ("constant value required for builtin %qF argument %d",
922 if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
925 insn_op = &insn_data[icode].operand[arity + nonvoid];
926 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
928 /* expand_expr truncates constants to the target mode only if it
929 is "convenient". However, our checks below rely on this
931 if (CONST_INT_P (op[arity])
932 && SCALAR_INT_MODE_P (insn_op->mode)
933 && GET_MODE (op[arity]) != insn_op->mode)
934 op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
937 /* Wrap the expanded RTX for pointer types into a MEM expr with
938 the proper mode. This allows us to use e.g. (match_operand
939 "memory_operand"..) in the insn patterns instead of (mem
940 (match_operand "address_operand")). This is helpful for
941 patterns not just accepting MEMs. */
942 if (POINTER_TYPE_P (TREE_TYPE (arg))
943 && insn_op->predicate != address_operand)
944 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
946 /* Expand the modulo operation required on element selectors. */
947 if (op_flags == O_ELEM)
949 gcc_assert (last_vec_mode != VOIDmode);
950 op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
952 GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
953 NULL_RTX, 1, OPTAB_DIRECT);
956 /* Record the vector mode used for an element selector. This assumes:
957 1. There is no builtin with two different vector modes and an element selector
958 2. The element selector comes after the vector type it is referring to.
959 This is currently true for all the builtins, but FIXME: we
960 should check for that. */
961 if (VECTOR_MODE_P (insn_op->mode))
962 last_vec_mode = insn_op->mode;
964 if (insn_op->predicate (op[arity], insn_op->mode))
970 if (MEM_P (op[arity])
971 && insn_op->predicate == memory_operand
972 && (GET_MODE (XEXP (op[arity], 0)) == Pmode
973 || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
975 op[arity] = replace_equiv_address (op[arity],
976 copy_to_mode_reg (Pmode,
977 XEXP (op[arity], 0)));
979 /* Some of the builtins require different modes/types than the
980 pattern in order to implement a specific API. Instead of
981 adding many expanders which do the mode change we do it here.
982 E.g. s390_vec_add_u128, which is required to have vector unsigned
983 char arguments, is mapped to addti3. */
984 else if (insn_op->mode != VOIDmode
985 && GET_MODE (op[arity]) != VOIDmode
986 && GET_MODE (op[arity]) != insn_op->mode
987 && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
988 GET_MODE (op[arity]), 0))
993 else if (GET_MODE (op[arity]) == insn_op->mode
994 || GET_MODE (op[arity]) == VOIDmode
995 || (insn_op->predicate == address_operand
996 && GET_MODE (op[arity]) == Pmode))
998 /* An address_operand usually has VOIDmode in the expander
999 so we cannot use this. */
1000 machine_mode target_mode =
1001 (insn_op->predicate == address_operand
1002 ? Pmode : insn_op->mode);
1003 op[arity] = copy_to_mode_reg (target_mode, op[arity]);
1006 if (!insn_op->predicate (op[arity], insn_op->mode))
1008 error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
1017 pat = GEN_FCN (icode) (target);
1021 pat = GEN_FCN (icode) (target, op[0]);
1023 pat = GEN_FCN (icode) (op[0]);
1027 pat = GEN_FCN (icode) (target, op[0], op[1]);
1029 pat = GEN_FCN (icode) (op[0], op[1]);
1033 pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
1035 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
1039 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
1041 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
1045 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
1047 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
1051 pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
1053 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
1069 static const int s390_hotpatch_hw_max = 1000000;
1070 static int s390_hotpatch_hw_before_label = 0;
1071 static int s390_hotpatch_hw_after_label = 0;
1073 /* Check whether the hotpatch attribute is applied to a function and whether
1074 its arguments are valid. */
1077 s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
1078 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1084 if (TREE_CODE (*node) != FUNCTION_DECL)
1086 warning (OPT_Wattributes, "%qE attribute only applies to functions",
1088 *no_add_attrs = true;
1090 if (args != NULL && TREE_CHAIN (args) != NULL)
1092 expr = TREE_VALUE (args);
1093 expr2 = TREE_VALUE (TREE_CHAIN (args));
1095 if (args == NULL || TREE_CHAIN (args) == NULL)
1097 else if (TREE_CODE (expr) != INTEGER_CST
1098 || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
1099 || wi::gtu_p (expr, s390_hotpatch_hw_max))
1101 else if (TREE_CODE (expr2) != INTEGER_CST
1102 || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
1103 || wi::gtu_p (expr2, s390_hotpatch_hw_max))
1109 error ("requested %qE attribute is not a comma separated pair of"
1110 " non-negative integer constants or too large (max. %d)", name,
1111 s390_hotpatch_hw_max);
1112 *no_add_attrs = true;
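/* Illustrative usage (an example, not from the original sources):
     void foo (void) __attribute__ ((hotpatch (1, 2)));
   reserves 1 halfword of padding before and 2 halfwords after foo's
   label; both values must lie in the range 0..s390_hotpatch_hw_max.  */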
1118 /* Expand the s390_vector_bool type attribute. */
1121 s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
1122 tree args ATTRIBUTE_UNUSED,
1123 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1125 tree type = *node, result = NULL_TREE;
1128 while (POINTER_TYPE_P (type)
1129 || TREE_CODE (type) == FUNCTION_TYPE
1130 || TREE_CODE (type) == METHOD_TYPE
1131 || TREE_CODE (type) == ARRAY_TYPE)
1132 type = TREE_TYPE (type);
1134 mode = TYPE_MODE (type);
1137 case E_DImode: case E_V2DImode:
1138 result = s390_builtin_types[BT_BV2DI];
1140 case E_SImode: case E_V4SImode:
1141 result = s390_builtin_types[BT_BV4SI];
1143 case E_HImode: case E_V8HImode:
1144 result = s390_builtin_types[BT_BV8HI];
1146 case E_QImode: case E_V16QImode:
1147 result = s390_builtin_types[BT_BV16QI];
1153 *no_add_attrs = true; /* No need to hang on to the attribute. */
1156 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
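/* Illustrative usage (a sketch; the exact typedef spelling is an
   assumption, not from the original sources):
     typedef int v4si __attribute__ ((vector_size (16)));
     typedef v4si bv4si __attribute__ ((s390_vector_bool));
   maps the V4SImode vector onto the boolean vector type BT_BV4SI,
   i.e. the "vector bool int" of the vecintrin.h API.  */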
1161 static const struct attribute_spec s390_attribute_table[] = {
1162 { "hotpatch", 2, 2, true, false, false, s390_handle_hotpatch_attribute, false },
1163 { "s390_vector_bool", 0, 0, false, true, false, s390_handle_vectorbool_attribute, true },
1165 { NULL, 0, 0, false, false, false, NULL, false }
1168 /* Return the alignment for LABEL. We default to the -falign-labels
1169 value except for the literal pool base label. */
1171 s390_label_align (rtx_insn *label)
1173 rtx_insn *prev_insn = prev_active_insn (label);
1176 if (prev_insn == NULL_RTX)
1179 set = single_set (prev_insn);
1181 if (set == NULL_RTX)
1184 src = SET_SRC (set);
1186 /* Don't align literal pool base labels. */
1187 if (GET_CODE (src) == UNSPEC
1188 && XINT (src, 1) == UNSPEC_MAIN_BASE)
1192 return align_labels_log;
1195 static GTY(()) rtx got_symbol;
1197 /* Return the GOT table symbol. The symbol will be created when the
1198 function is invoked for the first time. */
1201 s390_got_symbol (void)
1205 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
1206 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
1213 s390_libgcc_cmp_return_mode (void)
1215 return TARGET_64BIT ? DImode : SImode;
1219 s390_libgcc_shift_count_mode (void)
1221 return TARGET_64BIT ? DImode : SImode;
1225 s390_unwind_word_mode (void)
1227 return TARGET_64BIT ? DImode : SImode;
1230 /* Return true if the back end supports mode MODE. */
1232 s390_scalar_mode_supported_p (machine_mode mode)
1234 /* In contrast to the default implementation, reject TImode constants on 31-bit
1235 TARGET_ZARCH for ABI compliance. */
1236 if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
1239 if (DECIMAL_FLOAT_MODE_P (mode))
1240 return default_decimal_float_supported_p ();
1242 return default_scalar_mode_supported_p (mode);
1245 /* Return true if the back end supports vector mode MODE. */
1247 s390_vector_mode_supported_p (machine_mode mode)
1251 if (!VECTOR_MODE_P (mode)
1253 || GET_MODE_SIZE (mode) > 16)
1256 inner = GET_MODE_INNER (mode);
1274 /* Set the has_landing_pad_p flag in struct machine_function to VALUE. */
1277 s390_set_has_landing_pad_p (bool value)
1279 cfun->machine->has_landing_pad_p = value;
1282 /* If two condition code modes are compatible, return a condition code
1283 mode which is compatible with both. Otherwise, return
1284 VOIDmode. */
1287 s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
1295 if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
1296 || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
1317 /* Return true if SET either doesn't set the CC register, or else
1318 the source and destination have matching CC modes and that
1319 CC mode is at least as constrained as REQ_MODE. */
1322 s390_match_ccmode_set (rtx set, machine_mode req_mode)
1324 machine_mode set_mode;
1326 gcc_assert (GET_CODE (set) == SET);
1328 /* These modes are supposed to be used only in CC consumer
1329 patterns. */
1330 gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
1331 && req_mode != CCVFALLmode && req_mode != CCVFANYmode);
1333 if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
1336 set_mode = GET_MODE (SET_DEST (set));
1356 if (req_mode != set_mode)
1361 if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
1362 && req_mode != CCSRmode && req_mode != CCURmode
1363 && req_mode != CCZ1mode)
1369 if (req_mode != CCAmode)
1377 return (GET_MODE (SET_SRC (set)) == set_mode);
1380 /* Return true if every SET in INSN that sets the CC register
1381 has source and destination with matching CC modes and that
1382 CC mode is at least as constrained as REQ_MODE.
1383 If REQ_MODE is VOIDmode, always return false. */
1386 s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
1390 /* s390_tm_ccmode returns VOIDmode to indicate failure. */
1391 if (req_mode == VOIDmode)
1394 if (GET_CODE (PATTERN (insn)) == SET)
1395 return s390_match_ccmode_set (PATTERN (insn), req_mode);
1397 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1398 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1400 rtx set = XVECEXP (PATTERN (insn), 0, i);
1401 if (GET_CODE (set) == SET)
1402 if (!s390_match_ccmode_set (set, req_mode))
1409 /* If a test-under-mask instruction can be used to implement
1410 (compare (and ... OP1) OP2), return the CC mode required
1411 to do that. Otherwise, return VOIDmode.
1412 MIXED is true if the instruction can distinguish between
1413 CC1 and CC2 for mixed selected bits (TMxx), it is false
1414 if the instruction cannot (TM). */
1417 s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
1421 /* ??? Fixme: should work on CONST_WIDE_INT as well. */
1422 if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
1425 /* Selected bits all zero: CC0.
1426 e.g.: int a; if ((a & (16 + 128)) == 0) */
1427 if (INTVAL (op2) == 0)
1430 /* Selected bits all one: CC3.
1431 e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
1432 if (INTVAL (op2) == INTVAL (op1))
1435 /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
1437 if ((a & (16 + 128)) == 16) -> CCT1
1438 if ((a & (16 + 128)) == 128) -> CCT2 */
1441 bit1 = exact_log2 (INTVAL (op2));
1442 bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
1443 if (bit0 != -1 && bit1 != -1)
1444 return bit0 > bit1 ? CCT1mode : CCT2mode;
1450 /* Given a comparison code OP (EQ, NE, etc.) and the operands
1451 OP0 and OP1 of a COMPARE, return the mode to be used for the
1452 comparison. */
1455 s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
1461 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1462 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1464 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1465 && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
1467 if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1468 || GET_CODE (op1) == NEG)
1469 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1472 if (GET_CODE (op0) == AND)
1474 /* Check whether we can potentially do it via TM. */
1475 machine_mode ccmode;
1476 ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
1477 if (ccmode != VOIDmode)
1479 /* Relax CCTmode to CCZmode to allow fall-back to AND
1480 if that turns out to be beneficial. */
1481 return ccmode == CCTmode ? CCZmode : ccmode;
1485 if (register_operand (op0, HImode)
1486 && GET_CODE (op1) == CONST_INT
1487 && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
1489 if (register_operand (op0, QImode)
1490 && GET_CODE (op1) == CONST_INT
1491 && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
1500 /* The only overflow condition of NEG and ABS happens when
1501 the minimum representable integer is used as the operand: the
1502 result, which ought to be positive, stays negative.
1503 Using CCAP mode the resulting cc can be used for comparisons. */
1504 if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
1505 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1508 /* If constants are involved in an add instruction it is possible to use
1509 the resulting cc for comparisons with zero. If the sign of the
1510 constant is known, the overflow behavior is predictable. e.g.:
1511 int a, b; if ((b = a + c) > 0)
1512 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
1513 if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
1514 && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
1515 || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
1516 /* Avoid INT32_MIN on 32 bit. */
1517 && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
1519 if (INTVAL (XEXP((op0), 1)) < 0)
1533 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1534 && GET_CODE (op1) != CONST_INT)
1540 if (GET_CODE (op0) == PLUS
1541 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1544 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1545 && GET_CODE (op1) != CONST_INT)
1551 if (GET_CODE (op0) == MINUS
1552 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
1555 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
1556 && GET_CODE (op1) != CONST_INT)
1565 /* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
1566 that we can implement more efficiently. */
1569 s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
1570 bool op0_preserve_value)
1572 if (op0_preserve_value)
1575 /* Convert ZERO_EXTRACT back to AND to enable TM patterns. */
1576 if ((*code == EQ || *code == NE)
1577 && *op1 == const0_rtx
1578 && GET_CODE (*op0) == ZERO_EXTRACT
1579 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1580 && GET_CODE (XEXP (*op0, 2)) == CONST_INT
1581 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1583 rtx inner = XEXP (*op0, 0);
1584 HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
1585 HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
1586 HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));
1588 if (len > 0 && len < modesize
1589 && pos >= 0 && pos + len <= modesize
1590 && modesize <= HOST_BITS_PER_WIDE_INT)
1592 unsigned HOST_WIDE_INT block;
1593 block = (HOST_WIDE_INT_1U << len) - 1;
1594 block <<= modesize - pos - len;
1596 *op0 = gen_rtx_AND (GET_MODE (inner), inner,
1597 gen_int_mode (block, GET_MODE (inner)));
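/* Illustrative example (not from the original sources): for an SImode
   INNER (modesize 32), LEN == 8 and POS == 0, the mask becomes
   0xff << (32 - 0 - 8) == 0xff000000; the POS of a ZERO_EXTRACT
   counts from the most significant bit here.  */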
1601 /* Narrow AND of memory against immediate to enable TM. */
1602 if ((*code == EQ || *code == NE)
1603 && *op1 == const0_rtx
1604 && GET_CODE (*op0) == AND
1605 && GET_CODE (XEXP (*op0, 1)) == CONST_INT
1606 && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
1608 rtx inner = XEXP (*op0, 0);
1609 rtx mask = XEXP (*op0, 1);
1611 /* Ignore paradoxical SUBREGs if all extra bits are masked out. */
1612 if (GET_CODE (inner) == SUBREG
1613 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
1614 && (GET_MODE_SIZE (GET_MODE (inner))
1615 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1617 & GET_MODE_MASK (GET_MODE (inner))
1618 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
1620 inner = SUBREG_REG (inner);
1622 /* Do not change volatile MEMs. */
1623 if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
1625 int part = s390_single_part (XEXP (*op0, 1),
1626 GET_MODE (inner), QImode, 0);
1629 mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
1630 inner = adjust_address_nv (inner, QImode, part);
1631 *op0 = gen_rtx_AND (QImode, inner, mask);
1636 /* Narrow comparisons against 0xffff to HImode if possible. */
1637 if ((*code == EQ || *code == NE)
1638 && GET_CODE (*op1) == CONST_INT
1639 && INTVAL (*op1) == 0xffff
1640 && SCALAR_INT_MODE_P (GET_MODE (*op0))
1641 && (nonzero_bits (*op0, GET_MODE (*op0))
1642 & ~HOST_WIDE_INT_UC (0xffff)) == 0)
1644 *op0 = gen_lowpart (HImode, *op0);
1648 /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible. */
1649 if (GET_CODE (*op0) == UNSPEC
1650 && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
1651 && XVECLEN (*op0, 0) == 1
1652 && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
1653 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1654 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1655 && *op1 == const0_rtx)
1657 enum rtx_code new_code = UNKNOWN;
1660 case EQ: new_code = EQ; break;
1661 case NE: new_code = NE; break;
1662 case LT: new_code = GTU; break;
1663 case GT: new_code = LTU; break;
1664 case LE: new_code = GEU; break;
1665 case GE: new_code = LEU; break;
1669 if (new_code != UNKNOWN)
1671 *op0 = XVECEXP (*op0, 0, 0);
1676 /* Remove redundant UNSPEC_CC_TO_INT conversions if possible. */
1677 if (GET_CODE (*op0) == UNSPEC
1678 && XINT (*op0, 1) == UNSPEC_CC_TO_INT
1679 && XVECLEN (*op0, 0) == 1
1680 && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
1681 && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
1682 && CONST_INT_P (*op1))
1684 enum rtx_code new_code = UNKNOWN;
1685 switch (GET_MODE (XVECEXP (*op0, 0, 0)))
1691 case EQ: new_code = EQ; break;
1692 case NE: new_code = NE; break;
1699 if (new_code != UNKNOWN)
1701 /* For CCRAWmode put the required cc mask into the second
1702 operand. */
1703 if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
1704 && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
1705 *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
1706 *op0 = XVECEXP (*op0, 0, 0);
1711 /* Simplify cascaded EQ, NE with const0_rtx. */
1712 if ((*code == NE || *code == EQ)
1713 && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
1714 && GET_MODE (*op0) == SImode
1715 && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
1716 && REG_P (XEXP (*op0, 0))
1717 && XEXP (*op0, 1) == const0_rtx
1718 && *op1 == const0_rtx)
1720 if ((*code == EQ && GET_CODE (*op0) == NE)
1721 || (*code == NE && GET_CODE (*op0) == EQ))
1725 *op0 = XEXP (*op0, 0);
1728 /* Prefer register over memory as first operand. */
1729 if (MEM_P (*op0) && REG_P (*op1))
1731 rtx tem = *op0; *op0 = *op1; *op1 = tem;
1732 *code = (int)swap_condition ((enum rtx_code)*code);
1735 /* A comparison result is compared against zero. Replace it with
1736 the (perhaps inverted) original comparison.
1737 This probably should be done by simplify_relational_operation. */
1738 if ((*code == EQ || *code == NE)
1739 && *op1 == const0_rtx
1740 && COMPARISON_P (*op0)
1741 && CC_REG_P (XEXP (*op0, 0)))
1743 enum rtx_code new_code;
1746 new_code = reversed_comparison_code_parts (GET_CODE (*op0),
1747 XEXP (*op0, 0),
1748 XEXP (*op0, 1), NULL);
1750 new_code = GET_CODE (*op0);
1752 if (new_code != UNKNOWN)
1755 *op1 = XEXP (*op0, 1);
1756 *op0 = XEXP (*op0, 0);
1762 /* Emit a compare instruction suitable to implement the comparison
1763 OP0 CODE OP1. Return the correct condition RTL to be placed in
1764 the IF_THEN_ELSE of the conditional branch testing the result. */
1767 s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
1769 machine_mode mode = s390_select_ccmode (code, op0, op1);
1772 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
1774 /* Do not output a redundant compare instruction if a
1775 compare_and_swap pattern already computed the result and the
1776 machine modes are compatible. */
1777 gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
1783 cc = gen_rtx_REG (mode, CC_REGNUM);
1784 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
1787 return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
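/* Illustrative usage (a sketch, not from the original sources): a
   conditional branch on A == B can be emitted as
     rtx cond = s390_emit_compare (EQ, a, b);
     s390_emit_jump (label, cond);
   with s390_emit_jump as defined below.  */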
1790 /* Emit a compare and swap instruction setting MEM to NEW_RTX if OLD
1791 matches CMP.
1792 Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
1793 conditional branch testing the result. */
1796 s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
1797 rtx cmp, rtx new_rtx, machine_mode ccmode)
1801 cc = gen_rtx_REG (ccmode, CC_REGNUM);
1802 switch (GET_MODE (mem))
1805 emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp,
1809 emit_insn (gen_atomic_compare_and_swapdi_internal (old, mem, cmp,
1813 emit_insn (gen_atomic_compare_and_swapti_internal (old, mem, cmp,
1821 return s390_emit_compare (code, cc, const0_rtx);
1824 /* Emit a jump instruction to TARGET and return it. If COND is
1825 NULL_RTX, emit an unconditional jump, else a conditional jump under
1826 condition COND. */
1829 s390_emit_jump (rtx target, rtx cond)
1833 target = gen_rtx_LABEL_REF (VOIDmode, target);
1835 target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);
1837 insn = gen_rtx_SET (pc_rtx, target);
1838 return emit_jump_insn (insn);
1841 /* Return branch condition mask to implement a branch
1842 specified by CODE. Return -1 for invalid comparisons. */
1845 s390_branch_condition_mask (rtx code)
1847 const int CC0 = 1 << 3;
1848 const int CC1 = 1 << 2;
1849 const int CC2 = 1 << 1;
1850 const int CC3 = 1 << 0;
1852 gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
1853 gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
1854 gcc_assert (XEXP (code, 1) == const0_rtx
1855 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
1856 && CONST_INT_P (XEXP (code, 1))));
1859 switch (GET_MODE (XEXP (code, 0)))
1863 switch (GET_CODE (code))
1865 case EQ: return CC0;
1866 case NE: return CC1 | CC2 | CC3;
1872 switch (GET_CODE (code))
1874 case EQ: return CC1;
1875 case NE: return CC0 | CC2 | CC3;
1881 switch (GET_CODE (code))
1883 case EQ: return CC2;
1884 case NE: return CC0 | CC1 | CC3;
1890 switch (GET_CODE (code))
1892 case EQ: return CC3;
1893 case NE: return CC0 | CC1 | CC2;
1899 switch (GET_CODE (code))
1901 case EQ: return CC0 | CC2;
1902 case NE: return CC1 | CC3;
1908 switch (GET_CODE (code))
1910 case LTU: return CC2 | CC3; /* carry */
1911 case GEU: return CC0 | CC1; /* no carry */
1917 switch (GET_CODE (code))
1919 case GTU: return CC0 | CC1; /* borrow */
1920 case LEU: return CC2 | CC3; /* no borrow */
1926 switch (GET_CODE (code))
1928 case EQ: return CC0 | CC2;
1929 case NE: return CC1 | CC3;
1930 case LTU: return CC1;
1931 case GTU: return CC3;
1932 case LEU: return CC1 | CC2;
1933 case GEU: return CC2 | CC3;
1938 switch (GET_CODE (code))
1940 case EQ: return CC0;
1941 case NE: return CC1 | CC2 | CC3;
1942 case LTU: return CC1;
1943 case GTU: return CC2;
1944 case LEU: return CC0 | CC1;
1945 case GEU: return CC0 | CC2;
1951 switch (GET_CODE (code))
1953 case EQ: return CC0;
1954 case NE: return CC2 | CC1 | CC3;
1955 case LTU: return CC2;
1956 case GTU: return CC1;
1957 case LEU: return CC0 | CC2;
1958 case GEU: return CC0 | CC1;
1964 switch (GET_CODE (code))
1966 case EQ: return CC0;
1967 case NE: return CC1 | CC2 | CC3;
1968 case LT: return CC1 | CC3;
1969 case GT: return CC2;
1970 case LE: return CC0 | CC1 | CC3;
1971 case GE: return CC0 | CC2;
1977 switch (GET_CODE (code))
1979 case EQ: return CC0;
1980 case NE: return CC1 | CC2 | CC3;
1981 case LT: return CC1;
1982 case GT: return CC2 | CC3;
1983 case LE: return CC0 | CC1;
1984 case GE: return CC0 | CC2 | CC3;
1990 switch (GET_CODE (code))
1992 case EQ: return CC0;
1993 case NE: return CC1 | CC2 | CC3;
1994 case LT: return CC1;
1995 case GT: return CC2;
1996 case LE: return CC0 | CC1;
1997 case GE: return CC0 | CC2;
1998 case UNORDERED: return CC3;
1999 case ORDERED: return CC0 | CC1 | CC2;
2000 case UNEQ: return CC0 | CC3;
2001 case UNLT: return CC1 | CC3;
2002 case UNGT: return CC2 | CC3;
2003 case UNLE: return CC0 | CC1 | CC3;
2004 case UNGE: return CC0 | CC2 | CC3;
2005 case LTGT: return CC1 | CC2;
2011 switch (GET_CODE (code))
2013 case EQ: return CC0;
2014 case NE: return CC2 | CC1 | CC3;
2015 case LT: return CC2;
2016 case GT: return CC1;
2017 case LE: return CC0 | CC2;
2018 case GE: return CC0 | CC1;
2019 case UNORDERED: return CC3;
2020 case ORDERED: return CC0 | CC2 | CC1;
2021 case UNEQ: return CC0 | CC3;
2022 case UNLT: return CC2 | CC3;
2023 case UNGT: return CC1 | CC3;
2024 case UNLE: return CC0 | CC2 | CC3;
2025 case UNGE: return CC0 | CC1 | CC3;
2026 case LTGT: return CC2 | CC1;
2031 /* Vector comparison modes. */
2032 /* CC2 will never be set. It however is part of the negated
2033 masks. */
2035 switch (GET_CODE (code))
2040 case GE: return CC0;
2041 /* The inverted modes are in fact *any* modes. */
2045 case LT: return CC3 | CC1 | CC2;
2050 switch (GET_CODE (code))
2055 case GE: return CC0 | CC1;
2056 /* The inverted modes are in fact *all* modes. */
2060 case LT: return CC3 | CC2;
2064 switch (GET_CODE (code))
2068 case GE: return CC0;
2069 /* The inverted modes are in fact *any* modes. */
2072 case UNLT: return CC3 | CC1 | CC2;
2077 switch (GET_CODE (code))
2081 case GE: return CC0 | CC1;
2082 /* The inverted modes are in fact *all* modes. */
2085 case UNLT: return CC3 | CC2;
2090 switch (GET_CODE (code))
2093 return INTVAL (XEXP (code, 1));
2095 return (INTVAL (XEXP (code, 1))) ^ 0xf;
2106 /* Return branch condition mask to implement a compare and branch
2107 specified by CODE. Return -1 for invalid comparisons. */
2110 s390_compare_and_branch_condition_mask (rtx code)
2112 const int CC0 = 1 << 3;
2113 const int CC1 = 1 << 2;
2114 const int CC2 = 1 << 1;
2116 switch (GET_CODE (code))
2140 /* If INV is false, return assembler mnemonic string to implement
2141 a branch specified by CODE. If INV is true, return mnemonic
2142 for the corresponding inverted branch. */
2145 s390_branch_condition_mnemonic (rtx code, int inv)
2149 static const char *const mnemonic[16] =
2151 NULL, "o", "h", "nle",
2152 "l", "nhe", "lh", "ne",
2153 "e", "nlh", "he", "nl",
2154 "le", "nh", "no", NULL
2157 if (GET_CODE (XEXP (code, 0)) == REG
2158 && REGNO (XEXP (code, 0)) == CC_REGNUM
2159 && (XEXP (code, 1) == const0_rtx
2160 || (GET_MODE (XEXP (code, 0)) == CCRAWmode
2161 && CONST_INT_P (XEXP (code, 1)))))
2162 mask = s390_branch_condition_mask (code);
2164 mask = s390_compare_and_branch_condition_mask (code);
2166 gcc_assert (mask >= 0);
2171 gcc_assert (mask >= 1 && mask <= 14);
2173 return mnemonic[mask];
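/* Illustrative example derived from the tables above (assuming a
   CCZmode consumer; the case labels are abbreviated above): NE yields
   the mask CC1 | CC2 | CC3 == 7 and hence the mnemonic "ne", while EQ
   yields CC0 == 8 and "e".  */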
2176 /* Return the part of OP which has a value different from DEF.
2177 The size of the part is determined by MODE.
2178 Use this function only if you already know that OP really
2179 contains such a part. */
2181 unsigned HOST_WIDE_INT
2182 s390_extract_part (rtx op, machine_mode mode, int def)
2184 unsigned HOST_WIDE_INT value = 0;
2185 int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
2186 int part_bits = GET_MODE_BITSIZE (mode);
2187 unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;
2190 for (i = 0; i < max_parts; i++)
2193 value = UINTVAL (op);
2195 value >>= part_bits;
2197 if ((value & part_mask) != (def & part_mask))
2198 return value & part_mask;
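/* Illustrative example (not from the original sources):
   s390_extract_part (GEN_INT (0x12340000), HImode, 0) returns 0x1234,
   the single 16-bit part of the constant that differs from DEF == 0.  */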
2204 /* If OP is an integer constant of mode MODE with exactly one
2205 part of mode PART_MODE unequal to DEF, return the number of that
2206 part. Otherwise, return -1. */
2209 s390_single_part (rtx op,
2211 machine_mode part_mode,
2214 unsigned HOST_WIDE_INT value = 0;
2215 int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
2216 unsigned HOST_WIDE_INT part_mask
2217 = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;
2220 if (GET_CODE (op) != CONST_INT)
2223 for (i = 0; i < n_parts; i++)
2226 value = UINTVAL (op);
2228 value >>= GET_MODE_BITSIZE (part_mode);
2230 if ((value & part_mask) != (def & part_mask))
2238 return part == -1 ? -1 : n_parts - 1 - part;
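/* Illustrative example (not from the original sources): for
   OP == 0x0000ffff00000000, MODE == DImode and PART_MODE == HImode,
   exactly one of the four 16-bit parts differs from DEF == 0; counted
   from the most significant part its number is 1, which is returned.  */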
2241 /* Return true if IN contains a contiguous bitfield in the lower SIZE
2242 bits and no other bits are set in (the lower SIZE bits of) IN.
2244 PSTART and PEND can be used to obtain the start and end
2245 position (inclusive) of the bitfield relative to 64
2246 bits. *PSTART / *PEND gives the position of the first/last bit
2247 of the bitfield counting from the highest order bit starting
2248 with zero. */
2251 s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
2252 int *pstart, int *pend)
2256 int lowbit = HOST_BITS_PER_WIDE_INT - 1;
2257 int highbit = HOST_BITS_PER_WIDE_INT - size;
2258 unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;
2260 gcc_assert (!!pstart == !!pend);
2261 for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
2264 /* Look for the rightmost bit of a contiguous range of ones. */
2271 /* Look for the first zero bit after the range of ones. */
2272 if (! (bitmask & in))
2276 /* We're one past the last one-bit. */
2280 /* No one bits found. */
2283 if (start > highbit)
2285 unsigned HOST_WIDE_INT mask;
2287 /* Calculate a mask for all bits beyond the contiguous bits. */
2288 mask = ((~HOST_WIDE_INT_0U >> highbit)
2289 & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));
2291 /* There are more bits set beyond the first range of one bits. */
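/* Illustrative example (not from the original sources): for IN == 0xf0
   and SIZE == 16 the function returns true with *PSTART == 56 and
   *PEND == 59, since bits 7..4 of IN map to positions 56..59 when
   counting from the highest order bit.  */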
2304 /* Same as s390_contiguous_bitmask_nowrap_p but also returns true
2305 if ~IN contains a contiguous bitfield. In that case, *END is <
2306 *START.
2308 If WRAP_P is true, a bitmask that wraps around is also tested.
2309 When a wraparound occurs *START is greater than *END (in
2310 non-null pointers), and the uppermost (64 - SIZE) bits are thus
2311 part of the range. If WRAP_P is false, no wraparound is
2312 tested. */
2315 s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
2316 int size, int *start, int *end)
2318 int bs = HOST_BITS_PER_WIDE_INT;
2321 gcc_assert (!!start == !!end);
2322 if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
2323 /* This cannot be expressed as a contiguous bitmask. Exit early because
2324 the second call of s390_contiguous_bitmask_nowrap_p would accept this as
2325 a contiguous bitmask. */
2326 return false;
2327 b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
2332 b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
2338 gcc_assert (s >= 1);
2339 *start = ((e + 1) & (bs - 1));
2340 *end = ((s - 1 + bs) & (bs - 1));
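/* Illustrative example (not from the original sources): for the
   wrapping mask IN == 0xff000000000000ff with SIZE == 64 the function
   returns true with *START == 56 and *END == 7; *START > *END
   indicates the wraparound.  */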
2346 /* Return true if OP contains the same contiguous bitfield in *all*
2347 its elements. START and END can be used to obtain the start and
2348 end position of the bitfield.
2350 START/END give the position of the first/last bit of the bitfield
2351 counting from the lowest order bit starting with zero. In order to
2352 use these values for S/390 instructions this has to be converted to
2353 "bits big endian" style. */
2356 s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
2358 unsigned HOST_WIDE_INT mask;
2363 gcc_assert (!!start == !!end);
2364 if (!const_vec_duplicate_p (op, &elt)
2365 || !CONST_INT_P (elt))
2368 size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));
2370 /* We cannot deal with V1TI/V1TF. This would require a vgmq. */
2374 mask = UINTVAL (elt);
2376 b = s390_contiguous_bitmask_p (mask, true, size, start, end);
2381 *start -= (HOST_BITS_PER_WIDE_INT - size);
2382 *end -= (HOST_BITS_PER_WIDE_INT - size);
2390 /* Return true if OP consists only of byte chunks being either 0 or
2391 0xff. If MASK is !=NULL a byte mask is generated which is
2392 appropriate for the vector generate byte mask instruction. */
2395 s390_bytemask_vector_p (rtx op, unsigned *mask)
2398 unsigned tmp_mask = 0;
2399 int nunit, unit_size;
2401 if (!VECTOR_MODE_P (GET_MODE (op))
2402 || GET_CODE (op) != CONST_VECTOR
2403 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2406 nunit = GET_MODE_NUNITS (GET_MODE (op));
2407 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2409 for (i = 0; i < nunit; i++)
2411 unsigned HOST_WIDE_INT c;
2414 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2417 c = UINTVAL (XVECEXP (op, 0, i));
2418 for (j = 0; j < unit_size; j++)
2420 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2422 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2423 c = c >> BITS_PER_UNIT;
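
/* Illustrative sketch with a hypothetical helper: computing the 16-bit
   immediate of the "vector generate byte mask" (vgbm) instruction from
   16 byte values that must each be 0x00 or 0xff.  Bit 15 of the mask
   corresponds to byte 0, matching the (nunit - 1 - i) * unit_size + j
   shift above for a V16QI vector.  */

static int
example_vgbm_mask (const unsigned char bytes[16], unsigned *mask)
{
  unsigned m = 0;
  int i;

  for (i = 0; i < 16; i++)
    {
      if (bytes[i] != 0x00 && bytes[i] != 0xff)
        return 0;       /* Not representable as a byte mask.  */
      m |= (unsigned) (bytes[i] & 1) << (15 - i);
    }
  *mask = m;
  return 1;
}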
2433 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2434 equivalent to a shift followed by the AND. In particular, CONTIG
2435 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2436 for ROTL indicate a rotate to the right. */
2439 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2444 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2448 return (64 - end >= rotl);
2451 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2453 rotl = -rotl + (64 - bitsize);
2454 return (start >= rotl);
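
/* Illustrative sketch, not part of the backend: a brute-force oracle
   for the property checked above, testing on a single 64-bit value
   whether rotate-then-AND agrees with shift-then-AND.  As in
   s390_extzv_shift_ok, a negative ROTL means rotate/shift right.  */

static int
example_rotate_and_equals_shift_and (unsigned long long v, int rotl,
                                     unsigned long long contig)
{
  int r = ((rotl % 64) + 64) % 64;      /* Left-rotate amount, 0..63.  */
  unsigned long long rot = r ? (v << r) | (v >> (64 - r)) : v;
  unsigned long long shf;

  if (rotl >= 0)
    shf = rotl < 64 ? v << rotl : 0;
  else
    shf = -rotl < 64 ? v >> -rotl : 0;

  return (rot & contig) == (shf & contig);
}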
2458 /* Check whether we can (and want to) split a double-word
2459 move in mode MODE from SRC to DST into two single-word
2460 moves, moving the subword FIRST_SUBWORD first. */
2463 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2465 /* Floating point and vector registers cannot be split. */
2466 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2469 /* Non-offsettable memory references cannot be split. */
2470 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2471 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2474 /* Moving the first subword must not clobber a register
2475 needed to move the second subword. */
2476 if (register_operand (dst, mode))
2478 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2479 if (reg_overlap_mentioned_p (subreg, src))
2486 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2487 and [MEM2, MEM2 + SIZE] do overlap and false otherwise.
2491 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2493 rtx addr1, addr2, addr_delta;
2494 HOST_WIDE_INT delta;
2496 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2502 addr1 = XEXP (mem1, 0);
2503 addr2 = XEXP (mem2, 0);
2505 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2507 /* This overlapping check is used by peepholes merging memory block operations.
2508 Overlapping operations would otherwise be recognized by the S/390 hardware
2509 and would fall back to a slower implementation. Allowing overlapping
2510 operations would lead to slow code but not to wrong code. Therefore we are
2511 somewhat optimistic if we cannot prove that the memory blocks are disjoint.
2513 That's why we return false here although this may accept operations on
2514 overlapping memory areas. */
2515 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2518 delta = INTVAL (addr_delta);
2521 || (delta > 0 && delta < size)
2522 || (delta < 0 && -delta < size))
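
/* Illustrative sketch of the same test on plain integers: two blocks
   [a, a + size) and [b, b + size) provably overlap iff their constant
   distance is smaller than SIZE in either direction.  */

static int
example_blocks_overlap_p (long long a, long long b, long long size)
{
  long long delta = b - a;

  return (delta == 0
          || (delta > 0 && delta < size)
          || (delta < 0 && -delta < size));
}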
2528 /* Check whether the address of memory reference MEM2 equals exactly
2529 the address of memory reference MEM1 plus DELTA. Return true if
2530 we can prove this to be the case, false otherwise. */
2533 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2535 rtx addr1, addr2, addr_delta;
2537 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2540 addr1 = XEXP (mem1, 0);
2541 addr2 = XEXP (mem2, 0);
2543 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2544 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2550 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2553 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2556 machine_mode wmode = mode;
2557 rtx dst = operands[0];
2558 rtx src1 = operands[1];
2559 rtx src2 = operands[2];
2562 /* If we cannot handle the operation directly, use a temp register. */
2563 if (!s390_logical_operator_ok_p (operands))
2564 dst = gen_reg_rtx (mode);
2566 /* QImode and HImode patterns make sense only if we have a destination
2567 in memory. Otherwise perform the operation in SImode. */
2568 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2571 /* Widen operands if required. */
2574 if (GET_CODE (dst) == SUBREG
2575 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2577 else if (REG_P (dst))
2578 dst = gen_rtx_SUBREG (wmode, dst, 0);
2580 dst = gen_reg_rtx (wmode);
2582 if (GET_CODE (src1) == SUBREG
2583 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2585 else if (GET_MODE (src1) != VOIDmode)
2586 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2588 if (GET_CODE (src2) == SUBREG
2589 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2591 else if (GET_MODE (src2) != VOIDmode)
2592 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2595 /* Emit the instruction. */
2596 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2597 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2598 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2600 /* Fix up the destination if needed. */
2601 if (dst != operands[0])
2602 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2605 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2608 s390_logical_operator_ok_p (rtx *operands)
2610 /* If the destination operand is in memory, it needs to coincide
2611 with one of the source operands. After reload, it has to be
2612 the first source operand. */
2613 if (GET_CODE (operands[0]) == MEM)
2614 return rtx_equal_p (operands[0], operands[1])
2615 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2620 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2621 operand IMMOP to switch from SS to SI type instructions. */
2624 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2626 int def = code == AND ? -1 : 0;
2630 gcc_assert (GET_CODE (*memop) == MEM);
2631 gcc_assert (!MEM_VOLATILE_P (*memop));
2633 mask = s390_extract_part (*immop, QImode, def);
2634 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2635 gcc_assert (part >= 0);
2637 *memop = adjust_address (*memop, QImode, part);
2638 *immop = gen_int_mode (mask, QImode);
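
/* Illustrative sketch with a hypothetical helper: locating the single
   byte of a 32-bit AND immediate that differs from the per-byte
   identity 0xff.  If exactly one byte differs, the memory AND narrows
   to a one-byte instruction at that offset; the returned part counts
   bytes from the most significant end, matching the adjust_address
   offset above.  For IOR/XOR the identity byte is 0x00 instead (the
   DEF value above).  */

static int
example_single_and_part (unsigned int imm, unsigned char *byte)
{
  int part = -1;
  int i;

  for (i = 0; i < 4; i++)
    {
      unsigned char b = (imm >> (8 * (3 - i))) & 0xff;

      if (b != 0xff)
        {
          if (part >= 0)
            return -1;  /* More than one byte differs.  */
          part = i;
          *byte = b;
        }
    }
  return part;          /* -1 also if IMM is all ones.  */
}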
2642 /* How to allocate a 'struct machine_function'. */
2644 static struct machine_function *
2645 s390_init_machine_status (void)
2647 return ggc_cleared_alloc<machine_function> ();
2650 /* Map for smallest class containing reg regno. */
2652 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2653 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2654 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2655 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2656 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2657 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2658 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2659 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2660 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2661 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2662 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2663 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2664 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2665 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2666 VEC_REGS, VEC_REGS /* 52 */
2669 /* Return attribute type of insn. */
2671 static enum attr_type
2672 s390_safe_attr_type (rtx_insn *insn)
2674 if (recog_memoized (insn) >= 0)
2675 return get_attr_type (insn);
2680 /* Return true if DISP is a valid short displacement. */
2683 s390_short_displacement (rtx disp)
2685 /* No displacement is OK. */
2689 /* Without the long displacement facility we don't need to
2690 distinguish between long and short displacements.
2691 if (!TARGET_LONG_DISPLACEMENT)
2694 /* Integer displacement in range. */
2695 if (GET_CODE (disp) == CONST_INT)
2696 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2698 /* GOT offset is not OK, the GOT can be large. */
2699 if (GET_CODE (disp) == CONST
2700 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2701 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2702 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2705 /* All other symbolic constants are literal pool references,
2706 which are OK as the literal pool must be small. */
2707 if (GET_CODE (disp) == CONST)
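
/* Illustrative sketch, assuming the usual S/390 encodings: short
   displacements are unsigned 12 bit, long displacements (available
   with the long-displacement facility) are signed 20 bit.  */

static int
example_short_disp_p (long long d)
{
  return d >= 0 && d < 4096;            /* 12-bit unsigned.  */
}

static int
example_long_disp_p (long long d)
{
  return d >= -524288 && d <= 524287;   /* 20-bit signed.  */
}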
2713 /* Decompose an RTL expression ADDR for a memory address into
2714 its components, returned in OUT.
2716 Returns false if ADDR is not a valid memory address, true
2717 otherwise. If OUT is NULL, don't return the components,
2718 but check for validity only.
2720 Note: Only addresses in canonical form are recognized.
2721 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2722 canonical form so that they will be recognized. */
2725 s390_decompose_address (rtx addr, struct s390_address *out)
2727 HOST_WIDE_INT offset = 0;
2728 rtx base = NULL_RTX;
2729 rtx indx = NULL_RTX;
2730 rtx disp = NULL_RTX;
2732 bool pointer = false;
2733 bool base_ptr = false;
2734 bool indx_ptr = false;
2735 bool literal_pool = false;
2737 /* We may need to substitute the literal pool base register into the address
2738 below. However, at this point we do not know which register is going to
2739 be used as base, so we substitute the arg pointer register. This is going
2740 to be treated as holding a pointer below -- it shouldn't be used for any other purpose. */
2742 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2744 /* Decompose address into base + index + displacement. */
2746 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2749 else if (GET_CODE (addr) == PLUS)
2751 rtx op0 = XEXP (addr, 0);
2752 rtx op1 = XEXP (addr, 1);
2753 enum rtx_code code0 = GET_CODE (op0);
2754 enum rtx_code code1 = GET_CODE (op1);
2756 if (code0 == REG || code0 == UNSPEC)
2758 if (code1 == REG || code1 == UNSPEC)
2760 indx = op0; /* index + base */
2766 base = op0; /* base + displacement */
2771 else if (code0 == PLUS)
2773 indx = XEXP (op0, 0); /* index + base + disp */
2774 base = XEXP (op0, 1);
2785 disp = addr; /* displacement */
2787 /* Extract integer part of displacement. */
2791 if (GET_CODE (disp) == CONST_INT)
2793 offset = INTVAL (disp);
2796 else if (GET_CODE (disp) == CONST
2797 && GET_CODE (XEXP (disp, 0)) == PLUS
2798 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2800 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2801 disp = XEXP (XEXP (disp, 0), 0);
2805 /* Strip off CONST here to avoid special case tests later. */
2806 if (disp && GET_CODE (disp) == CONST)
2807 disp = XEXP (disp, 0);
2809 /* We can convert literal pool addresses to
2810 displacements by basing them off the base register. */
2811 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2816 base = fake_pool_base, literal_pool = true;
2818 /* Mark up the displacement. */
2819 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2820 UNSPEC_LTREL_OFFSET);
2823 /* Validate base register. */
2826 if (GET_CODE (base) == UNSPEC)
2827 switch (XINT (base, 1))
2831 disp = gen_rtx_UNSPEC (Pmode,
2832 gen_rtvec (1, XVECEXP (base, 0, 0)),
2833 UNSPEC_LTREL_OFFSET);
2837 base = XVECEXP (base, 0, 1);
2840 case UNSPEC_LTREL_BASE:
2841 if (XVECLEN (base, 0) == 1)
2842 base = fake_pool_base, literal_pool = true;
2844 base = XVECEXP (base, 0, 1);
2851 if (!REG_P (base) || GET_MODE (base) != Pmode)
2854 if (REGNO (base) == STACK_POINTER_REGNUM
2855 || REGNO (base) == FRAME_POINTER_REGNUM
2856 || ((reload_completed || reload_in_progress)
2857 && frame_pointer_needed
2858 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2859 || REGNO (base) == ARG_POINTER_REGNUM
2861 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2862 pointer = base_ptr = true;
2864 if ((reload_completed || reload_in_progress)
2865 && base == cfun->machine->base_reg)
2866 pointer = base_ptr = literal_pool = true;
2869 /* Validate index register. */
2872 if (GET_CODE (indx) == UNSPEC)
2873 switch (XINT (indx, 1))
2877 disp = gen_rtx_UNSPEC (Pmode,
2878 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2879 UNSPEC_LTREL_OFFSET);
2883 indx = XVECEXP (indx, 0, 1);
2886 case UNSPEC_LTREL_BASE:
2887 if (XVECLEN (indx, 0) == 1)
2888 indx = fake_pool_base, literal_pool = true;
2890 indx = XVECEXP (indx, 0, 1);
2897 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2900 if (REGNO (indx) == STACK_POINTER_REGNUM
2901 || REGNO (indx) == FRAME_POINTER_REGNUM
2902 || ((reload_completed || reload_in_progress)
2903 && frame_pointer_needed
2904 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2905 || REGNO (indx) == ARG_POINTER_REGNUM
2907 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2908 pointer = indx_ptr = true;
2910 if ((reload_completed || reload_in_progress)
2911 && indx == cfun->machine->base_reg)
2912 pointer = indx_ptr = literal_pool = true;
2915 /* Prefer to use pointer as base, not index. */
2916 if (base && indx && !base_ptr
2917 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2924 /* Validate displacement. */
2927 /* If virtual registers are involved, the displacement will change later
2928 anyway as the virtual registers get eliminated. This could make a
2929 valid displacement invalid, but it is more likely to make an invalid
2930 displacement valid, because we sometimes access the register save area
2931 via negative offsets to one of those registers.
2932 Thus we don't check the displacement for validity here. If after
2933 elimination the displacement turns out to be invalid after all,
2934 this is fixed up by reload in any case. */
2935 /* LRA always keeps displacements up to date, and we need to
2936 know that the displacement is valid throughout LRA, not only at the
2937 final elimination. */
2939 || (base != arg_pointer_rtx
2940 && indx != arg_pointer_rtx
2941 && base != return_address_pointer_rtx
2942 && indx != return_address_pointer_rtx
2943 && base != frame_pointer_rtx
2944 && indx != frame_pointer_rtx
2945 && base != virtual_stack_vars_rtx
2946 && indx != virtual_stack_vars_rtx))
2947 if (!DISP_IN_RANGE (offset))
2952 /* All the special cases are pointers. */
2955 /* In the small-PIC case, the linker converts @GOT
2956 and @GOTNTPOFF offsets to possible displacements. */
2957 if (GET_CODE (disp) == UNSPEC
2958 && (XINT (disp, 1) == UNSPEC_GOT
2959 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2965 /* Accept pool label offsets. */
2966 else if (GET_CODE (disp) == UNSPEC
2967 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2970 /* Accept literal pool references. */
2971 else if (GET_CODE (disp) == UNSPEC
2972 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2974 /* In case CSE pulled a non literal pool reference out of
2975 the pool we have to reject the address. This is
2976 especially important when loading the GOT pointer on non
2977 zarch CPUs. In this case the literal pool contains an lt
2978 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
2979 will most likely exceed the displacement. */
2980 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
2981 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
2984 orig_disp = gen_rtx_CONST (Pmode, disp);
2987 /* If we have an offset, make sure it does not
2988 exceed the size of the constant pool entry. */
2989 rtx sym = XVECEXP (disp, 0, 0);
2990 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
2993 orig_disp = plus_constant (Pmode, orig_disp, offset);
3008 out->disp = orig_disp;
3009 out->pointer = pointer;
3010 out->literal_pool = literal_pool;
3016 /* Decompose an RTL expression OP for an address style operand into its
3017 components, and return the base register in BASE and the offset in
3018 OFFSET. While OP looks like an address it is never supposed to be used as such.
3021 Return true if OP is a valid address operand, false if not. */
3024 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3025 HOST_WIDE_INT *offset)
3029 /* We can have an integer constant, an address register,
3030 or a sum of the two. */
3031 if (CONST_SCALAR_INT_P (op))
3036 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3041 while (op && GET_CODE (op) == SUBREG)
3042 op = SUBREG_REG (op);
3044 if (op && GET_CODE (op) != REG)
3049 if (off == NULL_RTX)
3051 else if (CONST_INT_P (off))
3052 *offset = INTVAL (off);
3053 else if (CONST_WIDE_INT_P (off))
3054 /* The offset will be truncated to 12 bits anyway, so take just
3055 the lowest order chunk of the wide int. */
3056 *offset = CONST_WIDE_INT_ELT (off, 0);
3067 /* Return true if CODE is a valid address without index. */
3070 s390_legitimate_address_without_index_p (rtx op)
3072 struct s390_address addr;
3074 if (!s390_decompose_address (XEXP (op, 0), &addr))
3083 /* Return TRUE if ADDR is an operand valid for a load/store relative
3084 instruction. Be aware that the alignment of the operand needs to
3085 be checked separately.
3086 Valid addresses are single references or a sum of a reference and a
3087 constant integer. Return these parts in SYMREF and ADDEND. You can
3088 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3089 values. Literal pool references are *not* considered symbol references. */
3093 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3095 HOST_WIDE_INT tmpaddend = 0;
3097 if (GET_CODE (addr) == CONST)
3098 addr = XEXP (addr, 0);
3100 if (GET_CODE (addr) == PLUS)
3102 if (!CONST_INT_P (XEXP (addr, 1)))
3105 tmpaddend = INTVAL (XEXP (addr, 1));
3106 addr = XEXP (addr, 0);
3109 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3110 || (GET_CODE (addr) == UNSPEC
3111 && (XINT (addr, 1) == UNSPEC_GOTENT
3112 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3117 *addend = tmpaddend;
3124 /* Return true if the address in OP is valid for constraint letter C
3125 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3126 pool MEMs should be accepted. Only the Q, R, S, T constraint
3127 letters are allowed for C. */
3130 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3132 struct s390_address addr;
3133 bool decomposed = false;
3135 if (!address_operand (op, GET_MODE (op)))
3138 /* This check makes sure that no symbolic addresses (except literal
3139 pool references) are accepted by the R or T constraints. */
3140 if (s390_loadrelative_operand_p (op, NULL, NULL))
3143 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3146 if (!s390_decompose_address (op, &addr))
3148 if (addr.literal_pool)
3153 /* With reload, we sometimes get intermediate address forms that are
3154 actually invalid as-is, but we need to accept them in the most
3155 generic cases below ('R' or 'T'), since reload will in fact fix
3156 them up. LRA behaves differently here; we never see such forms,
3157 but on the other hand, we need to strictly reject every invalid
3158 address form. Perform this check right up front. */
3159 if (lra_in_progress)
3161 if (!decomposed && !s390_decompose_address (op, &addr))
3168 case 'Q': /* no index short displacement */
3169 if (!decomposed && !s390_decompose_address (op, &addr))
3173 if (!s390_short_displacement (addr.disp))
3177 case 'R': /* with index short displacement */
3178 if (TARGET_LONG_DISPLACEMENT)
3180 if (!decomposed && !s390_decompose_address (op, &addr))
3182 if (!s390_short_displacement (addr.disp))
3185 /* Any invalid address here will be fixed up by reload,
3186 so accept it for the most generic constraint. */
3189 case 'S': /* no index long displacement */
3190 if (!decomposed && !s390_decompose_address (op, &addr))
3196 case 'T': /* with index long displacement */
3197 /* Any invalid address here will be fixed up by reload,
3198 so accept it for the most generic constraint. */
3208 /* Evaluates constraint strings described by the regular expression
3209 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3210 the constraint given in STR, or 0 otherwise. */
3213 s390_mem_constraint (const char *str, rtx op)
3220 /* Check for offsettable variants of memory constraints. */
3221 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3223 if ((reload_completed || reload_in_progress)
3224 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3226 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3228 /* Check for non-literal-pool variants of memory constraints. */
3231 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3236 if (GET_CODE (op) != MEM)
3238 return s390_check_qrst_address (c, XEXP (op, 0), true);
3240 /* Simply check for the basic form of a shift count. Reload will
3241 take care of making sure we have a proper base register. */
3242 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3246 return s390_check_qrst_address (str[1], op, true);
3254 /* Evaluates constraint strings starting with letter O. Input
3255 parameter C is the second letter following the "O" in the constraint
3256 string. Returns 1 if VALUE meets the respective constraint and 0 otherwise. */
3260 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3268 return trunc_int_for_mode (value, SImode) == value;
3272 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3275 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3283 /* Evaluates constraint strings starting with letter N. Parameter STR
3284 contains the letters following letter "N" in the constraint string.
3285 Returns true if VALUE matches the constraint. */
3288 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3290 machine_mode mode, part_mode;
3292 int part, part_goal;
3298 part_goal = str[0] - '0';
3342 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3345 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3348 if (part_goal != -1 && part_goal != part)
3355 /* Returns true if the input parameter VALUE is a float zero. */
3358 s390_float_const_zero_p (rtx value)
3360 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3361 && value == CONST0_RTX (GET_MODE (value)));
3364 /* Implement TARGET_REGISTER_MOVE_COST. */
3367 s390_register_move_cost (machine_mode mode,
3368 reg_class_t from, reg_class_t to)
3370 /* On s390, copy between fprs and gprs is expensive. */
3372 /* It becomes somewhat faster having ldgr/lgdr. */
3373 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3375 /* ldgr is single cycle. */
3376 if (reg_classes_intersect_p (from, GENERAL_REGS)
3377 && reg_classes_intersect_p (to, FP_REGS))
3379 /* lgdr needs 3 cycles. */
3380 if (reg_classes_intersect_p (to, GENERAL_REGS)
3381 && reg_classes_intersect_p (from, FP_REGS))
3385 /* Otherwise copying is done via memory. */
3386 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3387 && reg_classes_intersect_p (to, FP_REGS))
3388 || (reg_classes_intersect_p (from, FP_REGS)
3389 && reg_classes_intersect_p (to, GENERAL_REGS)))
3395 /* Implement TARGET_MEMORY_MOVE_COST. */
3398 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3399 reg_class_t rclass ATTRIBUTE_UNUSED,
3400 bool in ATTRIBUTE_UNUSED)
3405 /* Compute a (partial) cost for rtx X. Return true if the complete
3406 cost has been computed, and false if subexpressions should be
3407 scanned. In either case, *TOTAL contains the cost result. The
3408 initial value of *TOTAL is the default value computed by
3409 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3410 code of the superexpression of x. */
3413 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3414 int opno ATTRIBUTE_UNUSED,
3415 int *total, bool speed ATTRIBUTE_UNUSED)
3417 int code = GET_CODE (x);
3425 case CONST_WIDE_INT:
3432 /* Without this a conditional move instruction would be
3433 accounted as 3 * COSTS_N_INSNS (set, if_then_else,
3434 comparison operator). That's a bit pessimistic. */
3436 if (!TARGET_Z196 || GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
3439 rtx cond = XEXP (SET_SRC (x), 0);
3441 if (!CC_REG_P (XEXP (cond, 0)) || !CONST_INT_P (XEXP (cond, 1)))
3444 /* It is going to be a load/store on condition. Make it
3445 slightly more expensive than a normal load. */
3446 *total = COSTS_N_INSNS (1) + 1;
3448 rtx dst = SET_DEST (x);
3449 rtx then = XEXP (SET_SRC (x), 1);
3450 rtx els = XEXP (SET_SRC (x), 2);
3452 /* It is a real IF-THEN-ELSE. An additional move will be
3453 needed to implement that. */
3454 if (reload_completed
3455 && !rtx_equal_p (dst, then)
3456 && !rtx_equal_p (dst, els))
3457 *total += COSTS_N_INSNS (1) / 2;
3459 /* A minor penalty for constants we cannot directly handle. */
3460 if ((CONST_INT_P (then) || CONST_INT_P (els))
3461 && (!TARGET_Z13 || MEM_P (dst)
3462 || (CONST_INT_P (then) && !satisfies_constraint_K (then))
3463 || (CONST_INT_P (els) && !satisfies_constraint_K (els))))
3464 *total += COSTS_N_INSNS (1) / 2;
3466 /* A store on condition can only handle register src operands. */
3467 if (MEM_P (dst) && (!REG_P (then) || !REG_P (els)))
3468 *total += COSTS_N_INSNS (1) / 2;
3474 if (GET_CODE (XEXP (x, 0)) == AND
3475 && GET_CODE (XEXP (x, 1)) == ASHIFT
3476 && REG_P (XEXP (XEXP (x, 0), 0))
3477 && REG_P (XEXP (XEXP (x, 1), 0))
3478 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3479 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3480 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3481 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3483 *total = COSTS_N_INSNS (2);
3487 /* ~AND on a 128 bit mode. This can be done using a vector instruction. */
3490 && GET_CODE (XEXP (x, 0)) == NOT
3491 && GET_CODE (XEXP (x, 1)) == NOT
3492 && REG_P (XEXP (XEXP (x, 0), 0))
3493 && REG_P (XEXP (XEXP (x, 1), 0))
3494 && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) == 16
3495 && s390_hard_regno_mode_ok (VR0_REGNUM,
3496 GET_MODE (XEXP (XEXP (x, 0), 0))))
3498 *total = COSTS_N_INSNS (1);
3511 *total = COSTS_N_INSNS (1);
3516 *total = COSTS_N_INSNS (1);
3524 rtx left = XEXP (x, 0);
3525 rtx right = XEXP (x, 1);
3526 if (GET_CODE (right) == CONST_INT
3527 && CONST_OK_FOR_K (INTVAL (right)))
3528 *total = s390_cost->mhi;
3529 else if (GET_CODE (left) == SIGN_EXTEND)
3530 *total = s390_cost->mh;
3532 *total = s390_cost->ms; /* msr, ms, msy */
3537 rtx left = XEXP (x, 0);
3538 rtx right = XEXP (x, 1);
3541 if (GET_CODE (right) == CONST_INT
3542 && CONST_OK_FOR_K (INTVAL (right)))
3543 *total = s390_cost->mghi;
3544 else if (GET_CODE (left) == SIGN_EXTEND)
3545 *total = s390_cost->msgf;
3547 *total = s390_cost->msg; /* msgr, msg */
3549 else /* TARGET_31BIT */
3551 if (GET_CODE (left) == SIGN_EXTEND
3552 && GET_CODE (right) == SIGN_EXTEND)
3553 /* mulsidi case: mr, m */
3554 *total = s390_cost->m;
3555 else if (GET_CODE (left) == ZERO_EXTEND
3556 && GET_CODE (right) == ZERO_EXTEND
3557 && TARGET_CPU_ZARCH)
3558 /* umulsidi case: ml, mlr */
3559 *total = s390_cost->ml;
3561 /* Complex calculation is required. */
3562 *total = COSTS_N_INSNS (40);
3568 *total = s390_cost->mult_df;
3571 *total = s390_cost->mxbr;
3582 *total = s390_cost->madbr;
3585 *total = s390_cost->maebr;
3590 /* Negation in the third argument is free: FMSUB. */
3591 if (GET_CODE (XEXP (x, 2)) == NEG)
3593 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3594 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3595 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3602 if (mode == TImode) /* 128 bit division */
3603 *total = s390_cost->dlgr;
3604 else if (mode == DImode)
3606 rtx right = XEXP (x, 1);
3607 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3608 *total = s390_cost->dlr;
3609 else /* 64 by 64 bit division */
3610 *total = s390_cost->dlgr;
3612 else if (mode == SImode) /* 32 bit division */
3613 *total = s390_cost->dlr;
3620 rtx right = XEXP (x, 1);
3621 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3623 *total = s390_cost->dsgfr;
3625 *total = s390_cost->dr;
3626 else /* 64 by 64 bit division */
3627 *total = s390_cost->dsgr;
3629 else if (mode == SImode) /* 32 bit division */
3630 *total = s390_cost->dlr;
3631 else if (mode == SFmode)
3633 *total = s390_cost->debr;
3635 else if (mode == DFmode)
3637 *total = s390_cost->ddbr;
3639 else if (mode == TFmode)
3641 *total = s390_cost->dxbr;
3647 *total = s390_cost->sqebr;
3648 else if (mode == DFmode)
3649 *total = s390_cost->sqdbr;
3651 *total = s390_cost->sqxbr;
3656 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3657 || outer_code == PLUS || outer_code == MINUS
3658 || outer_code == COMPARE)
3663 *total = COSTS_N_INSNS (1);
3664 if (GET_CODE (XEXP (x, 0)) == AND
3665 && GET_CODE (XEXP (x, 1)) == CONST_INT
3666 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3668 rtx op0 = XEXP (XEXP (x, 0), 0);
3669 rtx op1 = XEXP (XEXP (x, 0), 1);
3670 rtx op2 = XEXP (x, 1);
3672 if (memory_operand (op0, GET_MODE (op0))
3673 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3675 if (register_operand (op0, GET_MODE (op0))
3676 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3686 /* Return the cost of an address rtx ADDR. */
3689 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3690 addr_space_t as ATTRIBUTE_UNUSED,
3691 bool speed ATTRIBUTE_UNUSED)
3693 struct s390_address ad;
3694 if (!s390_decompose_address (addr, &ad))
3697 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3700 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3702 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3704 int misalign ATTRIBUTE_UNUSED)
3706 switch (type_of_cost)
3716 case cond_branch_not_taken:
3718 case vec_promote_demote:
3719 case unaligned_load:
3720 case unaligned_store:
3723 case cond_branch_taken:
3727 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3734 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3735 otherwise return 0. */
3738 tls_symbolic_operand (rtx op)
3740 if (GET_CODE (op) != SYMBOL_REF)
3742 return SYMBOL_REF_TLS_MODEL (op);
3745 /* Split DImode access register reference REG (on 64-bit) into its constituent
3746 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3747 gen_highpart cannot be used as they assume all registers are word-sized,
3748 while our access registers have only half that size. */
3751 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3753 gcc_assert (TARGET_64BIT);
3754 gcc_assert (ACCESS_REG_P (reg));
3755 gcc_assert (GET_MODE (reg) == DImode);
3756 gcc_assert (!(REGNO (reg) & 1));
3758 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3759 *hi = gen_rtx_REG (SImode, REGNO (reg));
3762 /* Return true if OP contains a symbol reference. */
3765 symbolic_reference_mentioned_p (rtx op)
3770 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3773 fmt = GET_RTX_FORMAT (GET_CODE (op));
3774 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3780 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3781 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3785 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3792 /* Return true if OP contains a reference to a thread-local symbol. */
3795 tls_symbolic_reference_mentioned_p (rtx op)
3800 if (GET_CODE (op) == SYMBOL_REF)
3801 return tls_symbolic_operand (op);
3803 fmt = GET_RTX_FORMAT (GET_CODE (op));
3804 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3810 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3811 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3815 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3823 /* Return true if OP is a legitimate general operand when
3824 generating PIC code. It is given that flag_pic is on
3825 and that OP satisfies CONSTANT_P. */
3828 legitimate_pic_operand_p (rtx op)
3830 /* Accept all non-symbolic constants. */
3831 if (!SYMBOLIC_CONST (op))
3834 /* Reject everything else; must be handled
3835 via emit_symbolic_move. */
3839 /* Returns true if the constant value OP is a legitimate general operand.
3840 It is given that OP satisfies CONSTANT_P. */
3843 s390_legitimate_constant_p (machine_mode mode, rtx op)
3845 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3847 if (GET_MODE_SIZE (mode) != 16)
3850 if (!satisfies_constraint_j00 (op)
3851 && !satisfies_constraint_jm1 (op)
3852 && !satisfies_constraint_jKK (op)
3853 && !satisfies_constraint_jxx (op)
3854 && !satisfies_constraint_jyy (op))
3858 /* Accept all non-symbolic constants. */
3859 if (!SYMBOLIC_CONST (op))
3862 /* Accept immediate LARL operands. */
3863 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3866 /* Thread-local symbols are never legal constants. This is
3867 so that emit_call knows that computing such addresses
3868 might require a function call. */
3869 if (TLS_SYMBOLIC_CONST (op))
3872 /* In the PIC case, symbolic constants must *not* be
3873 forced into the literal pool. We accept them here,
3874 so that they will be handled by emit_symbolic_move. */
3878 /* All remaining non-PIC symbolic constants are
3879 forced into the literal pool. */
3883 /* Determine if it's legal to put X into the constant pool. This
3884 is not possible if X contains the address of a symbol that is
3885 not constant (TLS) or not known at final link time (PIC). */
3888 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3890 switch (GET_CODE (x))
3894 case CONST_WIDE_INT:
3896 /* Accept all non-symbolic constants. */
3900 /* Labels are OK iff we are non-PIC. */
3901 return flag_pic != 0;
3904 /* 'Naked' TLS symbol references are never OK,
3905 non-TLS symbols are OK iff we are non-PIC. */
3906 if (tls_symbolic_operand (x))
3909 return flag_pic != 0;
3912 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3915 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3916 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3919 switch (XINT (x, 1))
3921 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3922 case UNSPEC_LTREL_OFFSET:
3930 case UNSPEC_GOTNTPOFF:
3931 case UNSPEC_INDNTPOFF:
3934 /* If the literal pool shares the code section, we put
3935 execute template placeholders into the pool as well. */
3937 return TARGET_CPU_ZARCH;
3949 /* Returns true if the constant value OP is a legitimate general
3950 operand during and after reload. The difference to
3951 legitimate_constant_p is that this function will not accept
3952 a constant that would need to be forced to the literal pool
3953 before it can be used as operand.
3954 This function accepts all constants which can be loaded directly into a GPR. */
3958 legitimate_reload_constant_p (rtx op)
3960 /* Accept la(y) operands. */
3961 if (GET_CODE (op) == CONST_INT
3962 && DISP_IN_RANGE (INTVAL (op)))
3965 /* Accept l(g)hi/l(g)fi operands. */
3966 if (GET_CODE (op) == CONST_INT
3967 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3970 /* Accept lliXX operands. */
3972 && GET_CODE (op) == CONST_INT
3973 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3974 && s390_single_part (op, word_mode, HImode, 0) >= 0)
3978 && GET_CODE (op) == CONST_INT
3979 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3980 && s390_single_part (op, word_mode, SImode, 0) >= 0)
3983 /* Accept larl operands. */
3984 if (TARGET_CPU_ZARCH
3985 && larl_operand (op, VOIDmode))
3988 /* Accept floating-point zero operands that fit into a single GPR. */
3989 if (GET_CODE (op) == CONST_DOUBLE
3990 && s390_float_const_zero_p (op)
3991 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
3994 /* Accept double-word operands that can be split. */
3995 if (GET_CODE (op) == CONST_WIDE_INT
3996 || (GET_CODE (op) == CONST_INT
3997 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
3999 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
4000 rtx hi = operand_subword (op, 0, 0, dword_mode);
4001 rtx lo = operand_subword (op, 1, 0, dword_mode);
4002 return legitimate_reload_constant_p (hi)
4003 && legitimate_reload_constant_p (lo);
4006 /* Everything else cannot be handled without reload. */
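
/* Illustrative sketch of the double-word split above, for a 31-bit
   word size: a 64-bit constant is reloadable iff both of its 32-bit
   word halves are.  Subword 0 is the most significant half, as with
   operand_subword on this big-endian target.  */

static void
example_split_dword (unsigned long long v, unsigned *hi, unsigned *lo)
{
  *hi = (unsigned) (v >> 32);
  *lo = (unsigned) (v & 0xffffffffu);
}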
4010 /* Returns true if the constant value OP is a legitimate fp operand
4011 during and after reload.
4012 This function accepts all constants which can be loaded directly into an FPR. */
4016 legitimate_reload_fp_constant_p (rtx op)
4018 /* Accept floating-point zero operands if the load zero instruction
4019 can be used. Prior to z196 the load fp zero instruction caused a
4020 performance penalty if the result is used as a BFP number. */
4022 && GET_CODE (op) == CONST_DOUBLE
4023 && s390_float_const_zero_p (op))
4029 /* Returns true if the constant value OP is a legitimate vector operand
4030 during and after reload.
4031 This function accepts all constants which can be loaded directly into a VR. */
4035 legitimate_reload_vector_constant_p (rtx op)
4037 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
4038 && (satisfies_constraint_j00 (op)
4039 || satisfies_constraint_jm1 (op)
4040 || satisfies_constraint_jKK (op)
4041 || satisfies_constraint_jxx (op)
4042 || satisfies_constraint_jyy (op)))
4048 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
4049 return the class of reg to actually use. */
4052 s390_preferred_reload_class (rtx op, reg_class_t rclass)
4054 switch (GET_CODE (op))
4056 /* Constants we cannot reload into general registers
4057 must be forced into the literal pool. */
4061 case CONST_WIDE_INT:
4062 if (reg_class_subset_p (GENERAL_REGS, rclass)
4063 && legitimate_reload_constant_p (op))
4064 return GENERAL_REGS;
4065 else if (reg_class_subset_p (ADDR_REGS, rclass)
4066 && legitimate_reload_constant_p (op))
4068 else if (reg_class_subset_p (FP_REGS, rclass)
4069 && legitimate_reload_fp_constant_p (op))
4071 else if (reg_class_subset_p (VEC_REGS, rclass)
4072 && legitimate_reload_vector_constant_p (op))
4077 /* If a symbolic constant or a PLUS is reloaded,
4078 it is most likely being used as an address, so
4079 prefer ADDR_REGS. If RCLASS is not a superset
4080 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4082 /* Symrefs cannot be pushed into the literal pool with -fPIC
4083 so we *MUST NOT* return NO_REGS for these cases
4084 (s390_cannot_force_const_mem will return true).
4086 On the other hand we MUST return NO_REGS for symrefs with
4087 invalid addend which might have been pushed to the literal
4088 pool (no -fPIC). Usually we would expect them to be
4089 handled via secondary reload but this does not happen if
4090 they are used as literal pool slot replacement in reload
4091 inheritance (see emit_input_reload_insns). */
4092 if (TARGET_CPU_ZARCH
4093 && GET_CODE (XEXP (op, 0)) == PLUS
4094 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4095 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4097 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4105 if (!legitimate_reload_constant_p (op))
4109 /* load address will be used. */
4110 if (reg_class_subset_p (ADDR_REGS, rclass))
4122 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4123 multiple of ALIGNMENT and the SYMBOL_REF being naturally aligned. */
4127 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4129 HOST_WIDE_INT addend;
4132 /* The "required alignment" might be 0 (e.g. for certain structs
4133 accessed via BLKmode). Early abort in this case, as well as when
4134 an alignment > 8 is required. */
4135 if (alignment < 2 || alignment > 8)
4138 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4141 if (addend & (alignment - 1))
4144 if (GET_CODE (symref) == SYMBOL_REF)
4146 /* We have load-relative instructions for 2-byte, 4-byte, and
4147 8-byte alignment so allow only these. */
4150 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4151 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4152 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4153 default: return false;
4157 if (GET_CODE (symref) == UNSPEC
4158 && alignment <= UNITS_PER_LONG)
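
/* Illustrative sketch: an addend keeps the combined address aligned
   iff its low bits are clear, which is the (addend & (alignment - 1))
   test above.  For example, sym + 6 is acceptable for 2-byte but not
   for 4-byte alignment.  ALIGNMENT is assumed to be a power of two.  */

static int
example_addend_aligned_p (long long addend, long long alignment)
{
  return (addend & (alignment - 1)) == 0;
}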
4164 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4165 operand SCRATCH is used to reload the even part of the address and
4169 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4171 HOST_WIDE_INT addend;
4174 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4178 /* Easy case. The addend is even so larl will do fine. */
4179 emit_move_insn (reg, addr);
4182 /* We can leave the scratch register untouched if the target
4183 register is a valid base register. */
4184 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4185 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4188 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4189 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4192 emit_move_insn (scratch,
4193 gen_rtx_CONST (Pmode,
4194 gen_rtx_PLUS (Pmode, symref,
4195 GEN_INT (addend - 1))));
4197 emit_move_insn (scratch, symref);
4199 /* Increment the address using la in order to avoid clobbering cc. */
4200 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4204 /* Generate what is necessary to move between REG and MEM using
4205 SCRATCH. The direction is given by TOMEM. */
4208 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4210 /* Reload might have pulled a constant out of the literal pool.
4211 Force it back in. */
4212 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4213 || GET_CODE (mem) == CONST_WIDE_INT
4214 || GET_CODE (mem) == CONST_VECTOR
4215 || GET_CODE (mem) == CONST)
4216 mem = force_const_mem (GET_MODE (reg), mem);
4218 gcc_assert (MEM_P (mem));
4220 /* For a load from memory we can leave the scratch register
4221 untouched if the target register is a valid base register. */
4223 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4224 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4225 && GET_MODE (reg) == GET_MODE (scratch))
4228 /* Load address into scratch register. Since we can't have a
4229 secondary reload for a secondary reload we have to cover the case
4230 where larl would need a secondary reload here as well. */
4231 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4233 /* Now we can use a standard load/store to do the move. */
4235 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4237 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4240 /* Inform reload about cases where moving X with a mode MODE to a register in
4241 RCLASS requires an extra scratch or immediate register. Return the class
4242 needed for the immediate register. */
4245 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4246 machine_mode mode, secondary_reload_info *sri)
4248 enum reg_class rclass = (enum reg_class) rclass_i;
4250 /* Intermediate register needed. */
4251 if (reg_classes_intersect_p (CC_REGS, rclass))
4252 return GENERAL_REGS;
4256 /* The vst/vl vector move instructions allow only for short displacements. */
4259 && GET_CODE (XEXP (x, 0)) == PLUS
4260 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4261 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4262 && reg_class_subset_p (rclass, VEC_REGS)
4263 && (!reg_class_subset_p (rclass, FP_REGS)
4264 || (GET_MODE_SIZE (mode) > 8
4265 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4268 sri->icode = (TARGET_64BIT ?
4269 CODE_FOR_reloaddi_la_in :
4270 CODE_FOR_reloadsi_la_in);
4272 sri->icode = (TARGET_64BIT ?
4273 CODE_FOR_reloaddi_la_out :
4274 CODE_FOR_reloadsi_la_out);
4280 HOST_WIDE_INT offset;
4283 /* On z10 several optimizer steps may generate larl operands with an odd addend. */
4286 && s390_loadrelative_operand_p (x, &symref, &offset)
4288 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4289 && (offset & 1) == 1)
4290 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4291 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4293 /* Handle all the (mem (symref)) accesses we cannot use the z10
4294 instructions for. */
4296 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4298 || !reg_class_subset_p (rclass, GENERAL_REGS)
4299 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4300 || !s390_check_symref_alignment (XEXP (x, 0),
4301 GET_MODE_SIZE (mode))))
4303 #define __SECONDARY_RELOAD_CASE(M,m) \
4306 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4307 CODE_FOR_reload##m##di_tomem_z10; \
4309 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4310 CODE_FOR_reload##m##si_tomem_z10; \
4313 switch (GET_MODE (x))
4315 __SECONDARY_RELOAD_CASE (QI, qi);
4316 __SECONDARY_RELOAD_CASE (HI, hi);
4317 __SECONDARY_RELOAD_CASE (SI, si);
4318 __SECONDARY_RELOAD_CASE (DI, di);
4319 __SECONDARY_RELOAD_CASE (TI, ti);
4320 __SECONDARY_RELOAD_CASE (SF, sf);
4321 __SECONDARY_RELOAD_CASE (DF, df);
4322 __SECONDARY_RELOAD_CASE (TF, tf);
4323 __SECONDARY_RELOAD_CASE (SD, sd);
4324 __SECONDARY_RELOAD_CASE (DD, dd);
4325 __SECONDARY_RELOAD_CASE (TD, td);
4326 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4327 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4328 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4329 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4330 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4331 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4332 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4333 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4334 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4335 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4336 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4337 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4338 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4339 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4340 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4341 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4342 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4343 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4344 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4345 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4346 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4350 #undef __SECONDARY_RELOAD_CASE
4354 /* We need a scratch register when loading a PLUS expression which
4355 is not a legitimate operand of the LOAD ADDRESS instruction. */
4356 /* LRA can deal with transformation of plus op very well -- so we
4357 don't need to prompt LRA in this case. */
4358 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4359 sri->icode = (TARGET_64BIT ?
4360 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4362 /* When performing a multiword move from or to memory, we have to make sure the
4363 second chunk in memory is addressable without causing a displacement
4364 overflow. If that would be the case we calculate the address in
4365 a scratch register. */
4367 && GET_CODE (XEXP (x, 0)) == PLUS
4368 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4369 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4370 + GET_MODE_SIZE (mode) - 1))
4372 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4373 in an s_operand address since we may fall back to lm/stm. So we only
4374 have to care about overflows in the b+i+d case. */
4375 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4376 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4377 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4378 /* For FP_REGS no lm/stm is available so this check is triggered
4379 for displacement overflows in b+i+d and b+d like addresses. */
4380 || (reg_classes_intersect_p (FP_REGS, rclass)
4381 && s390_class_max_nregs (FP_REGS, mode) > 1))
4384 sri->icode = (TARGET_64BIT ?
4385 CODE_FOR_reloaddi_la_in :
4386 CODE_FOR_reloadsi_la_in);
4388 sri->icode = (TARGET_64BIT ?
4389 CODE_FOR_reloaddi_la_out :
4390 CODE_FOR_reloadsi_la_out);
4394 /* A scratch address register is needed when a symbolic constant is
4395 copied to r0 when compiling with -fPIC. In other cases the target
4396 register might be used as temporary (see legitimize_pic_address). */
4397 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4398 sri->icode = (TARGET_64BIT ?
4399 CODE_FOR_reloaddi_PIC_addr :
4400 CODE_FOR_reloadsi_PIC_addr);
4402 /* Either scratch or no register needed. */
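
/* Illustrative sketch of the multiword displacement check above: an
   access of SIZE bytes at displacement D touches bytes D through
   D + SIZE - 1, so the second chunk is addressable iff the last byte
   still fits the signed 20-bit displacement field.  */

static int
example_second_chunk_ok_p (long long d, long long size)
{
  long long last = d + size - 1;

  return last >= -524288 && last <= 524287;
}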
4406 /* Generate code to load SRC, which is PLUS that is not a
4407 legitimate operand for the LA instruction, into TARGET.
4408 SCRATCH may be used as scratch register. */
4411 s390_expand_plus_operand (rtx target, rtx src,
4415 struct s390_address ad;
4417 /* src must be a PLUS; get its two operands. */
4418 gcc_assert (GET_CODE (src) == PLUS);
4419 gcc_assert (GET_MODE (src) == Pmode);
4421 /* Check if any of the two operands is already scheduled
4422 for replacement by reload. This can happen e.g. when
4423 float registers occur in an address. */
4424 sum1 = find_replacement (&XEXP (src, 0));
4425 sum2 = find_replacement (&XEXP (src, 1));
4426 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4428 /* If the address is already strictly valid, there's nothing to do. */
4429 if (!s390_decompose_address (src, &ad)
4430 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4431 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4433 /* Otherwise, one of the operands cannot be an address register;
4434 we reload its value into the scratch register. */
4435 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4437 emit_move_insn (scratch, sum1);
4440 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4442 emit_move_insn (scratch, sum2);
4446 /* According to the way these invalid addresses are generated
4447 in reload.c, it should never happen (at least on s390) that
4448 *neither* of the PLUS components, after find_replacements
4449 was applied, is an address register. */
4450 if (sum1 == scratch && sum2 == scratch)
4456 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4459 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4460 is only ever performed on addresses, so we can mark the
4461 sum as legitimate for LA in any case. */
4462 s390_load_address (target, src);
4466 /* Return true if ADDR is a valid memory address.
4467 STRICT specifies whether strict register checking applies. */
4470 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4472 struct s390_address ad;
4475 && larl_operand (addr, VOIDmode)
4476 && (mode == VOIDmode
4477 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4480 if (!s390_decompose_address (addr, &ad))
4485 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4488 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4494 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4495 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4499 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4500 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4506 /* Return true if OP is a valid operand for the LA instruction.
4507 In 31-bit, we need to prove that the result is used as an
4508 address, as LA performs only a 31-bit addition. */
4511 legitimate_la_operand_p (rtx op)
4513 struct s390_address addr;
4514 if (!s390_decompose_address (op, &addr))
4517 return (TARGET_64BIT || addr.pointer);
4520 /* Return true if it is valid *and* preferable to use LA to
4521 compute the sum of OP1 and OP2. */
4524 preferred_la_operand_p (rtx op1, rtx op2)
4526 struct s390_address addr;
4528 if (op2 != const0_rtx)
4529 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4531 if (!s390_decompose_address (op1, &addr))
4533 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4535 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4538 /* Avoid LA instructions with index register on z196; it is
4539 preferable to use regular add instructions when possible.
4540 Starting with zEC12 the la with index register is "uncracked" again. */
4542 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4545 if (!TARGET_64BIT && !addr.pointer)
4551 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4552 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4558 /* Emit a forced load-address operation to load SRC into DST.
4559 This will use the LOAD ADDRESS instruction even in situations
4560 where legitimate_la_operand_p (SRC) returns false. */
4563 s390_load_address (rtx dst, rtx src)
4566 emit_move_insn (dst, src);
4568 emit_insn (gen_force_la_31 (dst, src));
4571 /* Return true if it is OK to use SYMBOL_REF in a relative address. */
4574 s390_rel_address_ok_p (rtx symbol_ref)
4578 if (symbol_ref == s390_got_symbol () || CONSTANT_POOL_ADDRESS_P (symbol_ref))
4581 decl = SYMBOL_REF_DECL (symbol_ref);
4583 if (!flag_pic || SYMBOL_REF_LOCAL_P (symbol_ref))
4584 return (s390_pic_data_is_text_relative
4586 && TREE_CODE (decl) == FUNCTION_DECL));
4591 /* Return a legitimate reference for ORIG (an address) using the
4592 register REG. If REG is 0, a new pseudo is generated.
4594 There are two types of references that must be handled:
4596 1. Global data references must load the address from the GOT, via
4597 the PIC reg. An insn is emitted to do this load, and the reg is
4600 2. Static data references, constant pool addresses, and code labels
4601 compute the address as an offset from the GOT, whose base is in
4602 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4603 differentiate them from global data objects. The returned
4604 address is the PIC reg + an unspec constant.
4606 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4607 reg also appears in the address. */
4610 legitimize_pic_address (rtx orig, rtx reg)
4613 rtx addend = const0_rtx;
4616 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4618 if (GET_CODE (addr) == CONST)
4619 addr = XEXP (addr, 0);
4621 if (GET_CODE (addr) == PLUS)
4623 addend = XEXP (addr, 1);
4624 addr = XEXP (addr, 0);
4627 if ((GET_CODE (addr) == LABEL_REF
4628 || (SYMBOL_REF_P (addr) && s390_rel_address_ok_p (addr))
4629 || (GET_CODE (addr) == UNSPEC &&
4630 (XINT (addr, 1) == UNSPEC_GOTENT
4631 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4632 && GET_CODE (addend) == CONST_INT)
4634 /* This can be locally addressed. */
4636 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4637 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4638 gen_rtx_CONST (Pmode, addr) : addr);
4640 if (TARGET_CPU_ZARCH
4641 && larl_operand (const_addr, VOIDmode)
4642 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4643 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
4645 if (INTVAL (addend) & 1)
4647 /* LARL can't handle odd offsets, so emit a pair of LARL and LA. */
4649 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4651 if (!DISP_IN_RANGE (INTVAL (addend)))
4653 HOST_WIDE_INT even = INTVAL (addend) - 1;
4654 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4655 addr = gen_rtx_CONST (Pmode, addr);
4656 addend = const1_rtx;
4659 emit_move_insn (temp, addr);
4660 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4664 s390_load_address (reg, new_rtx);
4670 /* If the offset is even, we can just use LARL. This
4671 will happen automatically. */
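
/* Illustrative sketch of the out-of-range odd-addend path above: LARL
   encodes only even (halfword) offsets, so sym + odd is materialized
   as LARL to sym + (odd - 1) followed by LA reg,1(reg).  When the
   addend fits the LA displacement, the code above instead adds it
   whole; range checks are omitted here.  */

static void
example_split_odd_addend (long long addend, long long *larl_disp,
                          long long *la_disp)
{
  *larl_disp = addend & ~1LL;   /* Even part, encodable by LARL.  */
  *la_disp = addend & 1LL;      /* Remaining 0 or 1, added with LA.  */
}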
4676 /* No larl - Access local symbols relative to the GOT. */
4678 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4680 if (reload_in_progress || reload_completed)
4681 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4683 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4684 if (addend != const0_rtx)
4685 addr = gen_rtx_PLUS (Pmode, addr, addend);
4686 addr = gen_rtx_CONST (Pmode, addr);
4687 addr = force_const_mem (Pmode, addr);
4688 emit_move_insn (temp, addr);
4690 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4693 s390_load_address (reg, new_rtx);
4698 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4700 /* A non-local symbol reference without addend.
4702 The symbol ref is wrapped into an UNSPEC to make sure the
4703 proper operand modifier (@GOT or @GOTENT) will be emitted.
4704 This will tell the linker to put the symbol into the GOT.
4706 Additionally the code dereferencing the GOT slot is emitted here.
4708 An addend to the symref needs to be added afterwards.
4709 legitimize_pic_address calls itself recursively to handle
4710 that case. So no need to do it here. */
4713 reg = gen_reg_rtx (Pmode);
4717 /* Use load relative if possible.
4718 lgrl <target>, sym@GOTENT */
4719 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4720 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4721 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4723 emit_move_insn (reg, new_rtx);
4726 else if (flag_pic == 1)
4728 /* Assume GOT offset is a valid displacement operand (< 4k
4729 or < 512k with z990). This is handled the same way in
4730 both 31- and 64-bit code (@GOT).
4731 lg <target>, sym@GOT(r12) */
4733 if (reload_in_progress || reload_completed)
4734 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4736 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4737 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4738 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4739 new_rtx = gen_const_mem (Pmode, new_rtx);
4740 emit_move_insn (reg, new_rtx);
4743 else if (TARGET_CPU_ZARCH)
4745 /* If the GOT offset might be >= 4k, we determine the position
4746 of the GOT entry via a PC-relative LARL (@GOTENT).
4747 larl temp, sym@GOTENT
4748 lg <target>, 0(temp) */
4750 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4752 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4753 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4755 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4756 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4757 emit_move_insn (temp, new_rtx);
4759 new_rtx = gen_const_mem (Pmode, temp);
4760 emit_move_insn (reg, new_rtx);
4766 /* If the GOT offset might be >= 4k, we have to load it
4767 from the literal pool (@GOT).
4769 lg temp, lit-litbase(r13)
4770 lg <target>, 0(temp)
4771 lit: .long sym@GOT */
4773 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4775 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4776 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4778 if (reload_in_progress || reload_completed)
4779 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4781 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4782 addr = gen_rtx_CONST (Pmode, addr);
4783 addr = force_const_mem (Pmode, addr);
4784 emit_move_insn (temp, addr);
4786 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4787 new_rtx = gen_const_mem (Pmode, new_rtx);
4788 emit_move_insn (reg, new_rtx);
4792 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4794 gcc_assert (XVECLEN (addr, 0) == 1);
4795 switch (XINT (addr, 1))
4797 /* These address symbols (or PLT slots) relative to the GOT
4798 (not GOT slots!). In general this will exceed the
4799 displacement range so these values belong into the literal pool. */
4803 new_rtx = force_const_mem (Pmode, orig);
4806 /* For -fPIC the GOT size might exceed the displacement
4807 range so make sure the value is in the literal pool. */
4810 new_rtx = force_const_mem (Pmode, orig);
4813 /* For @GOTENT larl is used. This is handled like local symbol refs. */
4819 /* @PLT is OK as is on 64-bit, must be converted to
4820 GOT-relative @PLTOFF on 31-bit. */
4822 if (!TARGET_CPU_ZARCH)
4824 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4826 if (reload_in_progress || reload_completed)
4827 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4829 addr = XVECEXP (addr, 0, 0);
4830 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
4832 if (addend != const0_rtx)
4833 addr = gen_rtx_PLUS (Pmode, addr, addend);
4834 addr = gen_rtx_CONST (Pmode, addr);
4835 addr = force_const_mem (Pmode, addr);
4836 emit_move_insn (temp, addr);
4838 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4841 s390_load_address (reg, new_rtx);
4846 /* On 64 bit larl can be used. This case is handled like
4847 local symbol refs. */
4851 /* Everything else cannot happen. */
4856 else if (addend != const0_rtx)
4858 /* Otherwise, compute the sum. */
4860 rtx base = legitimize_pic_address (addr, reg);
4861 new_rtx = legitimize_pic_address (addend,
4862 base == reg ? NULL_RTX : reg);
4863 if (GET_CODE (new_rtx) == CONST_INT)
4864 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4867 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4869 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4870 new_rtx = XEXP (new_rtx, 1);
4872 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4875 if (GET_CODE (new_rtx) == CONST)
4876 new_rtx = XEXP (new_rtx, 0);
4877 new_rtx = force_operand (new_rtx, 0);
4883 /* Load the thread pointer into a register. */
4886 s390_get_thread_pointer (void)
4888 rtx tp = gen_reg_rtx (Pmode);
4890 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4891 mark_reg_pointer (tp, BITS_PER_WORD);
4896 /* Emit a TLS call insn. The call target is the SYMBOL_REF stored
4897 in s390_tls_symbol which always refers to __tls_get_offset.
4898 The returned offset is written to RESULT_REG and a USE rtx is
4899 generated for TLS_CALL. */
4901 static GTY(()) rtx s390_tls_symbol;
4904 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
4909 emit_insn (s390_load_got ());
4911 if (!s390_tls_symbol)
4912 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4914 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4915 gen_rtx_REG (Pmode, RETURN_REGNUM));
4917 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4918 RTL_CONST_CALL_P (insn) = 1;
4921 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4922 this (thread-local) address. REG may be used as temporary. */
4925 legitimize_tls_address (rtx addr, rtx reg)
4927 rtx new_rtx, tls_call, temp, base, r2;
4930 if (GET_CODE (addr) == SYMBOL_REF)
4931 switch (tls_symbolic_operand (addr))
4933 case TLS_MODEL_GLOBAL_DYNAMIC:
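/* A sketch of what this expands to (the exact insns depend on the
   target flags): load the sym@TLSGD constant from the literal pool
   into %r2, call __tls_get_offset, and add the thread pointer to
   the returned offset to form the final address. */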
4935 r2 = gen_rtx_REG (Pmode, 2);
4936 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
4937 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4938 new_rtx = force_const_mem (Pmode, new_rtx);
4939 emit_move_insn (r2, new_rtx);
4940 s390_emit_tls_call_insn (r2, tls_call);
4941 insn = get_insns ();
4944 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
4945 temp = gen_reg_rtx (Pmode);
4946 emit_libcall_block (insn, temp, r2, new_rtx);
4948 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4951 s390_load_address (reg, new_rtx);
4956 case TLS_MODEL_LOCAL_DYNAMIC:
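/* A sketch: call __tls_get_offset with the module's @TLSLDM
   constant to obtain the TLS block base, then add the symbol's
   @DTPOFF offset loaded from the literal pool. */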
4958 r2 = gen_rtx_REG (Pmode, 2);
4959 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
4960 new_rtx = gen_rtx_CONST (Pmode, tls_call);
4961 new_rtx = force_const_mem (Pmode, new_rtx);
4962 emit_move_insn (r2, new_rtx);
4963 s390_emit_tls_call_insn (r2, tls_call);
4964 insn = get_insns ();
4967 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
4968 temp = gen_reg_rtx (Pmode);
4969 emit_libcall_block (insn, temp, r2, new_rtx);
4971 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
4972 base = gen_reg_rtx (Pmode);
4973 s390_load_address (base, new_rtx);
4975 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
4976 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4977 new_rtx = force_const_mem (Pmode, new_rtx);
4978 temp = gen_reg_rtx (Pmode);
4979 emit_move_insn (temp, new_rtx);
4981 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
4984 s390_load_address (reg, new_rtx);
4989 case TLS_MODEL_INITIAL_EXEC:
4992 /* Assume GOT offset < 4k. This is handled the same way
4993 in both 31- and 64-bit code. */
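/* Sketch of the access: lg <temp>, sym@GOTNTPOFF(r12)
   -- the GOT slot holds the symbol's offset relative to the
   thread pointer. */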
4995 if (reload_in_progress || reload_completed)
4996 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4998 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
4999 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5000 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
5001 new_rtx = gen_const_mem (Pmode, new_rtx);
5002 temp = gen_reg_rtx (Pmode);
5003 emit_move_insn (temp, new_rtx);
5005 else if (TARGET_CPU_ZARCH)
5007 /* If the GOT offset might be >= 4k, we determine the position
5008 of the GOT entry via a PC-relative LARL. */
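/* Sketch:
   larl <temp>, sym@INDNTPOFF
   lg <temp>, 0(<temp>) */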
5010 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5011 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5012 temp = gen_reg_rtx (Pmode);
5013 emit_move_insn (temp, new_rtx);
5015 new_rtx = gen_const_mem (Pmode, temp);
5016 temp = gen_reg_rtx (Pmode);
5017 emit_move_insn (temp, new_rtx);
5021 /* If the GOT offset might be >= 4k, we have to load it
5022 from the literal pool. */
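/* Sketch (register roles as in the @GOT case above):
   lg <temp>, <lit>(r13) -- sym@GOTNTPOFF from the literal pool
   lg <temp>, 0(<temp>,r12) -- load the offset from the GOT */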
5024 if (reload_in_progress || reload_completed)
5025 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5027 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5028 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5029 new_rtx = force_const_mem (Pmode, new_rtx);
5030 temp = gen_reg_rtx (Pmode);
5031 emit_move_insn (temp, new_rtx);
5033 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
5034 new_rtx = gen_const_mem (Pmode, new_rtx);
5036 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5037 temp = gen_reg_rtx (Pmode);
5038 emit_insn (gen_rtx_SET (temp, new_rtx));
5042 /* In position-dependent code, load the absolute address of
5043 the GOT entry from the literal pool. */
5045 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5046 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5047 new_rtx = force_const_mem (Pmode, new_rtx);
5048 temp = gen_reg_rtx (Pmode);
5049 emit_move_insn (temp, new_rtx);
5052 new_rtx = gen_const_mem (Pmode, new_rtx);
5053 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5054 temp = gen_reg_rtx (Pmode);
5055 emit_insn (gen_rtx_SET (temp, new_rtx));
5058 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5061 s390_load_address (reg, new_rtx);
5066 case TLS_MODEL_LOCAL_EXEC:
5067 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5068 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5069 new_rtx = force_const_mem (Pmode, new_rtx);
5070 temp = gen_reg_rtx (Pmode);
5071 emit_move_insn (temp, new_rtx);
5073 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5076 s390_load_address (reg, new_rtx);
5085 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5087 switch (XINT (XEXP (addr, 0), 1))
5089 case UNSPEC_INDNTPOFF:
5090 gcc_assert (TARGET_CPU_ZARCH);
5099 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5100 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5102 new_rtx = XEXP (XEXP (addr, 0), 0);
5103 if (GET_CODE (new_rtx) != SYMBOL_REF)
5104 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5106 new_rtx = legitimize_tls_address (new_rtx, reg);
5107 new_rtx = plus_constant (Pmode, new_rtx,
5108 INTVAL (XEXP (XEXP (addr, 0), 1)));
5109 new_rtx = force_operand (new_rtx, 0);
5113 gcc_unreachable (); /* for now ... */
5118 /* Emit insns making the address in operands[1] valid for a standard
5119 move to operands[0]. operands[1] is replaced by an address which
5120 should be used instead of the former RTX to emit the move pattern. */
5124 emit_symbolic_move (rtx *operands)
5126 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5128 if (GET_CODE (operands[0]) == MEM)
5129 operands[1] = force_reg (Pmode, operands[1]);
5130 else if (TLS_SYMBOLIC_CONST (operands[1]))
5131 operands[1] = legitimize_tls_address (operands[1], temp);
5133 operands[1] = legitimize_pic_address (operands[1], temp);
5136 /* Try machine-dependent ways of modifying an illegitimate address X
5137 to be legitimate. If we find one, return the new, valid address.
5139 OLDX is the address as it was before break_out_memory_refs was called.
5140 In some cases it is useful to look at this to decide what needs to be done.
5142 MODE is the mode of the operand pointed to by X.
5144 When -fpic is used, special handling is needed for symbolic references.
5145 See comments by legitimize_pic_address for details. */
5148 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5149 machine_mode mode ATTRIBUTE_UNUSED)
5151 rtx constant_term = const0_rtx;
5153 if (TLS_SYMBOLIC_CONST (x))
5155 x = legitimize_tls_address (x, 0);
5157 if (s390_legitimate_address_p (mode, x, FALSE))
5160 else if (GET_CODE (x) == PLUS
5161 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5162 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5168 if (SYMBOLIC_CONST (x)
5169 || (GET_CODE (x) == PLUS
5170 && (SYMBOLIC_CONST (XEXP (x, 0))
5171 || SYMBOLIC_CONST (XEXP (x, 1)))))
5172 x = legitimize_pic_address (x, 0);
5174 if (s390_legitimate_address_p (mode, x, FALSE))
5178 x = eliminate_constant_term (x, &constant_term);
5180 /* Optimize loading of large displacements by splitting them
5181 into the multiple of 4K and the rest; this allows the
5182 former to be CSE'd if possible.
5184 Don't do this if the displacement is added to a register
5185 pointing into the stack frame, as the offsets will
5186 change later anyway. */
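/* E.g. a displacement of 0x12345 is split into upper == 0x12000
   (0x12345 ^ 0x345, loaded into a register and thus CSE-able) and
   lower == 0x345 (0x12345 & 0xfff, kept as displacement). */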
5188 if (GET_CODE (constant_term) == CONST_INT
5189 && !TARGET_LONG_DISPLACEMENT
5190 && !DISP_IN_RANGE (INTVAL (constant_term))
5191 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5193 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5194 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5196 rtx temp = gen_reg_rtx (Pmode);
5197 rtx val = force_operand (GEN_INT (upper), temp);
5199 emit_move_insn (temp, val);
5201 x = gen_rtx_PLUS (Pmode, x, temp);
5202 constant_term = GEN_INT (lower);
5205 if (GET_CODE (x) == PLUS)
5207 if (GET_CODE (XEXP (x, 0)) == REG)
5209 rtx temp = gen_reg_rtx (Pmode);
5210 rtx val = force_operand (XEXP (x, 1), temp);
5212 emit_move_insn (temp, val);
5214 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5217 else if (GET_CODE (XEXP (x, 1)) == REG)
5219 rtx temp = gen_reg_rtx (Pmode);
5220 rtx val = force_operand (XEXP (x, 0), temp);
5222 emit_move_insn (temp, val);
5224 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5228 if (constant_term != const0_rtx)
5229 x = gen_rtx_PLUS (Pmode, x, constant_term);
5234 /* Try a machine-dependent way of reloading an illegitimate address AD
5235 operand. If we find one, push the reload and return the new address.
5237 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5238 and TYPE is the reload type of the current reload. */
5241 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5242 int opnum, int type)
5244 if (!optimize || TARGET_LONG_DISPLACEMENT)
5247 if (GET_CODE (ad) == PLUS)
5249 rtx tem = simplify_binary_operation (PLUS, Pmode,
5250 XEXP (ad, 0), XEXP (ad, 1));
5255 if (GET_CODE (ad) == PLUS
5256 && GET_CODE (XEXP (ad, 0)) == REG
5257 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5258 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5260 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5261 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5262 rtx cst, tem, new_rtx;
5264 cst = GEN_INT (upper);
5265 if (!legitimate_reload_constant_p (cst))
5266 cst = force_const_mem (Pmode, cst);
5268 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5269 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5271 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5272 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
5273 opnum, (enum reload_type) type);
5280 /* Emit code to move LEN bytes from SRC to DST. */
5283 s390_expand_movmem (rtx dst, rtx src, rtx len)
5285 /* When tuning for z10 or higher we rely on the Glibc functions to
5286 do the right thing. Inline code is generated only for constant
5287 lengths below 64k. */
5288 if (s390_tune >= PROCESSOR_2097_Z10
5289 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5292 /* Expand memcpy for constant length operands without a loop if it
5293 is shorter that way.
5295 With a constant length argument a
5296 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
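/* E.g. a constant len of 600 emits three mvc instructions covering
   256 + 256 + 88 bytes; the length operand of the last one is
   l - 1 == 87. */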
5297 if (GET_CODE (len) == CONST_INT
5298 && INTVAL (len) >= 0
5299 && INTVAL (len) <= 256 * 6
5300 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5304 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5306 rtx newdst = adjust_address (dst, BLKmode, o);
5307 rtx newsrc = adjust_address (src, BLKmode, o);
5308 emit_insn (gen_movmem_short (newdst, newsrc,
5309 GEN_INT (l > 256 ? 255 : l - 1)));
5313 else if (TARGET_MVCLE)
5315 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5320 rtx dst_addr, src_addr, count, blocks, temp;
5321 rtx_code_label *loop_start_label = gen_label_rtx ();
5322 rtx_code_label *loop_end_label = gen_label_rtx ();
5323 rtx_code_label *end_label = gen_label_rtx ();
5326 mode = GET_MODE (len);
5327 if (mode == VOIDmode)
5330 dst_addr = gen_reg_rtx (Pmode);
5331 src_addr = gen_reg_rtx (Pmode);
5332 count = gen_reg_rtx (mode);
5333 blocks = gen_reg_rtx (mode);
5335 convert_move (count, len, 1);
5336 emit_cmp_and_jump_insns (count, const0_rtx,
5337 EQ, NULL_RTX, mode, 1, end_label);
5339 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5340 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5341 dst = change_address (dst, VOIDmode, dst_addr);
5342 src = change_address (src, VOIDmode, src_addr);
5344 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5347 emit_move_insn (count, temp);
5349 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5352 emit_move_insn (blocks, temp);
5354 emit_cmp_and_jump_insns (blocks, const0_rtx,
5355 EQ, NULL_RTX, mode, 1, loop_end_label);
5357 emit_label (loop_start_label);
5360 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5364 /* Issue a read prefetch for the +3 cache line. */
5365 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5366 const0_rtx, const0_rtx);
5367 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5368 emit_insn (prefetch);
5370 /* Issue a write prefetch for the +3 cache line. */
5371 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5372 const1_rtx, const0_rtx);
5373 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5374 emit_insn (prefetch);
5377 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5378 s390_load_address (dst_addr,
5379 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5380 s390_load_address (src_addr,
5381 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5383 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5386 emit_move_insn (blocks, temp);
5388 emit_cmp_and_jump_insns (blocks, const0_rtx,
5389 EQ, NULL_RTX, mode, 1, loop_end_label);
5391 emit_jump (loop_start_label);
5392 emit_label (loop_end_label);
5394 emit_insn (gen_movmem_short (dst, src,
5395 convert_to_mode (Pmode, count, 1)));
5396 emit_label (end_label);
5401 /* Emit code to set LEN bytes at DST to VAL.
5402 Make use of clrmem if VAL is zero. */
5405 s390_expand_setmem (rtx dst, rtx len, rtx val)
5407 if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
5410 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5412 /* Expand setmem/clrmem for a constant length operand without a
5413 loop if it will be shorter that way.
5414 With a constant length and without pfd argument a
5415 clrmem loop is 32 bytes -> 5.3 * xc
5416 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
5417 if (GET_CODE (len) == CONST_INT
5418 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5419 || INTVAL (len) <= 257 * 3)
5420 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5424 if (val == const0_rtx)
5425 /* clrmem: emit 256 byte blockwise XCs. */
5426 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5428 rtx newdst = adjust_address (dst, BLKmode, o);
5429 emit_insn (gen_clrmem_short (newdst,
5430 GEN_INT (l > 256 ? 255 : l - 1)));
5433 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5434 setting first byte to val and using a 256 byte mvc with one
5435 byte overlap to propagate the byte. */
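/* Each full block thus covers 257 bytes: one byte written by
   mvi/stc plus 256 bytes written by the overlapping mvc, which
   copies byte n to byte n + 1 and thereby replicates VAL. */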
5436 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5438 rtx newdst = adjust_address (dst, BLKmode, o);
5439 emit_move_insn (adjust_address (dst, QImode, o), val);
5442 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5443 emit_insn (gen_movmem_short (newdstp1, newdst,
5444 GEN_INT (l > 257 ? 255 : l - 2)));
5449 else if (TARGET_MVCLE)
5451 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
5453 emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
5456 emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
5462 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5463 rtx_code_label *loop_start_label = gen_label_rtx ();
5464 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5465 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5466 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5469 mode = GET_MODE (len);
5470 if (mode == VOIDmode)
5473 dst_addr = gen_reg_rtx (Pmode);
5474 count = gen_reg_rtx (mode);
5475 blocks = gen_reg_rtx (mode);
5477 convert_move (count, len, 1);
5478 emit_cmp_and_jump_insns (count, const0_rtx,
5479 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5480 profile_probability::very_unlikely ());
5482 /* We need to make a copy of the target address since memset is
5483 supposed to return it unmodified. We have to make it here
5484 already since the new reg is used at onebyte_end_label. */
5485 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5486 dst = change_address (dst, VOIDmode, dst_addr);
5488 if (val != const0_rtx)
5490 /* When using the overlapping mvc the original target
5491 address is only accessed as a single byte entity (even by
5492 the mvc reading this value). */
5493 set_mem_size (dst, 1);
5494 dstp1 = adjust_address (dst, VOIDmode, 1);
5495 emit_cmp_and_jump_insns (count,
5496 const1_rtx, EQ, NULL_RTX, mode, 1,
5498 profile_probability::very_unlikely ());
5501 /* There is one unconditional (mvi+mvc)/xc after the loop
5502 dealing with the rest of the bytes, subtracting two (mvi+mvc)
5503 or one (xc) here leaves this number of bytes to be handled by the loop. */
5505 temp = expand_binop (mode, add_optab, count,
5506 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5507 count, 1, OPTAB_DIRECT);
5509 emit_move_insn (count, temp);
5511 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5514 emit_move_insn (blocks, temp);
5516 emit_cmp_and_jump_insns (blocks, const0_rtx,
5517 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5519 emit_jump (loop_start_label);
5521 if (val != const0_rtx)
5523 /* The 1 byte != 0 special case. Not handled efficiently
5524 since we require two jumps for that. However, this
5525 should be very rare. */
5526 emit_label (onebyte_end_label);
5527 emit_move_insn (adjust_address (dst, QImode, 0), val);
5528 emit_jump (zerobyte_end_label);
5531 emit_label (loop_start_label);
5534 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5536 /* Issue a write prefetch for the +4 cache line. */
5537 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
5539 const1_rtx, const0_rtx);
5540 emit_insn (prefetch);
5541 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5544 if (val == const0_rtx)
5545 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5548 /* Set the first byte in the block to the value and use an
5549 overlapping mvc for the block. */
5550 emit_move_insn (adjust_address (dst, QImode, 0), val);
5551 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5553 s390_load_address (dst_addr,
5554 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5556 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5559 emit_move_insn (blocks, temp);
5561 emit_cmp_and_jump_insns (blocks, const0_rtx,
5562 NE, NULL_RTX, mode, 1, loop_start_label);
5564 emit_label (restbyte_end_label);
5566 if (val == const0_rtx)
5567 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5570 /* Set the first byte in the block to the value and use an
5571 overlapping mvc for the block. */
5572 emit_move_insn (adjust_address (dst, QImode, 0), val);
5573 /* EXECUTE only uses the lowest 8 bits of count; that's
5574 exactly what we need here. */
5575 emit_insn (gen_movmem_short (dstp1, dst,
5576 convert_to_mode (Pmode, count, 1)));
5579 emit_label (zerobyte_end_label);
5583 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5584 and return the result in TARGET. */
5587 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5589 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5592 /* When tuning for z10 or higher we rely on the Glibc functions to
5593 do the right thing. Inline code is generated only for constant
5594 lengths below 64k. */
5595 if (s390_tune >= PROCESSOR_2097_Z10
5596 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
5599 /* As the result of CMPINT is inverted compared to what we need,
5600 we have to swap the operands. */
5601 tmp = op0; op0 = op1; op1 = tmp;
5603 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5605 if (INTVAL (len) > 0)
5607 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5608 emit_insn (gen_cmpint (target, ccreg));
5611 emit_move_insn (target, const0_rtx);
5613 else if (TARGET_MVCLE)
5615 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5616 emit_insn (gen_cmpint (target, ccreg));
5620 rtx addr0, addr1, count, blocks, temp;
5621 rtx_code_label *loop_start_label = gen_label_rtx ();
5622 rtx_code_label *loop_end_label = gen_label_rtx ();
5623 rtx_code_label *end_label = gen_label_rtx ();
5626 mode = GET_MODE (len);
5627 if (mode == VOIDmode)
5630 addr0 = gen_reg_rtx (Pmode);
5631 addr1 = gen_reg_rtx (Pmode);
5632 count = gen_reg_rtx (mode);
5633 blocks = gen_reg_rtx (mode);
5635 convert_move (count, len, 1);
5636 emit_cmp_and_jump_insns (count, const0_rtx,
5637 EQ, NULL_RTX, mode, 1, end_label);
5639 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5640 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5641 op0 = change_address (op0, VOIDmode, addr0);
5642 op1 = change_address (op1, VOIDmode, addr1);
5644 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5647 emit_move_insn (count, temp);
5649 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5652 emit_move_insn (blocks, temp);
5654 emit_cmp_and_jump_insns (blocks, const0_rtx,
5655 EQ, NULL_RTX, mode, 1, loop_end_label);
5657 emit_label (loop_start_label);
5660 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5664 /* Issue a read prefetch for the +2 cache line of operand 1. */
5665 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5666 const0_rtx, const0_rtx);
5667 emit_insn (prefetch);
5668 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5670 /* Issue a read prefetch for the +2 cache line of operand 2. */
5671 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5672 const0_rtx, const0_rtx);
5673 emit_insn (prefetch);
5674 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5677 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5678 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5679 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5680 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5681 temp = gen_rtx_SET (pc_rtx, temp);
5682 emit_jump_insn (temp);
5684 s390_load_address (addr0,
5685 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5686 s390_load_address (addr1,
5687 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5689 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5692 emit_move_insn (blocks, temp);
5694 emit_cmp_and_jump_insns (blocks, const0_rtx,
5695 EQ, NULL_RTX, mode, 1, loop_end_label);
5697 emit_jump (loop_start_label);
5698 emit_label (loop_end_label);
5700 emit_insn (gen_cmpmem_short (op0, op1,
5701 convert_to_mode (Pmode, count, 1)));
5702 emit_label (end_label);
5704 emit_insn (gen_cmpint (target, ccreg));
5709 /* Emit a conditional jump to LABEL for condition code mask MASK using
5710 comparison operator COMPARISON. Return the emitted jump insn. */
5713 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5717 gcc_assert (comparison == EQ || comparison == NE);
5718 gcc_assert (mask > 0 && mask < 15);
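/* The mask follows the branch-on-condition convention: bit value 8
   selects CC0, 4 selects CC1, 2 selects CC2 and 1 selects CC3.
   E.g. mask == 8 with NE branches whenever the current CC is not
   CC0 (a sketch of the CCRAW convention; see the CCRAWmode
   patterns). */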
5720 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5721 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5722 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5723 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5724 temp = gen_rtx_SET (pc_rtx, temp);
5725 return emit_jump_insn (temp);
5728 /* Emit the instructions to implement strlen of STRING and store the
5729 result in TARGET. The string has the known ALIGNMENT. This
5730 version uses vector instructions and is therefore not appropriate
5731 for targets prior to z13. */
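/* A sketch of the generated code: an unaligned head is loaded with
   vll (zero padded), then 16-byte chunks are loaded and scanned
   with vfene until a zero byte is found; the byte index of the
   zero within the last chunk is added to the running index. */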
5734 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5736 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5737 rtx str_reg = gen_reg_rtx (V16QImode);
5738 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5739 rtx str_idx_reg = gen_reg_rtx (Pmode);
5740 rtx result_reg = gen_reg_rtx (V16QImode);
5741 rtx is_aligned_label = gen_label_rtx ();
5742 rtx into_loop_label = NULL_RTX;
5743 rtx loop_start_label = gen_label_rtx ();
5745 rtx len = gen_reg_rtx (QImode);
5748 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5749 emit_move_insn (str_idx_reg, const0_rtx);
5751 if (INTVAL (alignment) < 16)
5753 /* Check whether the address happens to be aligned properly and,
5754 if so, jump directly to the aligned loop. */
5755 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5756 str_addr_base_reg, GEN_INT (15)),
5757 const0_rtx, EQ, NULL_RTX,
5758 Pmode, 1, is_aligned_label);
5760 temp = gen_reg_rtx (Pmode);
5761 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5762 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5763 gcc_assert (REG_P (temp));
5764 highest_index_to_load_reg =
5765 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5766 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5767 gcc_assert (REG_P (highest_index_to_load_reg));
5768 emit_insn (gen_vllv16qi (str_reg,
5769 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5770 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5772 into_loop_label = gen_label_rtx ();
5773 s390_emit_jump (into_loop_label, NULL_RTX);
5777 emit_label (is_aligned_label);
5778 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
5780 /* Reaching this point we are only performing 16-byte aligned loads. */
5782 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5784 emit_label (loop_start_label);
5785 LABEL_NUSES (loop_start_label) = 1;
5787 /* Load 16 bytes of the string into VR. */
5788 emit_move_insn (str_reg,
5789 gen_rtx_MEM (V16QImode,
5790 gen_rtx_PLUS (Pmode, str_idx_reg,
5791 str_addr_base_reg)));
5792 if (into_loop_label != NULL_RTX)
5794 emit_label (into_loop_label);
5795 LABEL_NUSES (into_loop_label) = 1;
5798 /* Increment string index by 16 bytes. */
5799 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5800 str_idx_reg, 1, OPTAB_DIRECT);
5802 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5803 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5805 add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
5807 profile_probability::very_likely ().to_reg_br_prob_note ());
5808 emit_insn (gen_vec_extractv16qiqi (len, result_reg, GEN_INT (7)));
5810 /* If the string pointer wasn't aligned we have loaded less than 16
5811 bytes and the remaining bytes got filled with zeros (by vll).
5812 Now we have to check whether the resulting index lies within the
5813 bytes actually part of the string. */
5815 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5816 highest_index_to_load_reg);
5817 s390_load_address (highest_index_to_load_reg,
5818 gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
5821 emit_insn (gen_movdicc (str_idx_reg, cond,
5822 highest_index_to_load_reg, str_idx_reg));
5824 emit_insn (gen_movsicc (str_idx_reg, cond,
5825 highest_index_to_load_reg, str_idx_reg));
5827 add_reg_br_prob_note (s390_emit_jump (is_aligned_label, cond),
5828 profile_probability::very_unlikely ());
5830 expand_binop (Pmode, add_optab, str_idx_reg,
5831 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
5832 /* FIXME: len is already zero extended - so avoid the llgcr emitted by the conversion below. */
5834 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5835 convert_to_mode (Pmode, len, 1),
5836 target, 1, OPTAB_DIRECT);
5838 emit_move_insn (target, temp);
5842 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5844 rtx temp = gen_reg_rtx (Pmode);
5845 rtx src_addr = XEXP (src, 0);
5846 rtx dst_addr = XEXP (dst, 0);
5847 rtx src_addr_reg = gen_reg_rtx (Pmode);
5848 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5849 rtx offset = gen_reg_rtx (Pmode);
5850 rtx vsrc = gen_reg_rtx (V16QImode);
5851 rtx vpos = gen_reg_rtx (V16QImode);
5852 rtx loadlen = gen_reg_rtx (SImode);
5853 rtx gpos_qi = gen_reg_rtx (QImode);
5854 rtx gpos = gen_reg_rtx (SImode);
5855 rtx done_label = gen_label_rtx ();
5856 rtx loop_label = gen_label_rtx ();
5857 rtx exit_label = gen_label_rtx ();
5858 rtx full_label = gen_label_rtx ();
5860 /* Perform a quick check for a string ending within the first up
5861 to 16 bytes and exit early if successful. */
5863 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5864 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5865 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5866 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
5867 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5868 /* gpos is the byte index if a zero was found and 16 otherwise.
5869 So if it is lower than the loaded bytes we have a hit. */
5870 emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
5872 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5874 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5876 emit_jump (exit_label);
5879 emit_label (full_label);
5880 LABEL_NUSES (full_label) = 1;
5882 /* Calculate `offset' so that src + offset points to the last byte
5883 before 16 byte alignment. */
5885 /* temp = src_addr & 0xf */
5886 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5889 /* offset = 0xf - temp */
5890 emit_move_insn (offset, GEN_INT (15));
5891 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
5894 /* Store `offset' bytes in the destination string. The quick check
5895 has loaded at least `offset' bytes into vsrc. */
5897 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5899 /* Advance to the next byte to be loaded. */
5900 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
5903 /* Make sure the addresses are single regs which can be used as a base address. */
5905 emit_move_insn (src_addr_reg, src_addr);
5906 emit_move_insn (dst_addr_reg, dst_addr);
5910 emit_label (loop_label);
5911 LABEL_NUSES (loop_label) = 1;
5913 emit_move_insn (vsrc,
5914 gen_rtx_MEM (V16QImode,
5915 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5917 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5918 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5919 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5920 REG_BR_PROB, profile_probability::very_unlikely ()
5921 .to_reg_br_prob_note ());
5923 emit_move_insn (gen_rtx_MEM (V16QImode,
5924 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5927 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5928 offset, 1, OPTAB_DIRECT);
5930 emit_jump (loop_label);
5935 /* We are done. Add the offset of the zero character to the dst_addr
5936 pointer to get the result. */
5938 emit_label (done_label);
5939 LABEL_NUSES (done_label) = 1;
5941 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
5944 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
5945 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5947 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
5949 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
5954 emit_label (exit_label);
5955 LABEL_NUSES (exit_label) = 1;
5959 /* Expand conditional increment or decrement using alc/slb instructions.
5960 Should generate code setting DST to either SRC or SRC + INCREMENT,
5961 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
5962 Returns true if successful, false otherwise.
5964 That makes it possible to implement some if-constructs without jumps e.g.:
5965 (borrow = CC0 | CC1 and carry = CC2 | CC3)
5966 unsigned int a, b, c;
5967 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
5968 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
5969 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
5970 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
5972 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
5973 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
5974 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
5975 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
5976 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
5979 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
5980 rtx dst, rtx src, rtx increment)
5982 machine_mode cmp_mode;
5983 machine_mode cc_mode;
5989 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
5990 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
5992 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
5993 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
5998 /* Try ADD LOGICAL WITH CARRY. */
5999 if (increment == const1_rtx)
6001 /* Determine CC mode to use. */
6002 if (cmp_code == EQ || cmp_code == NE)
6004 if (cmp_op1 != const0_rtx)
6006 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6007 NULL_RTX, 0, OPTAB_WIDEN);
6008 cmp_op1 = const0_rtx;
6011 cmp_code = cmp_code == EQ ? LEU : GTU;
6014 if (cmp_code == LTU || cmp_code == LEU)
6019 cmp_code = swap_condition (cmp_code);
6036 /* Emit comparison instruction pattern. */
6037 if (!register_operand (cmp_op0, cmp_mode))
6038 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6040 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6041 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6042 /* We use insn_invalid_p here to add clobbers if required. */
6043 ret = insn_invalid_p (emit_insn (insn), false);
6046 /* Emit ALC instruction pattern. */
6047 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6048 gen_rtx_REG (cc_mode, CC_REGNUM),
6051 if (src != const0_rtx)
6053 if (!register_operand (src, GET_MODE (dst)))
6054 src = force_reg (GET_MODE (dst), src);
6056 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6057 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
6060 p = rtvec_alloc (2);
6062 gen_rtx_SET (dst, op_res);
6064 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6065 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6070 /* Try SUBTRACT LOGICAL WITH BORROW. */
6071 if (increment == constm1_rtx)
6073 /* Determine CC mode to use. */
6074 if (cmp_code == EQ || cmp_code == NE)
6076 if (cmp_op1 != const0_rtx)
6078 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6079 NULL_RTX, 0, OPTAB_WIDEN);
6080 cmp_op1 = const0_rtx;
6083 cmp_code = cmp_code == EQ ? LEU : GTU;
6086 if (cmp_code == GTU || cmp_code == GEU)
6091 cmp_code = swap_condition (cmp_code);
6108 /* Emit comparison instruction pattern. */
6109 if (!register_operand (cmp_op0, cmp_mode))
6110 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6112 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6113 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6114 /* We use insn_invalid_p here to add clobbers if required. */
6115 ret = insn_invalid_p (emit_insn (insn), false);
6118 /* Emit SLB instruction pattern. */
6119 if (!register_operand (src, GET_MODE (dst)))
6120 src = force_reg (GET_MODE (dst), src);
6122 op_res = gen_rtx_MINUS (GET_MODE (dst),
6123 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6124 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
6125 gen_rtx_REG (cc_mode, CC_REGNUM),
6127 p = rtvec_alloc (2);
6129 gen_rtx_SET (dst, op_res);
6131 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6132 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6140 /* Expand code for the insv template. Return true if successful. */
6143 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6145 int bitsize = INTVAL (op1);
6146 int bitpos = INTVAL (op2);
6147 machine_mode mode = GET_MODE (dest);
6149 int smode_bsize, mode_bsize;
6152 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
6155 /* Generate INSERT IMMEDIATE (IILL et al). */
6156 /* (set (ze (reg)) (const_int)). */
6158 && register_operand (dest, word_mode)
6159 && (bitpos % 16) == 0
6160 && (bitsize % 16) == 0
6161 && const_int_operand (src, VOIDmode))
6163 HOST_WIDE_INT val = INTVAL (src);
6164 int regpos = bitpos + bitsize;
6166 while (regpos > bitpos)
6168 machine_mode putmode;
6171 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6176 putsize = GET_MODE_BITSIZE (putmode);
6178 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6181 gen_int_mode (val, putmode));
6184 gcc_assert (regpos == bitpos);
6188 smode = smallest_mode_for_size (bitsize, MODE_INT);
6189 smode_bsize = GET_MODE_BITSIZE (smode);
6190 mode_bsize = GET_MODE_BITSIZE (mode);
6192 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
6194 && (bitsize % BITS_PER_UNIT) == 0
6196 && (register_operand (src, word_mode)
6197 || const_int_operand (src, VOIDmode)))
6199 /* Emit standard pattern if possible. */
6200 if (smode_bsize == bitsize)
6202 emit_move_insn (adjust_address (dest, smode, 0),
6203 gen_lowpart (smode, src));
6207 /* (set (ze (mem)) (const_int)). */
6208 else if (const_int_operand (src, VOIDmode))
6210 int size = bitsize / BITS_PER_UNIT;
6211 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
6213 UNITS_PER_WORD - size);
6215 dest = adjust_address (dest, BLKmode, 0);
6216 set_mem_size (dest, size);
6217 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6221 /* (set (ze (mem)) (reg)). */
6222 else if (register_operand (src, word_mode))
6225 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6229 /* Emit st,stcmh sequence. */
6230 int stcmh_width = bitsize - 32;
6231 int size = stcmh_width / BITS_PER_UNIT;
6233 emit_move_insn (adjust_address (dest, SImode, size),
6234 gen_lowpart (SImode, src));
6235 set_mem_size (dest, size);
6236 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6237 GEN_INT (stcmh_width),
6239 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6245 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6246 if ((bitpos % BITS_PER_UNIT) == 0
6247 && (bitsize % BITS_PER_UNIT) == 0
6248 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
6250 && (mode == DImode || mode == SImode)
6251 && register_operand (dest, mode))
6253 /* Emit a strict_low_part pattern if possible. */
6254 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6256 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6257 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6258 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6259 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6263 /* ??? There are more powerful versions of ICM that are not
6264 completely represented in the md file. */
6267 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6268 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6270 machine_mode mode_s = GET_MODE (src);
6272 if (CONSTANT_P (src))
6274 /* For constant zero values the representation with AND
6275 appears to be folded in more situations than the (set
6276 (zero_extract) ...).
6277 We only do this when the start and end of the bitfield
6278 remain in the same SImode chunk. That way nihf or nilf can be used.
6280 The AND patterns might still generate a risbg for this. */
6281 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6284 src = force_reg (mode, src);
6286 else if (mode_s != mode)
6288 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6289 src = force_reg (mode_s, src);
6290 src = gen_lowpart (mode, src);
6293 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2),
6294 op = gen_rtx_SET (op, src);
6298 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6299 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6309 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6310 register that holds VAL of mode MODE shifted by COUNT bits. */
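/* E.g. (a sketch) for MODE == QImode, VAL == 0xab and COUNT == 16
   the returned register holds 0x00ab0000. */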
6313 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6315 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6316 NULL_RTX, 1, OPTAB_DIRECT);
6317 return expand_simple_binop (SImode, ASHIFT, val, count,
6318 NULL_RTX, 1, OPTAB_DIRECT);
6321 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6322 the result in TARGET. */
6325 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6326 rtx cmp_op1, rtx cmp_op2)
6328 machine_mode mode = GET_MODE (target);
6329 bool neg_p = false, swap_p = false;
6332 if (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_VECTOR_FLOAT)
6336 /* NE a != b -> !(a == b) */
6337 case NE: cond = EQ; neg_p = true; break;
6338 /* UNGT a u> b -> !(b >= a) */
6339 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6340 /* UNGE a u>= b -> !(b > a) */
6341 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6342 /* LE: a <= b -> b >= a */
6343 case LE: cond = GE; swap_p = true; break;
6344 /* UNLE: a u<= b -> !(a > b) */
6345 case UNLE: cond = GT; neg_p = true; break;
6346 /* LT: a < b -> b > a */
6347 case LT: cond = GT; swap_p = true; break;
6348 /* UNLT: a u< b -> !(a >= b) */
6349 case UNLT: cond = GE; neg_p = true; break;
6351 emit_insn (gen_vec_cmpuneqv2df (target, cmp_op1, cmp_op2));
6354 emit_insn (gen_vec_cmpltgtv2df (target, cmp_op1, cmp_op2));
6357 emit_insn (gen_vec_orderedv2df (target, cmp_op1, cmp_op2));
6360 emit_insn (gen_vec_unorderedv2df (target, cmp_op1, cmp_op2));
6369 /* NE: a != b -> !(a == b) */
6370 case NE: cond = EQ; neg_p = true; break;
6371 /* GE: a >= b -> !(b > a) */
6372 case GE: cond = GT; neg_p = true; swap_p = true; break;
6373 /* GEU: a >= b -> !(b > a) */
6374 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6375 /* LE: a <= b -> !(a > b) */
6376 case LE: cond = GT; neg_p = true; break;
6377 /* LEU: a <= b -> !(a > b) */
6378 case LEU: cond = GTU; neg_p = true; break;
6379 /* LT: a < b -> b > a */
6380 case LT: cond = GT; swap_p = true; break;
6381 /* LTU: a < b -> b > a */
6382 case LTU: cond = GTU; swap_p = true; break;
6389 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
6392 emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond,
6394 cmp_op1, cmp_op2)));
6396 emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
6399 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6400 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6401 elements in CMP1 and CMP2 fulfill the comparison.
6402 This function is only used to emit patterns for the vx builtins and
6403 therefore only handles comparison codes required by the builtins. */
6406 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6407 rtx cmp1, rtx cmp2, bool all_p)
6409 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6410 rtx tmp_reg = gen_reg_rtx (SImode);
6411 bool swap_p = false;
6413 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6419 cc_producer_mode = CCVEQmode;
6423 code = swap_condition (code);
6428 cc_producer_mode = CCVIHmode;
6432 code = swap_condition (code);
6437 cc_producer_mode = CCVIHUmode;
6443 scratch_mode = GET_MODE (cmp1);
6444 /* These codes represent inverted CC interpretations. Inverting
6445 an ALL CC mode results in an ANY CC mode and the other way
6446 around. Invert the all_p flag here to compensate for that. */
6448 if (code == NE || code == LE || code == LEU)
6451 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6453 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6459 case EQ: cc_producer_mode = CCVEQmode; break;
6460 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6461 case GT: cc_producer_mode = CCVFHmode; break;
6462 case GE: cc_producer_mode = CCVFHEmode; break;
6463 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6464 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6465 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6466 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6467 default: gcc_unreachable ();
6469 scratch_mode = mode_for_vector (
6470 int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))),
6471 GET_MODE_NUNITS (GET_MODE (cmp1)));
6472 gcc_assert (scratch_mode != BLKmode);
6477 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6489 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6490 gen_rtvec (2, gen_rtx_SET (
6491 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6492 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6493 gen_rtx_CLOBBER (VOIDmode,
6494 gen_rtx_SCRATCH (scratch_mode)))));
6495 emit_move_insn (target, const0_rtx);
6496 emit_move_insn (tmp_reg, const1_rtx);
6498 emit_move_insn (target,
6499 gen_rtx_IF_THEN_ELSE (SImode,
6500 gen_rtx_fmt_ee (code, VOIDmode,
6501 gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
6506 /* Invert the comparison CODE applied to a CC mode. This is only safe
6507 if we know whether the result was created by a floating point
6508 compare or not. For the CCV modes this is encoded as part of the mode. */
6511 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6513 /* Reversal of FP compares needs care -- an ordered compare
6514 becomes an unordered compare and vice versa. */
6515 if (mode == CCVFALLmode || mode == CCVFANYmode)
6516 return reverse_condition_maybe_unordered (code);
6517 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6518 return reverse_condition (code);
6523 /* Generate a vector comparison expression loading either elements of
6524 THEN or ELS into TARGET depending on the comparison COND of CMP_OP1 and CMP_OP2. */
6528 s390_expand_vcond (rtx target, rtx then, rtx els,
6529 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6532 machine_mode result_mode;
6535 machine_mode target_mode = GET_MODE (target);
6536 machine_mode cmp_mode = GET_MODE (cmp_op1);
6537 rtx op = (cond == LT) ? els : then;
6539 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6540 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6541 for short and byte (x >> 15 and x >> 7 respectively). */
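/* E.g. for V4SImode this turns x < 0 ? -1 : 0 into an arithmetic
   element shift right by 31 (a sketch; the expand_simple_binop
   call below picks the actual shift pattern). */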
6542 if ((cond == LT || cond == GE)
6543 && target_mode == cmp_mode
6544 && cmp_op2 == CONST0_RTX (cmp_mode)
6545 && op == CONST0_RTX (target_mode)
6546 && s390_vector_mode_supported_p (target_mode)
6547 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6549 rtx negop = (cond == LT) ? then : els;
6551 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6553 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6554 if (negop == CONST1_RTX (target_mode))
6556 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6557 GEN_INT (shift), target,
6560 emit_move_insn (target, res);
6564 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6565 else if (all_ones_operand (negop, target_mode))
6567 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6568 GEN_INT (shift), target,
6571 emit_move_insn (target, res);
6576 /* We always use an integral type vector to hold the comparison result. */
6578 result_mode = mode_for_vector (int_mode_for_mode (GET_MODE_INNER (cmp_mode)),
6579 GET_MODE_NUNITS (cmp_mode));
6580 result_target = gen_reg_rtx (result_mode);
6582 /* We allow vector immediates as comparison operands that
6583 can be handled by the optimization above but not by the
6584 following code. Hence, force them into registers here. */
6585 if (!REG_P (cmp_op1))
6586 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6588 if (!REG_P (cmp_op2))
6589 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6591 s390_expand_vec_compare (result_target, cond,
6594 /* If the results are supposed to be either -1 or 0 we are done
6595 since this is what our compare instructions generate anyway. */
6596 if (all_ones_operand (then, GET_MODE (then))
6597 && const0_operand (els, GET_MODE (els)))
6599 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6604 /* Otherwise we will do a vsel afterwards. */
6605 /* This gets triggered e.g.
6606 with gcc.c-torture/compile/pr53410-1.c */
6608 then = force_reg (target_mode, then);
6611 els = force_reg (target_mode, els);
6613 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6615 CONST0_RTX (result_mode));
6617 /* We compared the result against zero above so we have to swap then and els here. */
6619 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6621 gcc_assert (target_mode == GET_MODE (then));
6622 emit_insn (gen_rtx_SET (target, tmp));
6625 /* Emit the RTX necessary to initialize the vector TARGET with values in VALS. */
6628 s390_expand_vec_init (rtx target, rtx vals)
6630 machine_mode mode = GET_MODE (target);
6631 machine_mode inner_mode = GET_MODE_INNER (mode);
6632 int n_elts = GET_MODE_NUNITS (mode);
6633 bool all_same = true, all_regs = true, all_const_int = true;
6637 for (i = 0; i < n_elts; ++i)
6639 x = XVECEXP (vals, 0, i);
6641 if (!CONST_INT_P (x))
6642 all_const_int = false;
6644 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6651 /* Use vector gen mask or vector gen byte mask if possible. */
6652 if (all_same && all_const_int
6653 && (XVECEXP (vals, 0, 0) == const0_rtx
6654 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6656 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6658 emit_insn (gen_rtx_SET (target,
6659 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6665 emit_insn (gen_rtx_SET (target,
6666 gen_rtx_VEC_DUPLICATE (mode,
6667 XVECEXP (vals, 0, 0))));
6674 && GET_MODE_SIZE (inner_mode) == 8)
6676 /* Use vector load pair. */
6677 emit_insn (gen_rtx_SET (target,
6678 gen_rtx_VEC_CONCAT (mode,
6679 XVECEXP (vals, 0, 0),
6680 XVECEXP (vals, 0, 1))));
6684 /* Use vector load logical element and zero. */
6685 if (TARGET_VXE && (mode == V4SImode || mode == V4SFmode))
6689 x = XVECEXP (vals, 0, 0);
6690 if (memory_operand (x, inner_mode))
6692 for (i = 1; i < n_elts; ++i)
6693 found = found && XVECEXP (vals, 0, i) == const0_rtx;
6697 machine_mode half_mode = (inner_mode == SFmode
6698 ? V2SFmode : V2SImode);
6699 emit_insn (gen_rtx_SET (target,
6700 gen_rtx_VEC_CONCAT (mode,
6701 gen_rtx_VEC_CONCAT (half_mode,
6704 gen_rtx_VEC_CONCAT (half_mode,
6712 /* We are about to set the vector elements one by one. Zero out the
6713 full register first in order to help the data flow framework to
6714 detect it as full VR set. */
6715 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6717 /* Unfortunately the vec_init expander is not allowed to fail. So
6718 we have to implement the fallback ourselves. */
6719 for (i = 0; i < n_elts; i++)
6721 rtx elem = XVECEXP (vals, 0, i);
6722 if (!general_operand (elem, GET_MODE (elem)))
6723 elem = force_reg (inner_mode, elem);
6725 emit_insn (gen_rtx_SET (target,
6726 gen_rtx_UNSPEC (mode,
6728 GEN_INT (i), target),
6733 /* Structure to hold the initial parameters for a compare_and_swap operation
6734 in HImode and QImode. */
6736 struct alignment_context
6738 rtx memsi; /* SI aligned memory location. */
6739 rtx shift; /* Bit offset with regard to lsb. */
6740 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6741 rtx modemaski; /* ~modemask */
6742 bool aligned; /* True if memory is aligned, false otherwise. */
6745 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
6746 structure AC for transparent simplifying, if the memory alignment is known
6747 to be at least 32 bit. MEM is the memory location for the actual operation
6748 and MODE its mode. */
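/* E.g. (a sketch, big-endian): a HImode MEM at byte offset 0 of its
   SImode word ends up with shift == 16 and modemask == 0xffff0000;
   at byte offset 2, shift == 0 and modemask == 0x0000ffff. */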
6751 init_alignment_context (struct alignment_context *ac, rtx mem,
6754 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6755 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6758 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6761 /* Alignment is unknown. */
6762 rtx byteoffset, addr, align;
6764 /* Force the address into a register. */
6765 addr = force_reg (Pmode, XEXP (mem, 0));
6767 /* Align it to SImode. */
6768 align = expand_simple_binop (Pmode, AND, addr,
6769 GEN_INT (-GET_MODE_SIZE (SImode)),
6770 NULL_RTX, 1, OPTAB_DIRECT);
6772 ac->memsi = gen_rtx_MEM (SImode, align);
6773 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6774 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6775 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6777 /* Calculate shiftcount. */
6778 byteoffset = expand_simple_binop (Pmode, AND, addr,
6779 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6780 NULL_RTX, 1, OPTAB_DIRECT);
6781 /* As we already have some offset, evaluate the remaining distance. */
6782 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6783 NULL_RTX, 1, OPTAB_DIRECT);
6786 /* Shift is the byte count, but we need the bitcount. */
6787 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6788 NULL_RTX, 1, OPTAB_DIRECT);
6790 /* Calculate masks. */
6791 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6792 GEN_INT (GET_MODE_MASK (mode)),
6793 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
6794 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
6798 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6799 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6800 perform the merge in SEQ2. */
6803 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6804 machine_mode mode, rtx val, rtx ins)
6811 tmp = copy_to_mode_reg (SImode, val);
6812 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6816 *seq2 = get_insns ();
6823 /* Failed to use insv. Generate a two part shift and mask. */
6825 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6826 *seq1 = get_insns ();
6830 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6831 *seq2 = get_insns ();
6837 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6838 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6839 value to set if CMP == MEM. */
6842 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6843 rtx cmp, rtx new_rtx, bool is_weak)
6845 struct alignment_context ac;
6846 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6847 rtx res = gen_reg_rtx (SImode);
6848 rtx_code_label *csloop = NULL, *csend = NULL;
6850 gcc_assert (MEM_P (mem));
6852 init_alignment_context (&ac, mem, mode);
6854 /* Load full word. Subsequent loads are performed by CS. */
6855 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6856 NULL_RTX, 1, OPTAB_DIRECT);
6858 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6859 possible, we try to use insv to make this happen efficiently. If
6860 that fails we'll generate code both inside and outside the loop. */
6861 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6862 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6869 /* Start CS loop. */
6872 /* Begin assuming success. */
6873 emit_move_insn (btarget, const1_rtx);
6875 csloop = gen_label_rtx ();
6876 csend = gen_label_rtx ();
6877 emit_label (csloop);
6880 /* val = "<mem>00..0<mem>"
6881 * cmp = "00..0<cmp>00..0"
6882 * new = "00..0<new>00..0"
6888 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv, CCZ1mode);
6890 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6895 /* Jump to end if we're done (likely?). */
6896 s390_emit_jump (csend, cc);
6898 /* Check for changes outside mode, and loop internally if so.
6899 Arrange the moves so that the compare is adjacent to the
6900 branch so that we can generate CRJ. */
6901 tmp = copy_to_reg (val);
6902 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6904 cc = s390_emit_compare (NE, val, tmp);
6905 s390_emit_jump (csloop, cc);
6908 emit_move_insn (btarget, const0_rtx);
6912 /* Return the correct part of the bitfield. */
6913 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6914 NULL_RTX, 1, OPTAB_DIRECT), 1);
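/* In plain C the loop built above corresponds roughly to the sketch
   below (an editorial illustration only: the helper name is ours and
   the standard __atomic builtins merely stand in for the CS-based
   RTL; <stdint.h>/<stdbool.h> assumed):

     static bool
     cas_byte_via_word (unsigned char *p, unsigned char cmp,
			unsigned char newval)
     {
       unsigned int *wp = (unsigned int *) ((uintptr_t) p & ~(uintptr_t) 3);
       int shift = (3 - ((uintptr_t) p & 3)) * 8;   // big-endian
       unsigned int mask = 0xffu << shift;
       unsigned int old = __atomic_load_n (wp, __ATOMIC_RELAXED) & ~mask;
       for (;;)
	 {
	   unsigned int ocmp = old | ((unsigned int) cmp << shift);
	   unsigned int onew = old | ((unsigned int) newval << shift);
	   if (__atomic_compare_exchange_n (wp, &ocmp, onew, false,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
	     return true;                  // CS succeeded
	   if ((ocmp & mask) != ((unsigned int) cmp << shift))
	     return false;                 // the byte itself differed
	   old = ocmp & ~mask;             // only outside bits changed: retry
	 }
     }
*/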
6917 /* Variant of s390_expand_cs for SI, DI and TI modes. */
6919 s390_expand_cs_tdsi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6920 rtx cmp, rtx new_rtx, bool is_weak)
6922 rtx output = vtarget;
6923 rtx_code_label *skip_cs_label = NULL;
6924 bool do_const_opt = false;
6926 if (!register_operand (output, mode))
6927 output = gen_reg_rtx (mode);
6929 /* If IS_WEAK is true and the INPUT value is a constant, compare the memory
6930 with the constant first and skip the compare_and_swap because it's very
6931 expensive and likely to fail anyway.
6932 Note 1: This is done only for IS_WEAK. C11 allows optimizations that may
6933 cause spurious failures in that case.
6934 Note 2: It may be useful to do this also for non-constant INPUT.
6935 Note 3: Currently only targets with "load on condition" are supported
6936 (z196 and newer). */
6939 && (mode == SImode || mode == DImode))
6940 do_const_opt = (is_weak && CONST_INT_P (cmp));
6944 rtx cc = gen_rtx_REG (CCZmode, CC_REGNUM);
6946 skip_cs_label = gen_label_rtx ();
6947 emit_move_insn (btarget, const0_rtx);
6948 if (CONST_INT_P (cmp) && INTVAL (cmp) == 0)
6950 rtvec lt = rtvec_alloc (2);
6952 /* Load-and-test + conditional jump. */
6954 = gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, mem, cmp));
6955 RTVEC_ELT (lt, 1) = gen_rtx_SET (output, mem);
6956 emit_insn (gen_rtx_PARALLEL (VOIDmode, lt));
6960 emit_move_insn (output, mem);
6961 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
6963 s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
6964 add_reg_br_prob_note (get_last_insn (),
6965 profile_probability::very_unlikely ());
6966 /* If the jump is not taken, OUTPUT is the expected value. */
6968 /* Reload newval to a register manually, *after* the compare and jump
6969 above. Otherwise Reload might place it before the jump. */
6972 cmp = force_reg (mode, cmp);
6973 new_rtx = force_reg (mode, new_rtx);
6974 s390_emit_compare_and_swap (EQ, output, mem, cmp, new_rtx,
6975 (do_const_opt) ? CCZmode : CCZ1mode);
6976 if (skip_cs_label != NULL)
6977 emit_label (skip_cs_label);
6979 /* We deliberately accept non-register operands in the predicate
6980 to ensure the write back to the output operand happens *before*
6981 the store-flags code below. This makes it easier for combine
6982 to merge the store-flags code with a potential test-and-branch
6983 pattern following (immediately!) afterwards. */
6984 if (output != vtarget)
6985 emit_move_insn (vtarget, output);
6991 /* Do not use gen_cstorecc4 here because it writes either 1 or 0, but
6992 btarget has already been initialized with 0 above. */
6993 cc = gen_rtx_REG (CCZmode, CC_REGNUM);
6994 cond = gen_rtx_EQ (VOIDmode, cc, const0_rtx);
6995 ite = gen_rtx_IF_THEN_ELSE (SImode, cond, const1_rtx, btarget);
6996 emit_insn (gen_rtx_SET (btarget, ite));
7002 cc = gen_rtx_REG (CCZ1mode, CC_REGNUM);
7003 cond = gen_rtx_EQ (SImode, cc, const0_rtx);
7004 emit_insn (gen_cstorecc4 (btarget, cond, cc, const0_rtx));
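/* At the source level the constant fast path above amounts to the
   following sketch (editorial; CONST_CMP stands for the constant
   expected value):

     tmp = *mem;                        // plain load, no interlock
     if (tmp != CONST_CMP)
       return tmp;                      // skip the expensive CS
     return CAS (mem, CONST_CMP, new);

   This strengthening is only valid for the weak variant, where C11
   permits spurious failures anyway.  */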
7008 /* Expand an atomic compare and swap operation. MEM is the memory location,
7009 CMP the old value to compare MEM with and NEW_RTX the value to set if CMP == MEM. */
7013 s390_expand_cs (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
7014 rtx cmp, rtx new_rtx, bool is_weak)
7021 s390_expand_cs_tdsi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7025 s390_expand_cs_hqi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7032 /* Expand an atomic_exchange operation simulated with a compare-and-swap loop.
7033 The memory location MEM is set to INPUT. OUTPUT is set to the previous value from MEM. */
7037 s390_expand_atomic_exchange_tdsi (rtx output, rtx mem, rtx input)
7039 machine_mode mode = GET_MODE (mem);
7040 rtx_code_label *csloop;
7043 && (mode == DImode || mode == SImode)
7044 && CONST_INT_P (input) && INTVAL (input) == 0)
7046 emit_move_insn (output, const0_rtx);
7048 emit_insn (gen_atomic_fetch_anddi (output, mem, const0_rtx, input));
7050 emit_insn (gen_atomic_fetch_andsi (output, mem, const0_rtx, input));
7054 input = force_reg (mode, input);
7055 emit_move_insn (output, mem);
7056 csloop = gen_label_rtx ();
7057 emit_label (csloop);
7058 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, output, mem, output,
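/* The fallback loop just built is, in C terms (editorial sketch):

     old = *mem;
     while (!CAS (mem, &old, input))
       ;                                // old is refreshed by the failing CS
     output = old;

   whereas for INPUT == 0 on z196 and newer the interlocked
   fetch-and-AND with zero above yields the old value directly,
   with no loop at all.  */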
7062 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
7063 and VAL the value to play with. If AFTER is true then store the value
7064 MEM holds after the operation; if AFTER is false then store the value MEM
7065 holds before the operation. If TARGET is zero then discard that value, else
7066 store it to TARGET. */
7069 s390_expand_atomic (machine_mode mode, enum rtx_code code,
7070 rtx target, rtx mem, rtx val, bool after)
7072 struct alignment_context ac;
7074 rtx new_rtx = gen_reg_rtx (SImode);
7075 rtx orig = gen_reg_rtx (SImode);
7076 rtx_code_label *csloop = gen_label_rtx ();
7078 gcc_assert (!target || register_operand (target, VOIDmode));
7079 gcc_assert (MEM_P (mem));
7081 init_alignment_context (&ac, mem, mode);
7083 /* Shift val to the correct bit positions.
7084 Preserve "icm", but prevent "ex icm". */
7085 if (!(ac.aligned && code == SET && MEM_P (val)))
7086 val = s390_expand_mask_and_shift (val, mode, ac.shift);
7088 /* Further preparation insns. */
7089 if (code == PLUS || code == MINUS)
7090 emit_move_insn (orig, val);
7091 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
7092 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
7093 NULL_RTX, 1, OPTAB_DIRECT);
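/* Example (editorial): for an AND on a QImode field at bit offset 8
   with val == 0x0f and modemask == 0x0000ff00, the XOR above yields

     shifted val     == 0x00000f00
     val ^ modemaski == 0x00000f00 ^ 0xffff00ff == 0xffff0fff

   so the word-wide AND in the loop below leaves every bit outside
   the field unchanged, matching the "11..1<val>11..1" shape noted
   in the comment.  */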
7095 /* Load full word. Subsequent loads are performed by CS. */
7096 cmp = force_reg (SImode, ac.memsi);
7098 /* Start CS loop. */
7099 emit_label (csloop);
7100 emit_move_insn (new_rtx, cmp);
7102 /* Patch new with val at correct position. */
7107 val = expand_simple_binop (SImode, code, new_rtx, orig,
7108 NULL_RTX, 1, OPTAB_DIRECT);
7109 val = expand_simple_binop (SImode, AND, val, ac.modemask,
7110 NULL_RTX, 1, OPTAB_DIRECT);
7113 if (ac.aligned && MEM_P (val))
7114 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
7115 0, 0, SImode, val, false);
7118 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
7119 NULL_RTX, 1, OPTAB_DIRECT);
7120 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
7121 NULL_RTX, 1, OPTAB_DIRECT);
7127 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
7128 NULL_RTX, 1, OPTAB_DIRECT);
7130 case MULT: /* NAND */
7131 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
7132 NULL_RTX, 1, OPTAB_DIRECT);
7133 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
7134 NULL_RTX, 1, OPTAB_DIRECT);
7140 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
7141 ac.memsi, cmp, new_rtx,
7144 /* Return the correct part of the bitfield. */
7146 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
7147 after ? new_rtx : cmp, ac.shift,
7148 NULL_RTX, 1, OPTAB_DIRECT), 1);
7151 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7152 We need to emit DTP-relative relocations. */
7154 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
7157 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
7162 fputs ("\t.long\t", file);
7165 fputs ("\t.quad\t", file);
7170 output_addr_const (file, x);
7171 fputs ("@DTPOFF", file);
7174 /* Return the proper mode for REGNO being represented in the dwarf
7177 s390_dwarf_frame_reg_mode (int regno)
7179 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
7181 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
7182 if (GENERAL_REGNO_P (regno))
7185 /* The rightmost 64 bits of vector registers are call-clobbered. */
7186 if (GET_MODE_SIZE (save_mode) > 8)
7192 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
7193 /* Implement TARGET_MANGLE_TYPE. */
7196 s390_mangle_type (const_tree type)
7198 type = TYPE_MAIN_VARIANT (type);
7200 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
7201 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
7204 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
7205 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
7206 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
7207 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
7209 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
7210 && TARGET_LONG_DOUBLE_128)
7213 /* For all other types, use normal C++ mangling. */
7218 /* In the name of slightly smaller debug output, and to cater to
7219 general assembler lossage, recognize various UNSPEC sequences
7220 and turn them back into a direct symbol reference. */
7223 s390_delegitimize_address (rtx orig_x)
7227 orig_x = delegitimize_mem_from_attrs (orig_x);
7230 /* Extract the symbol ref from:
7231 (plus:SI (reg:SI 12 %r12)
7232 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
7233 UNSPEC_GOTOFF/PLTOFF)))
7235 (plus:SI (reg:SI 12 %r12)
7236 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7237 UNSPEC_GOTOFF/PLTOFF)
7238 (const_int 4 [0x4])))) */
7239 if (GET_CODE (x) == PLUS
7240 && REG_P (XEXP (x, 0))
7241 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7242 && GET_CODE (XEXP (x, 1)) == CONST)
7244 HOST_WIDE_INT offset = 0;
7246 /* The const operand. */
7247 y = XEXP (XEXP (x, 1), 0);
7249 if (GET_CODE (y) == PLUS
7250 && GET_CODE (XEXP (y, 1)) == CONST_INT)
7252 offset = INTVAL (XEXP (y, 1));
7256 if (GET_CODE (y) == UNSPEC
7257 && (XINT (y, 1) == UNSPEC_GOTOFF
7258 || XINT (y, 1) == UNSPEC_PLTOFF))
7259 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
7262 if (GET_CODE (x) != MEM)
7266 if (GET_CODE (x) == PLUS
7267 && GET_CODE (XEXP (x, 1)) == CONST
7268 && GET_CODE (XEXP (x, 0)) == REG
7269 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7271 y = XEXP (XEXP (x, 1), 0);
7272 if (GET_CODE (y) == UNSPEC
7273 && XINT (y, 1) == UNSPEC_GOT)
7274 y = XVECEXP (y, 0, 0);
7278 else if (GET_CODE (x) == CONST)
7280 /* Extract the symbol ref from:
7281 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7282 UNSPEC_PLT/GOTENT))) */
7285 if (GET_CODE (y) == UNSPEC
7286 && (XINT (y, 1) == UNSPEC_GOTENT
7287 || XINT (y, 1) == UNSPEC_PLT))
7288 y = XVECEXP (y, 0, 0);
7295 if (GET_MODE (orig_x) != Pmode)
7297 if (GET_MODE (orig_x) == BLKmode)
7299 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7306 /* Output operand OP to stdio stream FILE.
7307 OP is an address (register + offset) which is not used to address data;
7308 instead the rightmost bits are interpreted as the value. */
7311 print_addrstyle_operand (FILE *file, rtx op)
7313 HOST_WIDE_INT offset;
7316 /* Extract base register and offset. */
7317 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7323 gcc_assert (GET_CODE (base) == REG);
7324 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7325 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
7328 /* Offsets are restricted to twelve bits. */
7329 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7331 fprintf (file, "(%s)", reg_names[REGNO (base)]);
7334 /* Assigns the number of NOP halfwords to be emitted before and after the
7335 function label to *HW_BEFORE and *HW_AFTER. Neither pointer may be NULL.
7336 If hotpatching is disabled for the function, the values are set to zero.
7340 s390_function_num_hotpatch_hw (tree decl,
7346 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7348 /* Handle the arguments of the hotpatch attribute. The values
7349 specified via attribute might override the cmdline argument
7353 tree args = TREE_VALUE (attr);
7355 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7356 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7360 /* Use the values specified by the cmdline arguments. */
7361 *hw_before = s390_hotpatch_hw_before_label;
7362 *hw_after = s390_hotpatch_hw_after_label;
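/* As an illustration (editorial), a declaration such as

     __attribute__ ((hotpatch (1, 2)))
     void foo (void);

   requests one NOP halfword before and two after the label of foo,
   overriding any -mhotpatch= setting from the command line.  */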
7366 /* Write the current .machine and .machinemode specification to the assembler file. */
7369 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7371 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7373 fprintf (asm_out_file, "\t.machinemode %s\n",
7374 (TARGET_ZARCH) ? "zarch" : "esa");
7375 fprintf (asm_out_file, "\t.machine \"%s",
7376 processor_table[s390_arch].binutils_name);
7377 if (S390_USE_ARCHITECTURE_MODIFIERS)
7381 cpu_flags = processor_flags_table[(int) s390_arch];
7382 if (TARGET_HTM && !(cpu_flags & PF_TX))
7383 fprintf (asm_out_file, "+htm");
7384 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7385 fprintf (asm_out_file, "+nohtm");
7386 if (TARGET_VX && !(cpu_flags & PF_VX))
7387 fprintf (asm_out_file, "+vx");
7388 else if (!TARGET_VX && (cpu_flags & PF_VX))
7389 fprintf (asm_out_file, "+novx");
7391 fprintf (asm_out_file, "\"\n");
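/* As an illustration (editorial), compiling with -march=z13 -mno-vx
   would emit something along the lines of

	.machinemode zarch
	.machine "z13+novx"

   since HTM is implied by z13 while the vector facility has been
   switched off relative to it.  */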
7394 /* Write an extra function header before the very start of the function. */
7397 s390_asm_output_function_prefix (FILE *asm_out_file,
7398 const char *fnname ATTRIBUTE_UNUSED)
7400 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7402 /* Since only the function-specific option values are saved, and not an
7403 indication of which options were explicitly set, it's too much work here to figure out which options
7404 have actually changed. Thus, generate .machine and .machinemode whenever a
7405 function has the target attribute or pragma. */
7406 fprintf (asm_out_file, "\t.machinemode push\n");
7407 fprintf (asm_out_file, "\t.machine push\n");
7408 s390_asm_output_machine_for_arch (asm_out_file);
7411 /* Write an extra function footer after the very end of the function. */
7414 s390_asm_declare_function_size (FILE *asm_out_file,
7415 const char *fnname, tree decl)
7417 if (!flag_inhibit_size_directive)
7418 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7419 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7421 fprintf (asm_out_file, "\t.machine pop\n");
7422 fprintf (asm_out_file, "\t.machinemode pop\n");
7426 /* Write the extra assembler code needed to declare a function properly. */
7429 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7432 int hw_before, hw_after;
7434 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7437 unsigned int function_alignment;
7440 /* Add a trampoline code area before the function label and initialize it
7441 with two-byte nop instructions. This area can be overwritten with code
7442 that jumps to a patched version of the function. */
7443 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
7444 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7446 for (i = 1; i < hw_before; i++)
7447 fputs ("\tnopr\t%r0\n", asm_out_file);
7449 /* Note: The function label must be aligned so that (a) the bytes of the
7450 following nop do not cross a cacheline boundary, and (b) a jump address
7451 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7452 stored directly before the label without crossing a cacheline
7453 boundary. All this is necessary to make sure the trampoline code can
7454 be changed atomically.
7455 This alignment is done automatically using the FUNCTION_BOUNDARY, but
7456 if there are NOPs before the function label, the alignment is placed
7457 before them. So it is necessary to duplicate the alignment after the NOPs. */
7459 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7460 if (! DECL_USER_ALIGN (decl))
7461 function_alignment = MAX (function_alignment,
7462 (unsigned int) align_functions);
7463 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7464 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7467 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7469 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7470 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7471 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7472 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7473 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7474 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7475 s390_warn_framesize);
7476 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7477 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7478 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7479 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7480 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7481 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7482 TARGET_PACKED_STACK);
7483 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7484 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7485 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7486 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7487 s390_warn_dynamicstack_p);
7489 ASM_OUTPUT_LABEL (asm_out_file, fname);
7491 asm_fprintf (asm_out_file,
7492 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7496 /* Output machine-dependent UNSPECs occurring in address constant X
7497 in assembler syntax to stdio stream FILE. Returns true if the
7498 constant X could be recognized, false otherwise. */
7501 s390_output_addr_const_extra (FILE *file, rtx x)
7503 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7504 switch (XINT (x, 1))
7507 output_addr_const (file, XVECEXP (x, 0, 0));
7508 fprintf (file, "@GOTENT");
7511 output_addr_const (file, XVECEXP (x, 0, 0));
7512 fprintf (file, "@GOT");
7515 output_addr_const (file, XVECEXP (x, 0, 0));
7516 fprintf (file, "@GOTOFF");
7519 output_addr_const (file, XVECEXP (x, 0, 0));
7520 fprintf (file, "@PLT");
7523 output_addr_const (file, XVECEXP (x, 0, 0));
7524 fprintf (file, "@PLTOFF");
7527 output_addr_const (file, XVECEXP (x, 0, 0));
7528 fprintf (file, "@TLSGD");
7531 assemble_name (file, get_some_local_dynamic_name ());
7532 fprintf (file, "@TLSLDM");
7535 output_addr_const (file, XVECEXP (x, 0, 0));
7536 fprintf (file, "@DTPOFF");
7539 output_addr_const (file, XVECEXP (x, 0, 0));
7540 fprintf (file, "@NTPOFF");
7542 case UNSPEC_GOTNTPOFF:
7543 output_addr_const (file, XVECEXP (x, 0, 0));
7544 fprintf (file, "@GOTNTPOFF");
7546 case UNSPEC_INDNTPOFF:
7547 output_addr_const (file, XVECEXP (x, 0, 0));
7548 fprintf (file, "@INDNTPOFF");
7552 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7553 switch (XINT (x, 1))
7555 case UNSPEC_POOL_OFFSET:
7556 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7557 output_addr_const (file, x);
7563 /* Output address operand ADDR in assembler syntax to
7564 stdio stream FILE. */
7567 print_operand_address (FILE *file, rtx addr)
7569 struct s390_address ad;
7570 memset (&ad, 0, sizeof (s390_address));
7572 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7576 output_operand_lossage ("symbolic memory references are "
7577 "only supported on z10 or later");
7580 output_addr_const (file, addr);
7584 if (!s390_decompose_address (addr, &ad)
7585 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7586 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7587 output_operand_lossage ("cannot decompose address");
7590 output_addr_const (file, ad.disp);
7592 fprintf (file, "0");
7594 if (ad.base && ad.indx)
7595 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7596 reg_names[REGNO (ad.base)]);
7598 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7601 /* Output operand X in assembler syntax to stdio stream FILE.
7602 CODE specified the format flag. The following format flags
7605 'C': print opcode suffix for branch condition.
7606 'D': print opcode suffix for inverse branch condition.
7607 'E': print opcode suffix for branch on index instruction.
7608 'G': print the size of the operand in bytes.
7609 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7610 'M': print the second word of a TImode operand.
7611 'N': print the second word of a DImode operand.
7612 'O': print only the displacement of a memory reference or address.
7613 'R': print only the base register of a memory reference or address.
7614 'S': print S-type memory reference (base+displacement).
7615 'Y': print address style operand without index (e.g. shift count or setmem operand).
7618 'b': print integer X as if it's an unsigned byte.
7619 'c': print integer X as if it's a signed byte.
7620 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7621 'f': "end" contiguous bitmask X in SImode.
7622 'h': print integer X as if it's a signed halfword.
7623 'i': print the first nonzero HImode part of X.
7624 'j': print the first HImode part unequal to -1 of X.
7625 'k': print the first nonzero SImode part of X.
7626 'm': print the first SImode part unequal to -1 of X.
7627 'o': print integer X as if it's an unsigned 32-bit word.
7628 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7629 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7630 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7631 'x': print integer X as if it's an unsigned halfword.
7632 'v': print register number as vector register (v1 instead of f1).
7636 print_operand (FILE *file, rtx x, int code)
7643 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7647 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7651 if (GET_CODE (x) == LE)
7652 fprintf (file, "l");
7653 else if (GET_CODE (x) == GT)
7654 fprintf (file, "h");
7656 output_operand_lossage ("invalid comparison operator "
7657 "for 'E' output modifier");
7661 if (GET_CODE (x) == SYMBOL_REF)
7663 fprintf (file, "%s", ":tls_load:");
7664 output_addr_const (file, x);
7666 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7668 fprintf (file, "%s", ":tls_gdcall:");
7669 output_addr_const (file, XVECEXP (x, 0, 0));
7671 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7673 fprintf (file, "%s", ":tls_ldcall:");
7674 const char *name = get_some_local_dynamic_name ();
7676 assemble_name (file, name);
7679 output_operand_lossage ("invalid reference for 'J' output modifier");
7683 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7688 struct s390_address ad;
7691 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7694 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7697 output_operand_lossage ("invalid address for 'O' output modifier");
7702 output_addr_const (file, ad.disp);
7704 fprintf (file, "0");
7710 struct s390_address ad;
7713 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7716 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7719 output_operand_lossage ("invalid address for 'R' output modifier");
7724 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7726 fprintf (file, "0");
7732 struct s390_address ad;
7737 output_operand_lossage ("memory reference expected for "
7738 "'S' output modifier");
7741 ret = s390_decompose_address (XEXP (x, 0), &ad);
7744 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7747 output_operand_lossage ("invalid address for 'S' output modifier");
7752 output_addr_const (file, ad.disp);
7754 fprintf (file, "0");
7757 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7762 if (GET_CODE (x) == REG)
7763 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7764 else if (GET_CODE (x) == MEM)
7765 x = change_address (x, VOIDmode,
7766 plus_constant (Pmode, XEXP (x, 0), 4));
7768 output_operand_lossage ("register or memory expression expected "
7769 "for 'N' output modifier");
7773 if (GET_CODE (x) == REG)
7774 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7775 else if (GET_CODE (x) == MEM)
7776 x = change_address (x, VOIDmode,
7777 plus_constant (Pmode, XEXP (x, 0), 8));
7779 output_operand_lossage ("register or memory expression expected "
7780 "for 'M' output modifier");
7784 print_addrstyle_operand (file, x);
7788 switch (GET_CODE (x))
7791 /* Print FP regs as fx instead of vx when they are accessed
7792 through non-vector mode. */
7794 || VECTOR_NOFP_REG_P (x)
7795 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7796 || (VECTOR_REG_P (x)
7797 && (GET_MODE_SIZE (GET_MODE (x)) /
7798 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7799 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7801 fprintf (file, "%s", reg_names[REGNO (x)]);
7805 output_address (GET_MODE (x), XEXP (x, 0));
7812 output_addr_const (file, x);
7825 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7831 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
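/* The XOR/subtract pair above sign-extends without branching; e.g.
   for 'b': 0xfe -> (0xfe ^ 0x80) - 0x80 == 0x7e - 0x80 == -2, while
   0x05 -> (0x05 ^ 0x80) - 0x80 == 0x85 - 0x80 == 5.  */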
7834 ival = s390_extract_part (x, HImode, 0);
7837 ival = s390_extract_part (x, HImode, -1);
7840 ival = s390_extract_part (x, SImode, 0);
7843 ival = s390_extract_part (x, SImode, -1);
7855 len = (code == 's' || code == 'e' ? 64 : 32);
7856 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7858 if (code == 's' || code == 't')
7865 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7867 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7870 case CONST_WIDE_INT:
7872 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7873 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7874 else if (code == 'x')
7875 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7876 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7877 else if (code == 'h')
7878 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7879 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7883 output_operand_lossage ("invalid constant - try using "
7884 "an output modifier");
7886 output_operand_lossage ("invalid constant for output modifier '%c'",
7894 gcc_assert (const_vec_duplicate_p (x));
7895 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7896 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7904 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7906 ival = (code == 's') ? start : end;
7907 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7913 bool ok = s390_bytemask_vector_p (x, &mask);
7915 fprintf (file, "%u", mask);
7920 output_operand_lossage ("invalid constant vector for output "
7921 "modifier '%c'", code);
7927 output_operand_lossage ("invalid expression - try using "
7928 "an output modifier");
7930 output_operand_lossage ("invalid expression for output "
7931 "modifier '%c'", code);
7936 /* Target hook for assembling integer objects. We need to define it
7937 here to work around a bug in some versions of GAS, which couldn't
7938 handle values smaller than INT_MIN when printed in decimal. */
7941 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
7943 if (size == 8 && aligned_p
7944 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
7946 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
7950 return default_assemble_integer (x, size, aligned_p);
7953 /* Returns true if register REGNO is used for forming
7954 a memory address in expression X. */
7957 reg_used_in_mem_p (int regno, rtx x)
7959 enum rtx_code code = GET_CODE (x);
7965 if (refers_to_regno_p (regno, XEXP (x, 0)))
7968 else if (code == SET
7969 && GET_CODE (SET_DEST (x)) == PC)
7971 if (refers_to_regno_p (regno, SET_SRC (x)))
7975 fmt = GET_RTX_FORMAT (code);
7976 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7979 && reg_used_in_mem_p (regno, XEXP (x, i)))
7982 else if (fmt[i] == 'E')
7983 for (j = 0; j < XVECLEN (x, i); j++)
7984 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
7990 /* Returns true if expression DEP_RTX sets an address register
7991 used by instruction INSN to address memory. */
7994 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
7998 if (NONJUMP_INSN_P (dep_rtx))
7999 dep_rtx = PATTERN (dep_rtx);
8001 if (GET_CODE (dep_rtx) == SET)
8003 target = SET_DEST (dep_rtx);
8004 if (GET_CODE (target) == STRICT_LOW_PART)
8005 target = XEXP (target, 0);
8006 while (GET_CODE (target) == SUBREG)
8007 target = SUBREG_REG (target);
8009 if (GET_CODE (target) == REG)
8011 int regno = REGNO (target);
8013 if (s390_safe_attr_type (insn) == TYPE_LA)
8015 pat = PATTERN (insn);
8016 if (GET_CODE (pat) == PARALLEL)
8018 gcc_assert (XVECLEN (pat, 0) == 2);
8019 pat = XVECEXP (pat, 0, 0);
8021 gcc_assert (GET_CODE (pat) == SET);
8022 return refers_to_regno_p (regno, SET_SRC (pat));
8024 else if (get_attr_atype (insn) == ATYPE_AGEN)
8025 return reg_used_in_mem_p (regno, PATTERN (insn));
8031 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
8034 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
8036 rtx dep_rtx = PATTERN (dep_insn);
8039 if (GET_CODE (dep_rtx) == SET
8040 && addr_generation_dependency_p (dep_rtx, insn))
8042 else if (GET_CODE (dep_rtx) == PARALLEL)
8044 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
8046 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
8054 /* A C statement (sans semicolon) to update the integer scheduling priority
8055 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier;
8056 reduce the priority to execute INSN later. Do not define this macro if
8057 you do not need to adjust the scheduling priorities of insns.
8059 A STD instruction should be scheduled earlier,
8060 in order to use the bypass. */
8062 s390_adjust_priority (rtx_insn *insn, int priority)
8064 if (! INSN_P (insn))
8067 if (s390_tune <= PROCESSOR_2064_Z900)
8070 switch (s390_safe_attr_type (insn))
8074 priority = priority << 3;
8078 priority = priority << 1;
8087 /* The number of instructions that can be issued per cycle. */
8090 s390_issue_rate (void)
8094 case PROCESSOR_2084_Z990:
8095 case PROCESSOR_2094_Z9_109:
8096 case PROCESSOR_2094_Z9_EC:
8097 case PROCESSOR_2817_Z196:
8099 case PROCESSOR_2097_Z10:
8101 case PROCESSOR_9672_G5:
8102 case PROCESSOR_9672_G6:
8103 case PROCESSOR_2064_Z900:
8104 /* Starting with EC12 we use the sched_reorder hook to take care
8105 of instruction dispatch constraints. The algorithm only
8106 picks the best instruction and assumes only a single
8107 instruction gets issued per cycle. */
8108 case PROCESSOR_2827_ZEC12:
8109 case PROCESSOR_2964_Z13:
8110 case PROCESSOR_3906_Z14:
8117 s390_first_cycle_multipass_dfa_lookahead (void)
8122 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
8123 Fix up MEMs as required. */
8126 annotate_constant_pool_refs (rtx *x)
8131 gcc_assert (GET_CODE (*x) != SYMBOL_REF
8132 || !CONSTANT_POOL_ADDRESS_P (*x));
8134 /* Literal pool references can only occur inside a MEM ... */
8135 if (GET_CODE (*x) == MEM)
8137 rtx memref = XEXP (*x, 0);
8139 if (GET_CODE (memref) == SYMBOL_REF
8140 && CONSTANT_POOL_ADDRESS_P (memref))
8142 rtx base = cfun->machine->base_reg;
8143 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
8146 *x = replace_equiv_address (*x, addr);
8150 if (GET_CODE (memref) == CONST
8151 && GET_CODE (XEXP (memref, 0)) == PLUS
8152 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
8153 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
8154 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
8156 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
8157 rtx sym = XEXP (XEXP (memref, 0), 0);
8158 rtx base = cfun->machine->base_reg;
8159 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8162 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
8167 /* ... or a load-address type pattern. */
8168 if (GET_CODE (*x) == SET)
8170 rtx addrref = SET_SRC (*x);
8172 if (GET_CODE (addrref) == SYMBOL_REF
8173 && CONSTANT_POOL_ADDRESS_P (addrref))
8175 rtx base = cfun->machine->base_reg;
8176 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
8179 SET_SRC (*x) = addr;
8183 if (GET_CODE (addrref) == CONST
8184 && GET_CODE (XEXP (addrref, 0)) == PLUS
8185 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
8186 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
8187 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
8189 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
8190 rtx sym = XEXP (XEXP (addrref, 0), 0);
8191 rtx base = cfun->machine->base_reg;
8192 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8195 SET_SRC (*x) = plus_constant (Pmode, addr, off);
8200 /* Annotate LTREL_BASE as well. */
8201 if (GET_CODE (*x) == UNSPEC
8202 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8204 rtx base = cfun->machine->base_reg;
8205 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
8210 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8211 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8215 annotate_constant_pool_refs (&XEXP (*x, i));
8217 else if (fmt[i] == 'E')
8219 for (j = 0; j < XVECLEN (*x, i); j++)
8220 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
8225 /* Split all branches that exceed the maximum distance.
8226 Returns true if this created a new literal pool entry. */
8229 s390_split_branches (void)
8231 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8232 int new_literal = 0, ret;
8237 /* We need correct insn addresses. */
8239 shorten_branches (get_insns ());
8241 /* Find all branches that exceed 64KB, and split them. */
8243 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8245 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
8248 pat = PATTERN (insn);
8249 if (GET_CODE (pat) == PARALLEL)
8250 pat = XVECEXP (pat, 0, 0);
8251 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8254 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8256 label = &SET_SRC (pat);
8258 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8260 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8261 label = &XEXP (SET_SRC (pat), 1);
8262 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8263 label = &XEXP (SET_SRC (pat), 2);
8270 if (get_attr_length (insn) <= 4)
8273 /* We are going to use the return register as a scratch register;
8274 make sure it will be saved/restored by the prologue/epilogue. */
8275 cfun_frame_layout.save_return_addr_p = 1;
8280 rtx mem = force_const_mem (Pmode, *label);
8281 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8283 INSN_ADDRESSES_NEW (set_insn, -1);
8284 annotate_constant_pool_refs (&PATTERN (set_insn));
8291 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8292 UNSPEC_LTREL_OFFSET);
8293 target = gen_rtx_CONST (Pmode, target);
8294 target = force_const_mem (Pmode, target);
8295 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8297 INSN_ADDRESSES_NEW (set_insn, -1);
8298 annotate_constant_pool_refs (&PATTERN (set_insn));
8300 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8301 cfun->machine->base_reg),
8303 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8306 ret = validate_change (insn, label, target, 0);
8314 /* Find an annotated literal pool symbol referenced in RTX X,
8315 and store it at REF. Will abort if X contains references to
8316 more than one such pool symbol; multiple references to the same
8317 symbol are allowed, however.
8319 The rtx pointed to by REF must be initialized to NULL_RTX
8320 by the caller before calling this routine. */
8323 find_constant_pool_ref (rtx x, rtx *ref)
8328 /* Ignore LTREL_BASE references. */
8329 if (GET_CODE (x) == UNSPEC
8330 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8332 /* Likewise POOL_ENTRY insns. */
8333 if (GET_CODE (x) == UNSPEC_VOLATILE
8334 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8337 gcc_assert (GET_CODE (x) != SYMBOL_REF
8338 || !CONSTANT_POOL_ADDRESS_P (x));
8340 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8342 rtx sym = XVECEXP (x, 0, 0);
8343 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8344 && CONSTANT_POOL_ADDRESS_P (sym));
8346 if (*ref == NULL_RTX)
8349 gcc_assert (*ref == sym);
8354 fmt = GET_RTX_FORMAT (GET_CODE (x));
8355 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8359 find_constant_pool_ref (XEXP (x, i), ref);
8361 else if (fmt[i] == 'E')
8363 for (j = 0; j < XVECLEN (x, i); j++)
8364 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8369 /* Replace every reference to the annotated literal pool
8370 symbol REF in X by its base plus OFFSET. */
8373 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8378 gcc_assert (*x != ref);
8380 if (GET_CODE (*x) == UNSPEC
8381 && XINT (*x, 1) == UNSPEC_LTREF
8382 && XVECEXP (*x, 0, 0) == ref)
8384 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8388 if (GET_CODE (*x) == PLUS
8389 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8390 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8391 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8392 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8394 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8395 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8399 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8400 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8404 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8406 else if (fmt[i] == 'E')
8408 for (j = 0; j < XVECLEN (*x, i); j++)
8409 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8414 /* Check whether X contains an UNSPEC_LTREL_BASE.
8415 Return its constant pool symbol if found, NULL_RTX otherwise. */
8418 find_ltrel_base (rtx x)
8423 if (GET_CODE (x) == UNSPEC
8424 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8425 return XVECEXP (x, 0, 0);
8427 fmt = GET_RTX_FORMAT (GET_CODE (x));
8428 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8432 rtx fnd = find_ltrel_base (XEXP (x, i));
8436 else if (fmt[i] == 'E')
8438 for (j = 0; j < XVECLEN (x, i); j++)
8440 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8450 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8453 replace_ltrel_base (rtx *x)
8458 if (GET_CODE (*x) == UNSPEC
8459 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8461 *x = XVECEXP (*x, 0, 1);
8465 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8466 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8470 replace_ltrel_base (&XEXP (*x, i));
8472 else if (fmt[i] == 'E')
8474 for (j = 0; j < XVECLEN (*x, i); j++)
8475 replace_ltrel_base (&XVECEXP (*x, i, j));
8481 /* We keep a list of constants which we have to add to internal
8482 constant tables in the middle of large functions. */
8484 #define NR_C_MODES 32
8485 machine_mode constant_modes[NR_C_MODES] =
8487 TFmode, TImode, TDmode,
8488 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8489 V4SFmode, V2DFmode, V1TFmode,
8490 DFmode, DImode, DDmode,
8491 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8492 SFmode, SImode, SDmode,
8493 V4QImode, V2HImode, V1SImode, V1SFmode,
8502 struct constant *next;
8504 rtx_code_label *label;
8507 struct constant_pool
8509 struct constant_pool *next;
8510 rtx_insn *first_insn;
8511 rtx_insn *pool_insn;
8513 rtx_insn *emit_pool_after;
8515 struct constant *constants[NR_C_MODES];
8516 struct constant *execute;
8517 rtx_code_label *label;
8521 /* Allocate new constant_pool structure. */
8523 static struct constant_pool *
8524 s390_alloc_pool (void)
8526 struct constant_pool *pool;
8529 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8531 for (i = 0; i < NR_C_MODES; i++)
8532 pool->constants[i] = NULL;
8534 pool->execute = NULL;
8535 pool->label = gen_label_rtx ();
8536 pool->first_insn = NULL;
8537 pool->pool_insn = NULL;
8538 pool->insns = BITMAP_ALLOC (NULL);
8540 pool->emit_pool_after = NULL;
8545 /* Create new constant pool covering instructions starting at INSN
8546 and chain it to the end of POOL_LIST. */
8548 static struct constant_pool *
8549 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8551 struct constant_pool *pool, **prev;
8553 pool = s390_alloc_pool ();
8554 pool->first_insn = insn;
8556 for (prev = pool_list; *prev; prev = &(*prev)->next)
8563 /* End range of instructions covered by POOL at INSN and emit
8564 placeholder insn representing the pool. */
8567 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8569 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8572 insn = get_last_insn ();
8574 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8575 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8578 /* Add INSN to the list of insns covered by POOL. */
8581 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8583 bitmap_set_bit (pool->insns, INSN_UID (insn));
8586 /* Return pool out of POOL_LIST that covers INSN. */
8588 static struct constant_pool *
8589 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8591 struct constant_pool *pool;
8593 for (pool = pool_list; pool; pool = pool->next)
8594 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8600 /* Add constant VAL of mode MODE to the constant pool POOL. */
8603 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8608 for (i = 0; i < NR_C_MODES; i++)
8609 if (constant_modes[i] == mode)
8611 gcc_assert (i != NR_C_MODES);
8613 for (c = pool->constants[i]; c != NULL; c = c->next)
8614 if (rtx_equal_p (val, c->value))
8619 c = (struct constant *) xmalloc (sizeof *c);
8621 c->label = gen_label_rtx ();
8622 c->next = pool->constants[i];
8623 pool->constants[i] = c;
8624 pool->size += GET_MODE_SIZE (mode);
8628 /* Return an rtx that represents the offset of X from the start of
8632 s390_pool_offset (struct constant_pool *pool, rtx x)
8636 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8637 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8638 UNSPEC_POOL_OFFSET);
8639 return gen_rtx_CONST (GET_MODE (x), x);
8642 /* Find constant VAL of mode MODE in the constant pool POOL.
8643 Return an RTX describing the distance from the start of
8644 the pool to the location of the new constant. */
8647 s390_find_constant (struct constant_pool *pool, rtx val,
8653 for (i = 0; i < NR_C_MODES; i++)
8654 if (constant_modes[i] == mode)
8656 gcc_assert (i != NR_C_MODES);
8658 for (c = pool->constants[i]; c != NULL; c = c->next)
8659 if (rtx_equal_p (val, c->value))
8664 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8667 /* Check whether INSN is an execute. Return the label_ref to its
8668 execute target template if so, NULL_RTX otherwise. */
8671 s390_execute_label (rtx insn)
8673 if (NONJUMP_INSN_P (insn)
8674 && GET_CODE (PATTERN (insn)) == PARALLEL
8675 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8676 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8677 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8682 /* Add execute target for INSN to the constant pool POOL. */
8685 s390_add_execute (struct constant_pool *pool, rtx insn)
8689 for (c = pool->execute; c != NULL; c = c->next)
8690 if (INSN_UID (insn) == INSN_UID (c->value))
8695 c = (struct constant *) xmalloc (sizeof *c);
8697 c->label = gen_label_rtx ();
8698 c->next = pool->execute;
8704 /* Find execute target for INSN in the constant pool POOL.
8705 Return an RTX describing the distance from the start of
8706 the pool to the location of the execute target. */
8709 s390_find_execute (struct constant_pool *pool, rtx insn)
8713 for (c = pool->execute; c != NULL; c = c->next)
8714 if (INSN_UID (insn) == INSN_UID (c->value))
8719 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8722 /* For an execute INSN, extract the execute target template. */
8725 s390_execute_target (rtx insn)
8727 rtx pattern = PATTERN (insn);
8728 gcc_assert (s390_execute_label (insn));
8730 if (XVECLEN (pattern, 0) == 2)
8732 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8736 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8739 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8740 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8742 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8748 /* Indicate that INSN cannot be duplicated. This is the case for
8749 execute insns that carry a unique label. */
8752 s390_cannot_copy_insn_p (rtx_insn *insn)
8754 rtx label = s390_execute_label (insn);
8755 return label && label != const0_rtx;
8758 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8759 do not emit the pool base label. */
8762 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8765 rtx_insn *insn = pool->pool_insn;
8768 /* Switch to rodata section. */
8769 if (TARGET_CPU_ZARCH)
8771 insn = emit_insn_after (gen_pool_section_start (), insn);
8772 INSN_ADDRESSES_NEW (insn, -1);
8775 /* Ensure minimum pool alignment. */
8776 if (TARGET_CPU_ZARCH)
8777 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8779 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8780 INSN_ADDRESSES_NEW (insn, -1);
8782 /* Emit pool base label. */
8785 insn = emit_label_after (pool->label, insn);
8786 INSN_ADDRESSES_NEW (insn, -1);
8789 /* Dump constants in descending alignment requirement order,
8790 ensuring proper alignment for every constant. */
8791 for (i = 0; i < NR_C_MODES; i++)
8792 for (c = pool->constants[i]; c; c = c->next)
8794 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8795 rtx value = copy_rtx (c->value);
8796 if (GET_CODE (value) == CONST
8797 && GET_CODE (XEXP (value, 0)) == UNSPEC
8798 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8799 && XVECLEN (XEXP (value, 0), 0) == 1)
8800 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8802 insn = emit_label_after (c->label, insn);
8803 INSN_ADDRESSES_NEW (insn, -1);
8805 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8806 gen_rtvec (1, value),
8807 UNSPECV_POOL_ENTRY);
8808 insn = emit_insn_after (value, insn);
8809 INSN_ADDRESSES_NEW (insn, -1);
8812 /* Ensure minimum alignment for instructions. */
8813 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8814 INSN_ADDRESSES_NEW (insn, -1);
8816 /* Output in-pool execute template insns. */
8817 for (c = pool->execute; c; c = c->next)
8819 insn = emit_label_after (c->label, insn);
8820 INSN_ADDRESSES_NEW (insn, -1);
8822 insn = emit_insn_after (s390_execute_target (c->value), insn);
8823 INSN_ADDRESSES_NEW (insn, -1);
8826 /* Switch back to previous section. */
8827 if (TARGET_CPU_ZARCH)
8829 insn = emit_insn_after (gen_pool_section_end (), insn);
8830 INSN_ADDRESSES_NEW (insn, -1);
8833 insn = emit_barrier_after (insn);
8834 INSN_ADDRESSES_NEW (insn, -1);
8836 /* Remove placeholder insn. */
8837 remove_insn (pool->pool_insn);
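/* The emitted pool then looks roughly like this in the assembler
   output (editorial illustration, zarch case with the base label
   emitted locally):

	.section .rodata
	.align	8
     .LPOOL:                    # pool base label
     .LC0:	.quad ...       # widest/most aligned constants first
     .LC1:	.long ...       # then narrower ones
	.align	2
	.previous

   Emitting in descending alignment order guarantees each entry its
   natural alignment without per-entry padding.  */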
8840 /* Free all memory used by POOL. */
8843 s390_free_pool (struct constant_pool *pool)
8845 struct constant *c, *next;
8848 for (i = 0; i < NR_C_MODES; i++)
8849 for (c = pool->constants[i]; c; c = next)
8855 for (c = pool->execute; c; c = next)
8861 BITMAP_FREE (pool->insns);
8866 /* Collect main literal pool. Return NULL on overflow. */
8868 static struct constant_pool *
8869 s390_mainpool_start (void)
8871 struct constant_pool *pool;
8874 pool = s390_alloc_pool ();
8876 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8878 if (NONJUMP_INSN_P (insn)
8879 && GET_CODE (PATTERN (insn)) == SET
8880 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8881 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8883 /* There might be two main_pool instructions if base_reg
8884 is call-clobbered; one for shrink-wrapped code and one
8885 for the rest. We want to keep the first. */
8886 if (pool->pool_insn)
8888 insn = PREV_INSN (insn);
8889 delete_insn (NEXT_INSN (insn));
8892 pool->pool_insn = insn;
8895 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8897 s390_add_execute (pool, insn);
8899 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8901 rtx pool_ref = NULL_RTX;
8902 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8905 rtx constant = get_pool_constant (pool_ref);
8906 machine_mode mode = get_pool_mode (pool_ref);
8907 s390_add_constant (pool, constant, mode);
8911 /* If hot/cold partitioning is enabled we have to make sure that
8912 the literal pool is emitted in the same section where the
8913 initialization of the literal pool base pointer takes place.
8914 emit_pool_after is only used in the non-overflow case on non-Z
8915 CPUs where we can emit the literal pool at the end of the
8916 function body within the text section. */
8918 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8919 && !pool->emit_pool_after)
8920 pool->emit_pool_after = PREV_INSN (insn);
8923 gcc_assert (pool->pool_insn || pool->size == 0);
8925 if (pool->size >= 4096)
8927 /* We're going to chunkify the pool, so remove the main
8928 pool placeholder insn. */
8929 remove_insn (pool->pool_insn);
8931 s390_free_pool (pool);
8935 /* If the function ends with the section where the literal pool
8936 should be emitted, set the marker to its end. */
8937 if (pool && !pool->emit_pool_after)
8938 pool->emit_pool_after = get_last_insn ();
8943 /* POOL holds the main literal pool as collected by s390_mainpool_start.
8944 Modify the current function to output the pool constants as well as
8945 the pool register setup instruction. */
8948 s390_mainpool_finish (struct constant_pool *pool)
8950 rtx base_reg = cfun->machine->base_reg;
8952 /* If the pool is empty, we're done. */
8953 if (pool->size == 0)
8955 /* We don't actually need a base register after all. */
8956 cfun->machine->base_reg = NULL_RTX;
8958 if (pool->pool_insn)
8959 remove_insn (pool->pool_insn);
8960 s390_free_pool (pool);
8964 /* We need correct insn addresses. */
8965 shorten_branches (get_insns ());
8967 /* On zSeries, we use a LARL to load the pool register. The pool is
8968 located in the .rodata section, so we emit it after the function. */
8969 if (TARGET_CPU_ZARCH)
8971 rtx set = gen_main_base_64 (base_reg, pool->label);
8972 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8973 INSN_ADDRESSES_NEW (insn, -1);
8974 remove_insn (pool->pool_insn);
8976 insn = get_last_insn ();
8977 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
8978 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8980 s390_dump_pool (pool, 0);
8983 /* On S/390, if the total size of the function's code plus literal pool
8984 does not exceed 4096 bytes, we use BASR to set up a function base
8985 pointer, and emit the literal pool at the end of the function. */
8986 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
8987 + pool->size + 8 /* alignment slop */ < 4096)
8989 rtx set = gen_main_base_31_small (base_reg, pool->label);
8990 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
8991 INSN_ADDRESSES_NEW (insn, -1);
8992 remove_insn (pool->pool_insn);
8994 insn = emit_label_after (pool->label, insn);
8995 INSN_ADDRESSES_NEW (insn, -1);
8997 /* emit_pool_after will be set by s390_mainpool_start to the
8998 last insn of the section where the literal pool should be emitted. */
9000 insn = pool->emit_pool_after;
9002 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9003 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9005 s390_dump_pool (pool, 1);
9008 /* Otherwise, we emit an inline literal pool and use BASR to branch
9009 over it, setting up the pool register at the same time. */
9012 rtx_code_label *pool_end = gen_label_rtx ();
9014 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
9015 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
9016 JUMP_LABEL (insn) = pool_end;
9017 INSN_ADDRESSES_NEW (insn, -1);
9018 remove_insn (pool->pool_insn);
9020 insn = emit_label_after (pool->label, insn);
9021 INSN_ADDRESSES_NEW (insn, -1);
9023 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9024 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9026 insn = emit_label_after (pool_end, pool->pool_insn);
9027 INSN_ADDRESSES_NEW (insn, -1);
9029 s390_dump_pool (pool, 1);
9033 /* Replace all literal pool references. */
9035 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
9038 replace_ltrel_base (&PATTERN (insn));
9040 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9042 rtx addr, pool_ref = NULL_RTX;
9043 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9046 if (s390_execute_label (insn))
9047 addr = s390_find_execute (pool, insn);
9049 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
9050 get_pool_mode (pool_ref));
9052 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9053 INSN_CODE (insn) = -1;
9059 /* Free the pool. */
9060 s390_free_pool (pool);
9063 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9064 We have decided we cannot use this pool, so revert all changes
9065 to the current function that were done by s390_mainpool_start. */
9067 s390_mainpool_cancel (struct constant_pool *pool)
9069 /* We didn't actually change the instruction stream, so simply
9070 free the pool memory. */
9071 s390_free_pool (pool);
9075 /* Chunkify the literal pool. */
9077 #define S390_POOL_CHUNK_MIN 0xc00
9078 #define S390_POOL_CHUNK_MAX 0xe00
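/* The two thresholds implement a simple hysteresis: once a chunk has
   grown past 0xc00 bytes we start looking for a convenient BARRIER to
   end it at, and past 0xe00 bytes we force a split.  Both values stay
   well below the 4096-byte range of a 12-bit unsigned displacement,
   leaving slack for alignment and for the base-reload insns inserted
   later.  */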
9080 static struct constant_pool *
9081 s390_chunkify_start (void)
9083 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
9086 rtx pending_ltrel = NULL_RTX;
9089 rtx (*gen_reload_base) (rtx, rtx) =
9090 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
9093 /* We need correct insn addresses. */
9095 shorten_branches (get_insns ());
9097 /* Scan all insns and move literals to pool chunks. */
9099 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9101 bool section_switch_p = false;
9103 /* Check for pending LTREL_BASE. */
9106 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
9109 gcc_assert (ltrel_base == pending_ltrel);
9110 pending_ltrel = NULL_RTX;
9114 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
9117 curr_pool = s390_start_pool (&pool_list, insn);
9119 s390_add_execute (curr_pool, insn);
9120 s390_add_pool_insn (curr_pool, insn);
9122 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9124 rtx pool_ref = NULL_RTX;
9125 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9128 rtx constant = get_pool_constant (pool_ref);
9129 machine_mode mode = get_pool_mode (pool_ref);
9132 curr_pool = s390_start_pool (&pool_list, insn);
9134 s390_add_constant (curr_pool, constant, mode);
9135 s390_add_pool_insn (curr_pool, insn);
9137 /* Don't split the pool chunk between a LTREL_OFFSET load
9138 and the corresponding LTREL_BASE. */
9139 if (GET_CODE (constant) == CONST
9140 && GET_CODE (XEXP (constant, 0)) == UNSPEC
9141 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
9143 gcc_assert (!pending_ltrel);
9144 pending_ltrel = pool_ref;
9149 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
9152 s390_add_pool_insn (curr_pool, insn);
9153 /* An LTREL_BASE must follow within the same basic block. */
9154 gcc_assert (!pending_ltrel);
9158 switch (NOTE_KIND (insn))
9160 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
9161 section_switch_p = true;
9163 case NOTE_INSN_VAR_LOCATION:
9164 case NOTE_INSN_CALL_ARG_LOCATION:
9171 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
9172 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
9175 if (TARGET_CPU_ZARCH)
9177 if (curr_pool->size < S390_POOL_CHUNK_MAX)
9180 s390_end_pool (curr_pool, NULL);
9185 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
9186 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
9189 /* We will later have to insert base register reload insns.
9190 Those will have an effect on code size, which we need to
9191 consider here. This calculation makes rather pessimistic
9192 worst-case assumptions. */
9196 if (chunk_size < S390_POOL_CHUNK_MIN
9197 && curr_pool->size < S390_POOL_CHUNK_MIN
9198 && !section_switch_p)
9201 /* Pool chunks can only be inserted after BARRIERs ... */
9202 if (BARRIER_P (insn))
9204 s390_end_pool (curr_pool, insn);
9209 /* ... so if we don't find one in time, create one. */
9210 else if (chunk_size > S390_POOL_CHUNK_MAX
9211 || curr_pool->size > S390_POOL_CHUNK_MAX
9212 || section_switch_p)
9214 rtx_insn *label, *jump, *barrier, *next, *prev;
9216 if (!section_switch_p)
9218 /* We can insert the barrier only after a 'real' insn. */
9219 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
9221 if (get_attr_length (insn) == 0)
9223 /* Don't separate LTREL_BASE from the corresponding
9224 LTREL_OFFSET load. */
9231 next = NEXT_INSN (insn);
9235 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
9236 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
9240 gcc_assert (!pending_ltrel);
9242 /* The old pool has to end before the section switch
9243 note in order to make it part of the current
9245 insn = PREV_INSN (insn);
9248 label = gen_label_rtx ();
9250 if (prev && NOTE_P (prev))
9251 prev = prev_nonnote_insn (prev);
9253 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
9254 INSN_LOCATION (prev));
9256 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
9257 barrier = emit_barrier_after (jump);
9258 insn = emit_label_after (label, barrier);
9259 JUMP_LABEL (jump) = label;
9260 LABEL_NUSES (label) = 1;
9262 INSN_ADDRESSES_NEW (jump, -1);
9263 INSN_ADDRESSES_NEW (barrier, -1);
9264 INSN_ADDRESSES_NEW (insn, -1);
9266 s390_end_pool (curr_pool, barrier);
9267 curr_pool = NULL;
9268 extra_size = 0;
9269 }
9270 }
9271 }
9272
9273 if (curr_pool)
9274 s390_end_pool (curr_pool, NULL);
9275 gcc_assert (!pending_ltrel);
9277 /* Find all labels that are branched into
9278 from an insn belonging to a different chunk. */
9280 far_labels = BITMAP_ALLOC (NULL);
9282 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9284 rtx_jump_table_data *table;
9286 /* Labels marked with LABEL_PRESERVE_P can be target
9287 of non-local jumps, so we have to mark them.
9288 The same holds for named labels.
9290 Don't do that, however, if it is the label before
9291 a jump table. */
9292
9293 if (LABEL_P (insn)
9294 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9295 {
9296 rtx_insn *vec_insn = NEXT_INSN (insn);
9297 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9298 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9299 }
9300 /* Check potential targets in a table jump (casesi_jump). */
9301 else if (tablejump_p (insn, NULL, &table))
9302 {
9303 rtx vec_pat = PATTERN (table);
9304 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9305
9306 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9307 {
9308 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9309
9310 if (s390_find_pool (pool_list, label)
9311 != s390_find_pool (pool_list, insn))
9312 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9313 }
9314 }
9315 /* If we have a direct jump (conditional or unconditional),
9316 check all potential targets. */
9317 else if (JUMP_P (insn))
9318 {
9319 rtx pat = PATTERN (insn);
9320
9321 if (GET_CODE (pat) == PARALLEL)
9322 pat = XVECEXP (pat, 0, 0);
9324 if (GET_CODE (pat) == SET)
9325 {
9326 rtx label = JUMP_LABEL (insn);
9327 if (label && !ANY_RETURN_P (label))
9328 {
9329 if (s390_find_pool (pool_list, label)
9330 != s390_find_pool (pool_list, insn))
9331 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9332 }
9333 }
9334 }
9335 }
9337 /* Insert base register reload insns before every pool. */
9339 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9340 {
9341 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9342 curr_pool->label);
9343 rtx_insn *insn = curr_pool->first_insn;
9344 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9345 }
9347 /* Insert base register reload insns at every far label. */
9349 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9350 if (LABEL_P (insn)
9351 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9352 {
9353 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9354 if (pool)
9355 {
9356 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9357 pool->label);
9358 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9359 }
9360 }
9363 BITMAP_FREE (far_labels);
9366 /* Recompute insn addresses. */
9368 init_insn_lengths ();
9369 shorten_branches (get_insns ());
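
/* Illustrative sketch (not part of GCC): the chunk-splitting policy
   above, reduced to plain C.  A chunk is kept while both the code
   distance and the pool size stay below S390_POOL_CHUNK_MIN; it is
   ended at the next BARRIER, or ended forcibly (by inserting a jump,
   barrier and label) once either value exceeds S390_POOL_CHUNK_MAX or
   a section switch intervenes.  The enum and helper name here are
   hypothetical.  */
#if 0 /* example only */
enum chunk_action { CHUNK_KEEP, CHUNK_END_AT_BARRIER, CHUNK_END_FORCED };

static enum chunk_action
example_chunk_action (int chunk_size, int pool_size,
                      int at_barrier, int section_switch)
{
  /* Small chunks are kept intact unless a section switch forces a cut.  */
  if (chunk_size < S390_POOL_CHUNK_MIN
      && pool_size < S390_POOL_CHUNK_MIN
      && !section_switch)
    return CHUNK_KEEP;
  /* Pool chunks can only be inserted after BARRIERs.  */
  if (at_barrier)
    return CHUNK_END_AT_BARRIER;
  /* If no barrier shows up in time, one has to be created.  */
  if (chunk_size > S390_POOL_CHUNK_MAX
      || pool_size > S390_POOL_CHUNK_MAX
      || section_switch)
    return CHUNK_END_FORCED;
  return CHUNK_KEEP;
}
#endif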
9374 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9375 After we have decided to use this list, finish implementing
9376 all changes to the current function as required. */
9377
9378 static void
9379 s390_chunkify_finish (struct constant_pool *pool_list)
9380 {
9381 struct constant_pool *curr_pool = NULL;
9385 /* Replace all literal pool references. */
9387 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9388 {
9389 if (INSN_P (insn))
9390 replace_ltrel_base (&PATTERN (insn));
9391
9392 curr_pool = s390_find_pool (pool_list, insn);
9393 if (!curr_pool)
9394 continue;
9396 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9397 {
9398 rtx addr, pool_ref = NULL_RTX;
9399 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9400 if (pool_ref)
9401 {
9402 if (s390_execute_label (insn))
9403 addr = s390_find_execute (curr_pool, insn);
9404 else
9405 addr = s390_find_constant (curr_pool,
9406 get_pool_constant (pool_ref),
9407 get_pool_mode (pool_ref));
9409 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9410 INSN_CODE (insn) = -1;
9411 }
9412 }
9413 }
9414
9415 /* Dump out all literal pools. */
9416
9417 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9418 s390_dump_pool (curr_pool, 0);
9420 /* Free pool list. */
9421
9422 while (pool_list)
9423 {
9424 struct constant_pool *next = pool_list->next;
9425 s390_free_pool (pool_list);
9426 pool_list = next;
9427 }
9428 }
9430 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9431 We have decided we cannot use this list, so revert all changes
9432 to the current function that were done by s390_chunkify_start. */
9433
9434 static void
9435 s390_chunkify_cancel (struct constant_pool *pool_list)
9436 {
9437 struct constant_pool *curr_pool = NULL;
9440 /* Remove all pool placeholder insns. */
9442 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9443 {
9444 /* Did we insert an extra barrier? Remove it. */
9445 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9446 rtx_insn *jump = barrier? PREV_INSN (barrier) : NULL;
9447 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9449 if (jump && JUMP_P (jump)
9450 && barrier && BARRIER_P (barrier)
9451 && label && LABEL_P (label)
9452 && GET_CODE (PATTERN (jump)) == SET
9453 && SET_DEST (PATTERN (jump)) == pc_rtx
9454 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9455 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9456 {
9457 remove_insn (jump);
9458 remove_insn (barrier);
9459 remove_insn (label);
9460 }
9461
9462 remove_insn (curr_pool->pool_insn);
9463 }
9465 /* Remove all base register reload insns. */
9467 for (insn = get_insns (); insn; )
9468 {
9469 rtx_insn *next_insn = NEXT_INSN (insn);
9470
9471 if (NONJUMP_INSN_P (insn)
9472 && GET_CODE (PATTERN (insn)) == SET
9473 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9474 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9475 remove_insn (insn);
9476
9477 insn = next_insn;
9478 }
9479
9480 /* Free pool list. */
9481
9482 while (pool_list)
9483 {
9484 struct constant_pool *next = pool_list->next;
9485 s390_free_pool (pool_list);
9486 pool_list = next;
9487 }
9488 }
9490 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9491
9492 static unsigned int
9493 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9494 {
9495 switch (GET_MODE_CLASS (mode))
9496 {
9497 case MODE_FLOAT:
9498 case MODE_DECIMAL_FLOAT:
9499 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9501 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp),
9502 as_a <scalar_float_mode> (mode), align);
9503 break;
9504
9505 case MODE_INT:
9506 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9507 mark_symbol_refs_as_used (exp);
9508 break;
9509
9510 case MODE_VECTOR_INT:
9511 case MODE_VECTOR_FLOAT:
9512 {
9513 int i;
9514 machine_mode inner_mode;
9515 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9517 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9518 for (i = 0; i < XVECLEN (exp, 0); i++)
9519 s390_output_pool_entry (XVECEXP (exp, 0, i),
9520 inner_mode,
9521 i == 0
9522 ? align
9523 : GET_MODE_BITSIZE (inner_mode));
9524 }
9525 break;
9526
9527 default:
9528 gcc_unreachable ();
9529 }
9530
9531 return GET_MODE_SIZE (mode);
9532 }
9533 /* Return an RTL expression representing the value of the return address
9534 for the frame COUNT steps up from the current frame. FRAME is the
9535 frame pointer of that frame. */
9536
9537 rtx
9538 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9539 {
9540 int offset;
9541 rtx addr;
9542
9543 /* Without backchain, we fail for all but the current frame. */
9544
9545 if (!TARGET_BACKCHAIN && count > 0)
9546 return NULL_RTX;
9548 /* For the current frame, we need to make sure the initial
9549 value of RETURN_REGNUM is actually saved. */
9550
9551 if (count == 0)
9552 {
9553 /* On non-z architectures branch splitting could overwrite r14. */
9554 if (TARGET_CPU_ZARCH)
9555 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9556 else
9557 {
9558 cfun_frame_layout.save_return_addr_p = true;
9559 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9560 }
9561 }
9562
9563 if (TARGET_PACKED_STACK)
9564 offset = -2 * UNITS_PER_LONG;
9565 else
9566 offset = RETURN_REGNUM * UNITS_PER_LONG;
9568 addr = plus_constant (Pmode, frame, offset);
9569 addr = memory_address (Pmode, addr);
9570 return gen_rtx_MEM (Pmode, addr);
9571 }
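
/* Illustrative sketch (not part of GCC): the offset arithmetic above
   as a standalone helper.  With the packed stack layout the parent's
   return address sits two words below the frame address; with the
   default layout it sits in slot 14 (RETURN_REGNUM) of the 16-word
   register save area.  The helper name is hypothetical.  */
#if 0 /* example only */
static long
example_return_addr_offset (int packed_stack, long word_size)
{
  return packed_stack ? -2 * word_size : 14 * word_size;
}
#endif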
9573 /* Return an RTL expression representing the back chain stored in
9574 the current stack frame. */
9575
9576 rtx
9577 s390_back_chain_rtx (void)
9578 {
9579 rtx chain;
9580
9581 gcc_assert (TARGET_BACKCHAIN);
9582
9583 if (TARGET_PACKED_STACK)
9584 chain = plus_constant (Pmode, stack_pointer_rtx,
9585 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9586 else
9587 chain = stack_pointer_rtx;
9588
9589 chain = gen_rtx_MEM (Pmode, chain);
9590 return chain;
9591 }
9593 /* Find first call clobbered register unused in a function.
9594 This could be used as base register in a leaf function
9595 or for holding the return address before epilogue. */
9596
9597 static int
9598 find_unused_clobbered_reg (void)
9599 {
9600 int i;
9601 for (i = 0; i < 6; i++)
9602 if (!df_regs_ever_live_p (i))
9603 return i;
9604 return 0;
9605 }
9608 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9609 clobbered hard regs in SETREG. */
9610
9611 static void
9612 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9613 {
9614 char *regs_ever_clobbered = (char *)data;
9615 unsigned int i, regno;
9616 machine_mode mode = GET_MODE (setreg);
9618 if (GET_CODE (setreg) == SUBREG)
9619 {
9620 rtx inner = SUBREG_REG (setreg);
9621 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9622 return;
9623 regno = subreg_regno (setreg);
9624 }
9625 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9626 regno = REGNO (setreg);
9627 else
9628 return;
9629
9630 for (i = regno;
9631 i < regno + HARD_REGNO_NREGS (regno, mode);
9632 i++)
9633 regs_ever_clobbered[i] = 1;
9634 }
9636 /* Walks through all basic blocks of the current function looking
9637 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9638 of the passed integer array REGS_EVER_CLOBBERED are set to one for
9639 each of those regs. */
9640
9641 static void
9642 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9643 {
9644 basic_block cur_bb;
9645 rtx_insn *cur_insn;
9646 unsigned int i;
9647
9648 memset (regs_ever_clobbered, 0, 32);
9649
9650 /* For non-leaf functions we have to consider all call clobbered regs to be
9651 clobbered. */
9652 if (!crtl->is_leaf)
9653 {
9654 for (i = 0; i < 32; i++)
9655 regs_ever_clobbered[i] = call_really_used_regs[i];
9656 }
9658 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9659 this work is done by liveness analysis (mark_regs_live_at_end).
9660 Special care is needed for functions containing landing pads. Landing pads
9661 may use the eh registers, but the code which sets these registers is not
9662 contained in that function. Hence s390_regs_ever_clobbered is not able to
9663 deal with this automatically. */
9664 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9665 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9666 if (crtl->calls_eh_return
9667 || (cfun->machine->has_landing_pad_p
9668 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9669 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9671 /* For nonlocal gotos all call-saved registers have to be saved.
9672 This flag is also set for the unwinding code in libgcc.
9673 See expand_builtin_unwind_init. For regs_ever_live this is done by
9674 reload. */
9675 if (crtl->saves_all_registers)
9676 for (i = 0; i < 32; i++)
9677 if (!call_really_used_regs[i])
9678 regs_ever_clobbered[i] = 1;
9680 FOR_EACH_BB_FN (cur_bb, cfun)
9681 {
9682 FOR_BB_INSNS (cur_bb, cur_insn)
9683 {
9684 rtx pat;
9685
9686 if (!INSN_P (cur_insn))
9687 continue;
9688
9689 pat = PATTERN (cur_insn);
9691 /* Ignore GPR restore insns. */
9692 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9693 {
9694 if (GET_CODE (pat) == SET
9695 && GENERAL_REG_P (SET_DEST (pat)))
9696 {
9697 /* lgdr */
9698 if (GET_MODE (SET_SRC (pat)) == DImode
9699 && FP_REG_P (SET_SRC (pat)))
9700 continue;
9701
9702 /* l / lg */
9703 if (GET_CODE (SET_SRC (pat)) == MEM)
9704 continue;
9705 }
9706
9707 /* lm / lmg */
9708 if (GET_CODE (pat) == PARALLEL
9709 && load_multiple_operation (pat, VOIDmode))
9710 continue;
9711 }
9712
9713 note_stores (pat,
9714 s390_reg_clobbered_rtx,
9715 regs_ever_clobbered);
9716 }
9717 }
9718 }
9720 /* Determine the frame area which actually has to be accessed
9721 in the function epilogue. The values are stored at the
9722 given pointers AREA_BOTTOM (address of the lowest used stack
9723 address) and AREA_TOP (address of the first item which does
9724 not belong to the stack frame). */
9725
9726 static void
9727 s390_frame_area (int *area_bottom, int *area_top)
9728 {
9729 int b, t;
9730
9731 b = INT_MAX;
9732 t = INT_MIN;
9733
9734 if (cfun_frame_layout.first_restore_gpr != -1)
9735 {
9736 b = (cfun_frame_layout.gprs_offset
9737 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9738 t = b + (cfun_frame_layout.last_restore_gpr
9739 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9740 }
9741
9742 if (TARGET_64BIT && cfun_save_high_fprs_p)
9743 {
9744 b = MIN (b, cfun_frame_layout.f8_offset);
9744 b = MIN (b, cfun_frame_layout.f8_offset);
9745 t = MAX (t, (cfun_frame_layout.f8_offset
9746 + cfun_frame_layout.high_fprs * 8));
9747 }
9748
9749 if (!TARGET_64BIT)
9750 {
9751 if (cfun_fpr_save_p (FPR4_REGNUM))
9752 {
9753 b = MIN (b, cfun_frame_layout.f4_offset);
9754 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9755 }
9756 if (cfun_fpr_save_p (FPR6_REGNUM))
9757 {
9758 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9759 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9760 }
9761 }
9762 *area_bottom = b;
9763 *area_top = t;
9764 }
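
/* Illustrative sketch (not part of GCC): how the GPR part of the
   restore area is derived above.  The bottom is the offset of the
   first restored register; the top lies one register past the last.
   All names here are hypothetical.  */
#if 0 /* example only */
static void
example_gpr_area (int gprs_offset, int first, int last, int word_size,
                  int *bottom, int *top)
{
  *bottom = gprs_offset + first * word_size;
  *top = *bottom + (last - first + 1) * word_size;
}
#endif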
9765 /* Update gpr_save_slots in the frame layout trying to make use of
9766 FPRs as GPR save slots.
9767 This is a helper routine of s390_register_info. */
9768
9769 static void
9770 s390_register_info_gprtofpr ()
9771 {
9772 int save_reg_slot = FPR0_REGNUM;
9773 int i, j;
9774
9775 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9776 return;
9778 /* builtin_eh_return needs to be able to modify the return address
9779 on the stack. It could also adjust the FPR save slot instead but
9780 is it worth the trouble?! */
9781 if (crtl->calls_eh_return)
9782 return;
9783
9784 for (i = 15; i >= 6; i--)
9785 {
9786 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9787 continue;
9789 /* Advance to the next FP register which can be used as a
9790 GPR save slot. */
9791 while ((!call_really_used_regs[save_reg_slot]
9792 || df_regs_ever_live_p (save_reg_slot)
9793 || cfun_fpr_save_p (save_reg_slot))
9794 && FP_REGNO_P (save_reg_slot))
9795 save_reg_slot++;
9796 if (!FP_REGNO_P (save_reg_slot))
9797 {
9798 /* We only want to use ldgr/lgdr if we can get rid of
9799 stm/lm entirely. So undo the gpr slot allocation in
9800 case we ran out of FPR save slots. */
9801 for (j = 6; j <= 15; j++)
9802 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9803 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9804 break;
9805 }
9806 cfun_gpr_save_slot (i) = save_reg_slot++;
9807 }
9808 }
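
/* Illustrative sketch (not part of GCC): the slot-assignment idea
   above in isolation.  Walking r15 down to r6, each register that
   needs saving gets the next usable FPR; if the FPRs run out, every
   assignment is undone so stm/lm can be used instead.  The array
   encoding and names are hypothetical.  */
#if 0 /* example only */
static int
example_assign_fpr_slots (const int fpr_usable[16], int gpr_slot[16])
{
  int slot = 0, i, j;
  for (i = 15; i >= 6; i--)
    {
      if (gpr_slot[i] == 0)	/* 0: no save needed */
        continue;
      while (slot < 16 && !fpr_usable[slot])
        slot++;
      if (slot == 16)
        {
          /* Ran out of FPR slots: undo all FPR assignments.  */
          for (j = 6; j <= 15; j++)
            if (gpr_slot[j] > 1)
              gpr_slot[j] = 1;	/* 1: save to stack */
          return 0;
        }
      gpr_slot[i] = 2 + slot++;	/* 2+n: saved in FPR n */
    }
  return 1;
}
#endif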
9810 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9811 stdarg.
9812 This is a helper routine for s390_register_info. */
9813
9814 static void
9815 s390_register_info_stdarg_fpr ()
9816 {
9817 int i;
9818 int min_fpr;
9819 int max_fpr;
9820
9821 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9822 f0-f4 for 64 bit. */
9823 if (!cfun->stdarg
9824 || !TARGET_HARD_FLOAT
9825 || !cfun->va_list_fpr_size
9826 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9827 return;
9828
9829 min_fpr = crtl->args.info.fprs;
9830 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9831 if (max_fpr >= FP_ARG_NUM_REG)
9832 max_fpr = FP_ARG_NUM_REG - 1;
9834 /* FPR argument regs start at f0. */
9835 min_fpr += FPR0_REGNUM;
9836 max_fpr += FPR0_REGNUM;
9838 for (i = min_fpr; i <= max_fpr; i++)
9839 cfun_set_fpr_save (i);
9840 }
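
/* Illustrative sketch (not part of GCC): the clamped register range
   computed above.  Given the number of FPR argument registers already
   consumed and the va_list_fpr_size hint, the saved range is
   [consumed, consumed + hint - 1], clamped to the number of FPR
   argument registers.  The helper name is hypothetical.  */
#if 0 /* example only */
static void
example_stdarg_fpr_range (int fprs_consumed, int va_list_fpr_size,
                          int fp_arg_num_reg, int *min_fpr, int *max_fpr)
{
  *min_fpr = fprs_consumed;
  *max_fpr = fprs_consumed + va_list_fpr_size - 1;
  if (*max_fpr >= fp_arg_num_reg)
    *max_fpr = fp_arg_num_reg - 1;
}
#endif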
9842 /* Reserve the GPR save slots for GPRs which need to be saved due to
9843 stdarg.
9844 This is a helper routine for s390_register_info. */
9845
9846 static void
9847 s390_register_info_stdarg_gpr ()
9848 {
9849 int i;
9850 int min_gpr;
9851 int max_gpr;
9852
9853 if (!cfun->stdarg
9854 || !cfun->va_list_gpr_size
9855 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9856 return;
9858 min_gpr = crtl->args.info.gprs;
9859 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9860 if (max_gpr >= GP_ARG_NUM_REG)
9861 max_gpr = GP_ARG_NUM_REG - 1;
9863 /* GPR argument regs start at r2. */
9864 min_gpr += GPR2_REGNUM;
9865 max_gpr += GPR2_REGNUM;
9867 /* If r6 was supposed to be saved into an FPR and now needs to go to
9868 the stack for vararg we have to adjust the restore range to make
9869 sure that the restore is done from stack as well. */
9870 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9871 && min_gpr <= GPR6_REGNUM
9872 && max_gpr >= GPR6_REGNUM)
9873 {
9874 if (cfun_frame_layout.first_restore_gpr == -1
9875 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9876 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9877 if (cfun_frame_layout.last_restore_gpr == -1
9878 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9879 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9880 }
9881
9882 if (cfun_frame_layout.first_save_gpr == -1
9883 || cfun_frame_layout.first_save_gpr > min_gpr)
9884 cfun_frame_layout.first_save_gpr = min_gpr;
9886 if (cfun_frame_layout.last_save_gpr == -1
9887 || cfun_frame_layout.last_save_gpr < max_gpr)
9888 cfun_frame_layout.last_save_gpr = max_gpr;
9890 for (i = min_gpr; i <= max_gpr; i++)
9891 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9892 }
9894 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9895 prologue and epilogue. */
9896
9897 static void
9898 s390_register_info_set_ranges ()
9899 {
9900 int i, j;
9901
9902 /* Find the first and the last save slot supposed to use the stack
9903 to set the restore range.
9904 Vararg regs might be marked as save to stack but only the
9905 call-saved regs really need restoring (i.e. r6). This code
9906 assumes that the vararg regs have not yet been recorded in
9907 cfun_gpr_save_slot. */
9908 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9909 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9910 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9911 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9912 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9913 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
9914 }
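
/* Illustrative sketch (not part of GCC): the range scan above.  The
   first loop finds the lowest register with a stack save slot, the
   second the highest; i == 16 means no register needs the stack.  The
   boolean array is a hypothetical stand-in for SAVE_SLOT_STACK.  */
#if 0 /* example only */
static void
example_set_ranges (const int on_stack[16], int *first, int *last)
{
  int i, j;
  for (i = 0; i < 16 && !on_stack[i]; i++)
    ;
  for (j = 15; j > i && !on_stack[j]; j--)
    ;
  *first = (i == 16) ? -1 : i;
  *last = (i == 16) ? -1 : j;
}
#endif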
9916 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9917 for registers which need to be saved in function prologue.
9918 This function can be used until the insns emitted for save/restore
9919 of the regs are visible in the RTL stream. */
9920
9921 static void
9922 s390_register_info ()
9923 {
9924 int i;
9925 char clobbered_regs[32];
9926
9927 gcc_assert (!epilogue_completed);
9929 if (reload_completed)
9930 /* After reload we rely on our own routine to determine which
9931 registers need saving. */
9932 s390_regs_ever_clobbered (clobbered_regs);
9933 else
9934 /* During reload we use regs_ever_live as a base since reload
9935 does changes in there which we otherwise would not be aware
9936 of. */
9937 for (i = 0; i < 32; i++)
9938 clobbered_regs[i] = df_regs_ever_live_p (i);
9940 for (i = 0; i < 32; i++)
9941 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
9943 /* Mark the call-saved FPRs which need to be saved.
9944 This needs to be done before checking the special GPRs since the
9945 stack pointer usage depends on whether high FPRs have to be saved
9946 or not. */
9947 cfun_frame_layout.fpr_bitmap = 0;
9948 cfun_frame_layout.high_fprs = 0;
9949 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
9950 if (clobbered_regs[i] && !call_really_used_regs[i])
9951 {
9952 cfun_set_fpr_save (i);
9953 if (i >= FPR8_REGNUM)
9954 cfun_frame_layout.high_fprs++;
9955 }
9956
9957 /* Register 12 is used for GOT address, but also as temp in prologue
9958 for split-stack stdarg functions (unless r14 is available). */
9959 clobbered_regs[12]
9960 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
9961 || (flag_split_stack && cfun->stdarg
9962 && (crtl->is_leaf || TARGET_TPF_PROFILING
9963 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
9965 clobbered_regs[BASE_REGNUM]
9966 |= (cfun->machine->base_reg
9967 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
9969 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
9970 |= !!frame_pointer_needed;
9972 /* On pre z900 machines this might take until machine dependent
9973 reorg to decide.
9974 save_return_addr_p will only be set on non-zarch machines so
9975 there is no risk that r14 goes into an FPR instead of a stack
9976 slot. */
9977 clobbered_regs[RETURN_REGNUM]
9978 |= (!crtl->is_leaf
9979 || TARGET_TPF_PROFILING
9980 || cfun->machine->split_branches_pending_p
9981 || cfun_frame_layout.save_return_addr_p
9982 || crtl->calls_eh_return);
9984 clobbered_regs[STACK_POINTER_REGNUM]
9985 |= (!crtl->is_leaf
9986 || TARGET_TPF_PROFILING
9987 || cfun_save_high_fprs_p
9988 || get_frame_size () > 0
9989 || (reload_completed && cfun_frame_layout.frame_size > 0)
9990 || cfun->calls_alloca);
9992 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
9994 for (i = 6; i < 16; i++)
9995 if (clobbered_regs[i])
9996 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9998 s390_register_info_stdarg_fpr ();
9999 s390_register_info_gprtofpr ();
10000 s390_register_info_set_ranges ();
10001 /* stdarg functions might need to save GPRs 2 to 6. This might
10002 override the GPR->FPR save decision made by
10003 s390_register_info_gprtofpr for r6 since vararg regs must go to
10004 the stack. */
10005 s390_register_info_stdarg_gpr ();
10006 }
10008 /* This function is called by s390_optimize_prologue in order to get
10009 rid of unnecessary GPR save/restore instructions. The register info
10010 for the GPRs is re-computed and the ranges are re-calculated. */
10011
10012 static void
10013 s390_optimize_register_info ()
10014 {
10015 char clobbered_regs[32];
10016 int i;
10017
10018 gcc_assert (epilogue_completed);
10019 gcc_assert (!cfun->machine->split_branches_pending_p);
10021 s390_regs_ever_clobbered (clobbered_regs);
10023 for (i = 0; i < 32; i++)
10024 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10026 /* There is still special treatment needed for cases invisible to
10027 s390_regs_ever_clobbered. */
10028 clobbered_regs[RETURN_REGNUM]
10029 |= (TARGET_TPF_PROFILING
10030 /* When expanding builtin_return_addr in ESA mode we do not
10031 know whether r14 will later be needed as scratch reg when
10032 doing branch splitting. So the builtin always accesses the
10033 r14 save slot and we need to stick to the save/restore
10034 decision for r14 even if it turns out that it didn't get
10035 used. */
10036 || cfun_frame_layout.save_return_addr_p
10037 || crtl->calls_eh_return);
10039 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
10041 for (i = 6; i < 16; i++)
10042 if (!clobbered_regs[i])
10043 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
10045 s390_register_info_set_ranges ();
10046 s390_register_info_stdarg_gpr ();
10047 }
10049 /* Fill cfun->machine with info about frame of current function. */
10050
10051 static void
10052 s390_frame_info (void)
10053 {
10054 HOST_WIDE_INT lowest_offset;
10055
10056 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
10057 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
10059 /* The va_arg builtin uses a constant distance of 16 *
10060 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
10061 pointer. So even if we are going to save the stack pointer in an
10062 FPR we need the stack space in order to keep the offsets
10063 valid. */
10064 if (cfun->stdarg && cfun_save_arg_fprs_p)
10065 {
10066 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10067
10068 if (cfun_frame_layout.first_save_gpr_slot == -1)
10069 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
10070 }
10071
10072 cfun_frame_layout.frame_size = get_frame_size ();
10073 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
10074 fatal_error (input_location,
10075 "total size of local variables exceeds architecture limit");
10077 if (!TARGET_PACKED_STACK)
10078 {
10079 /* Fixed stack layout. */
10080 cfun_frame_layout.backchain_offset = 0;
10081 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
10082 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
10083 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
10084 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
10085 * UNITS_PER_LONG);
10086 }
10087 else if (TARGET_BACKCHAIN)
10088 {
10089 /* Kernel stack layout - packed stack, backchain, no float */
10090 gcc_assert (TARGET_SOFT_FLOAT);
10091 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
10092 - UNITS_PER_LONG);
10093
10094 /* The distance between the backchain and the return address
10095 save slot must not change. So we always need a slot for the
10096 stack pointer which resides in between. */
10097 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10099 cfun_frame_layout.gprs_offset
10100 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
10102 /* FPRs will not be saved. Nevertheless pick sane values to
10103 keep area calculations valid. */
10104 cfun_frame_layout.f0_offset =
10105 cfun_frame_layout.f4_offset =
10106 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
10107 }
10108 else
10109 {
10110 int num_fprs;
10111
10112 /* Packed stack layout without backchain. */
10113
10114 /* With stdarg FPRs need their dedicated slots. */
10115 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
10116 : (cfun_fpr_save_p (FPR4_REGNUM) +
10117 cfun_fpr_save_p (FPR6_REGNUM)));
10118 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
10120 num_fprs = (cfun->stdarg ? 2
10121 : (cfun_fpr_save_p (FPR0_REGNUM)
10122 + cfun_fpr_save_p (FPR2_REGNUM)));
10123 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
10125 cfun_frame_layout.gprs_offset
10126 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
10128 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
10129 - cfun_frame_layout.high_fprs * 8);
10130 }
10131
10132 if (cfun_save_high_fprs_p)
10133 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
10135 if (!crtl->is_leaf)
10136 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
10138 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
10139 sized area at the bottom of the stack. This is required also for
10140 leaf functions. When GCC generates a local stack reference it
10141 will always add STACK_POINTER_OFFSET to all these references. */
10142 if (crtl->is_leaf
10143 && !TARGET_TPF_PROFILING
10144 && cfun_frame_layout.frame_size == 0
10145 && !cfun->calls_alloca)
10146 return;
10148 /* Calculate the number of bytes we have used in our own register
10149 save area. With the packed stack layout we can re-use the
10150 remaining bytes for normal stack elements. */
10152 if (TARGET_PACKED_STACK)
10153 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
10154 cfun_frame_layout.f4_offset),
10155 cfun_frame_layout.gprs_offset);
10156 else
10157 lowest_offset = 0;
10158
10159 if (TARGET_BACKCHAIN)
10160 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
10162 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
10164 /* If under 31 bit an odd number of gprs has to be saved we have to
10165 adjust the frame size to sustain 8 byte alignment of stack
10166 frames. */
10167 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
10168 STACK_BOUNDARY / BITS_PER_UNIT - 1)
10169 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
10170 }
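
/* Illustrative sketch (not part of GCC): the final rounding step
   above.  The frame size is rounded up to a multiple of the stack
   boundary in bytes, which matters on 31 bit when an odd number of
   4-byte GPR slots was allocated.  The helper name is hypothetical.  */
#if 0 /* example only */
static long
example_align_frame_size (long size, long boundary_bytes)
{
  /* boundary_bytes must be a power of two.  */
  return (size + boundary_bytes - 1) & ~(boundary_bytes - 1);
}
#endif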
10172 /* Generate frame layout. Fills in register and frame data for the current
10173 function in cfun->machine. This routine can be called multiple times;
10174 it will re-do the complete frame layout every time. */
10175
10176 static void
10177 s390_init_frame_layout (void)
10178 {
10179 HOST_WIDE_INT frame_size;
10180 int base_used;
10181
10182 /* After LRA the frame layout is supposed to be read-only and should
10183 not be re-computed. */
10184 if (reload_completed)
10185 return;
10186
10187 /* On S/390 machines, we may need to perform branch splitting, which
10188 will require both base and return address register. We have no
10189 choice but to assume we're going to need them until right at the
10190 end of the machine dependent reorg phase. */
10191 if (!TARGET_CPU_ZARCH)
10192 cfun->machine->split_branches_pending_p = true;
10193
10194 do
10195 {
10196 frame_size = cfun_frame_layout.frame_size;
10198 /* Try to predict whether we'll need the base register. */
10199 base_used = cfun->machine->split_branches_pending_p
10200 || crtl->uses_const_pool
10201 || (!DISP_IN_RANGE (frame_size)
10202 && !CONST_OK_FOR_K (frame_size));
10204 /* Decide which register to use as literal pool base. In small
10205 leaf functions, try to use an unused call-clobbered register
10206 as base register to avoid save/restore overhead. */
10207 if (!base_used)
10208 cfun->machine->base_reg = NULL_RTX;
10209 else
10210 {
10211 int br = 0;
10212
10213 if (crtl->is_leaf)
10214 /* Prefer r5 (most likely to be free). */
10215 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
10216 ;
10217 cfun->machine->base_reg =
10218 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
10219 }
10220
10221 s390_register_info ();
10222 s390_frame_info ();
10223 }
10224 while (frame_size != cfun_frame_layout.frame_size);
10225 }
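
/* Illustrative sketch (not part of GCC): the fixpoint iteration
   above.  Register and frame info are recomputed until the frame size
   stops changing, since the base-register decision itself depends on
   the frame size.  The callback is a hypothetical stand-in for the
   register/frame recomputation.  */
#if 0 /* example only */
static long
example_layout_fixpoint (long (*recompute) (void))
{
  long frame_size, prev;
  prev = recompute ();
  do
    {
      frame_size = prev;
      prev = recompute ();
    }
  while (prev != frame_size);
  return frame_size;
}
#endif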
10227 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
10228 the TX is nonescaping. A transaction is considered escaping if
10229 there is at least one path from tbegin returning CC0 to the
10230 function exit block without an tend.
10232 The check so far has some limitations:
10233 - only single tbegin/tend BBs are supported
10234 - the first cond jump after tbegin must separate the CC0 path from ~CC0
10235 - when CC is copied to a GPR and the CC0 check is done with the GPR
10236 this is not supported
10237 */
10238
10239 static void
10240 s390_optimize_nonescaping_tx (void)
10241 {
10242 const unsigned int CC0 = 1 << 3;
10243 basic_block tbegin_bb = NULL;
10244 basic_block tend_bb = NULL;
10245 basic_block bb;
10246 rtx_insn *insn;
10247 bool result = true;
10248 int bb_index;
10249 rtx_insn *tbegin_insn = NULL;
10250
10251 if (!cfun->machine->tbegin_p)
10252 return;
10254 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
10255 {
10256 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
10257
10258 if (!bb)
10259 continue;
10260
10261 FOR_BB_INSNS (bb, insn)
10262 {
10263 rtx ite, cc, pat, target;
10264 unsigned HOST_WIDE_INT mask;
10266 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10267 continue;
10268
10269 pat = PATTERN (insn);
10271 if (GET_CODE (pat) == PARALLEL)
10272 pat = XVECEXP (pat, 0, 0);
10274 if (GET_CODE (pat) != SET
10275 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
10276 continue;
10277
10278 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10279 {
10280 rtx_insn *tmp;
10281
10282 tbegin_insn = insn;
10284 /* Just return if the tbegin doesn't have clobbers. */
10285 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10286 return;
10287
10288 if (tbegin_bb != NULL)
10289 return;
10290
10291 /* Find the next conditional jump. */
10292 for (tmp = NEXT_INSN (insn);
10293 tmp != NULL_RTX;
10294 tmp = NEXT_INSN (tmp))
10295 {
10296 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10297 return;
10298
10299 if (JUMP_P (tmp))
10300 {
10301 ite = SET_SRC (PATTERN (tmp));
10302 if (GET_CODE (ite) != IF_THEN_ELSE)
10303 return;
10304
10305 cc = XEXP (XEXP (ite, 0), 0);
10306 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10307 || GET_MODE (cc) != CCRAWmode
10308 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10309 return;
10310
10311 if (bb->succs->length () != 2)
10312 return;
10313
10314 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10315 if (GET_CODE (XEXP (ite, 0)) == NE)
10316 mask ^= 0xf;
10317
10318 if (mask == CC0)
10319 target = XEXP (ite, 1);
10320 else if (mask == (CC0 ^ 0xf))
10321 target = XEXP (ite, 2);
10322 else
10323 return;
10324
10325 {
10326 edge_iterator ei;
10327 edge e1, e2;
10328
10329 ei = ei_start (bb->succs);
10330 e1 = ei_safe_edge (ei);
10331 ei_next (&ei);
10332 e2 = ei_safe_edge (ei);
10334 if (e2->flags & EDGE_FALLTHRU)
10335 {
10336 e2 = e1;
10337 e1 = ei_safe_edge (ei);
10338 }
10339
10340 if (!(e1->flags & EDGE_FALLTHRU))
10341 return;
10342
10343 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10344 }
10345 if (tmp == BB_END (bb))
10346 break;
10347 }
10348 }
10349 }
10350 else if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10351 {
10352 if (tend_bb != NULL)
10353 return;
10354 tend_bb = bb;
10355 }
10356 }
10357 }
10359 /* Either we successfully remove the FPR clobbers here or we are not
10360 able to do anything for this TX. Both cases don't qualify for
10361 another look. */
10362 cfun->machine->tbegin_p = false;
10363
10364 if (tbegin_bb == NULL || tend_bb == NULL)
10365 return;
10366
10367 calculate_dominance_info (CDI_POST_DOMINATORS);
10368 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10369 free_dominance_info (CDI_POST_DOMINATORS);
10370
10371 if (!result)
10372 return;
10373
10374 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10375 gen_rtvec (2,
10376 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10377 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10378 INSN_CODE (tbegin_insn) = -1;
10379 df_insn_rescan (tbegin_insn);
10380 return;
10381 }
10382
10383
10384 /* Return true if it is legal to put a value with MODE into REGNO. */
10385
10386 bool
10387 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10388 {
10389 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10390 return false;
10391
10392 switch (REGNO_REG_CLASS (regno))
10393 {
10394 case VEC_REGS:
10395 return ((GET_MODE_CLASS (mode) == MODE_INT
10396 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10397 || mode == DFmode
10398 || (TARGET_VXE && mode == SFmode)
10399 || s390_vector_mode_supported_p (mode));
10400 break;
10401 case FP_REGS:
10402 if (TARGET_VX
10403 && ((GET_MODE_CLASS (mode) == MODE_INT
10404 && s390_class_max_nregs (FP_REGS, mode) == 1)
10405 || mode == DFmode
10406 || s390_vector_mode_supported_p (mode)))
10407 return true;
10408
10409 if (REGNO_PAIR_OK (regno, mode))
10410 {
10411 if (mode == SImode || mode == DImode)
10412 return true;
10413
10414 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10415 return true;
10416 }
10417 break;
10418 case ADDR_REGS:
10419 if (FRAME_REGNO_P (regno) && mode == Pmode)
10420 return true;
10421
10422 /* fallthrough */
10423 case GENERAL_REGS:
10424 if (REGNO_PAIR_OK (regno, mode))
10425 {
10426 if (TARGET_ZARCH
10427 || (mode != TFmode && mode != TCmode && mode != TDmode))
10428 return true;
10429 }
10430 break;
10431 case CC_REGS:
10432 if (GET_MODE_CLASS (mode) == MODE_CC)
10433 return true;
10434 break;
10435 case ACCESS_REGS:
10436 if (REGNO_PAIR_OK (regno, mode))
10437 {
10438 if (mode == SImode || mode == Pmode)
10439 return true;
10440 }
10441 break;
10442 default:
10443 return false;
10444 }
10445
10446 return false;
10447 }
10449 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10450
10451 bool
10452 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10453 {
10454 /* Once we've decided upon a register to use as base register, it must
10455 no longer be used for any other purpose. */
10456 if (cfun->machine->base_reg)
10457 if (REGNO (cfun->machine->base_reg) == old_reg
10458 || REGNO (cfun->machine->base_reg) == new_reg)
10459 return false;
10460
10461 /* Prevent regrename from using call-saved regs which haven't
10462 actually been saved. This is necessary since regrename assumes
10463 the backend save/restore decisions are based on
10464 df_regs_ever_live. Since we have our own routine we have to tell
10465 regrename manually about it. */
10466 if (GENERAL_REGNO_P (new_reg)
10467 && !call_really_used_regs[new_reg]
10468 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10469 return false;
10470
10471 return true;
10472 }
10473
10474 /* Return nonzero if register REGNO can be used as a scratch register
10475 in peephole2. */
10476
10477 static bool
10478 s390_hard_regno_scratch_ok (unsigned int regno)
10479 {
10480 /* See s390_hard_regno_rename_ok. */
10481 if (GENERAL_REGNO_P (regno)
10482 && !call_really_used_regs[regno]
10483 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10484 return false;
10485
10486 return true;
10487 }
10488
10489 /* Maximum number of registers to represent a value of mode MODE
10490 in a register of class RCLASS. */
10491
10492 unsigned int
10493 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10494 {
10495 int reg_size;
10496 bool reg_pair_required_p = false;
10497
10498 switch (rclass)
10499 {
10500 case FP_REGS:
10501 case VEC_REGS:
10502 reg_size = TARGET_VX ? 16 : 8;
10504 /* TF and TD modes would fit into a VR but we put them into a
10505 register pair since we do not have 128bit FP instructions on
10506 full VRs. */
10507 if (TARGET_VX
10508 && SCALAR_FLOAT_MODE_P (mode)
10509 && GET_MODE_SIZE (mode) >= 16)
10510 reg_pair_required_p = true;
10512 /* Even if complex types would fit into a single FPR/VR we force
10513 them into a register pair to deal with the parts more easily.
10514 (FIXME: What about complex ints?) */
10515 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10516 reg_pair_required_p = true;
10517 break;
10518 case ACCESS_REGS:
10519 reg_size = 4;
10520 break;
10521 default:
10522 reg_size = UNITS_PER_WORD;
10523 break;
10524 }
10525
10526 if (reg_pair_required_p)
10527 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10529 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
10530 }
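
/* Illustrative sketch (not part of GCC): the two nregs formulas
   above.  A required register pair covers the value in two halves,
   each rounded up to the register size; otherwise the value is simply
   divided across registers with rounding up.  The helper name is
   hypothetical.  */
#if 0 /* example only */
static int
example_class_max_nregs (int mode_size, int reg_size, int pair_required_p)
{
  if (pair_required_p)
    return 2 * ((mode_size / 2 + reg_size - 1) / reg_size);
  return (mode_size + reg_size - 1) / reg_size;
}
#endif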
10532 /* Return TRUE if changing mode from FROM to TO should not be allowed
10533 for register class CLASS. */
10534
10535 static bool
10536 s390_cannot_change_mode_class (machine_mode from_mode,
10537 machine_mode to_mode,
10538 enum reg_class rclass)
10539 {
10540 machine_mode small_mode;
10541 machine_mode big_mode;
10543 /* V1TF and TF have different representations in vector
10544 registers. */
10545 if (reg_classes_intersect_p (VEC_REGS, rclass)
10546 && ((from_mode == V1TFmode && to_mode == TFmode)
10547 || (from_mode == TFmode && to_mode == V1TFmode)))
10548 return true;
10549
10550 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10551 return false;
10553 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10554 {
10555 small_mode = from_mode;
10556 big_mode = to_mode;
10557 }
10558 else
10559 {
10560 small_mode = to_mode;
10561 big_mode = from_mode;
10562 }
10564 /* Values residing in VRs are little-endian style. All modes are
10565 placed left-aligned in an VR. This means that we cannot allow
10566 switching between modes with differing sizes. Also if the vector
10567 facility is available we still place TFmode values in VR register
10568 pairs, since the only instructions we have operating on TFmodes
10569 only deal with register pairs. Therefore we have to allow DFmode
10570 subregs of TFmodes to enable the TFmode splitters. */
10571 if (reg_classes_intersect_p (VEC_REGS, rclass)
10572 && (GET_MODE_SIZE (small_mode) < 8
10573 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10574 return true;
10575
10576 /* Likewise for access registers, since they have only half the
10577 word size on 64-bit. */
10578 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10579 return true;
10580
10581 return false;
10582 }
10583
10584 /* Return true if we use LRA instead of reload pass. */
10585 static bool
10586 s390_lra_p (void)
10587 {
10588 return s390_lra_flag;
10589 }
10591 /* Return true if register FROM can be eliminated via register TO. */
10592
10593 static bool
10594 s390_can_eliminate (const int from, const int to)
10595 {
10596 /* On zSeries machines, we have not marked the base register as fixed.
10597 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10598 If a function requires the base register, we say here that this
10599 elimination cannot be performed. This will cause reload to free
10600 up the base register (as if it were fixed). On the other hand,
10601 if the current function does *not* require the base register, we
10602 say here the elimination succeeds, which in turn allows reload
10603 to allocate the base register for any other purpose. */
10604 if (from == BASE_REGNUM && to == BASE_REGNUM)
10605 {
10606 if (TARGET_CPU_ZARCH)
10607 {
10608 s390_init_frame_layout ();
10609 return cfun->machine->base_reg == NULL_RTX;
10610 }
10611
10612 return false;
10613 }
10615 /* Everything else must point into the stack frame. */
10616 gcc_assert (to == STACK_POINTER_REGNUM
10617 || to == HARD_FRAME_POINTER_REGNUM);
10619 gcc_assert (from == FRAME_POINTER_REGNUM
10620 || from == ARG_POINTER_REGNUM
10621 || from == RETURN_ADDRESS_POINTER_REGNUM);
10623 /* Make sure we actually saved the return address. */
10624 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10625 if (!crtl->calls_eh_return
10626 && !cfun->stdarg
10627 && !cfun_frame_layout.save_return_addr_p)
10628 return false;
10629
10630 return true;
10631 }
10633 /* Return offset between register FROM and TO initially after prolog. */
10634
10635 HOST_WIDE_INT
10636 s390_initial_elimination_offset (int from, int to)
10637 {
10638 HOST_WIDE_INT offset;
10640 /* ??? Why are we called for non-eliminable pairs? */
10641 if (!s390_can_eliminate (from, to))
10642 return 0;
10643
10644 switch (from)
10645 {
10646 case FRAME_POINTER_REGNUM:
10647 offset = (get_frame_size()
10648 + STACK_POINTER_OFFSET
10649 + crtl->outgoing_args_size);
10650 break;
10651
10652 case ARG_POINTER_REGNUM:
10653 s390_init_frame_layout ();
10654 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10655 break;
10656
10657 case RETURN_ADDRESS_POINTER_REGNUM:
10658 s390_init_frame_layout ();
10660 if (cfun_frame_layout.first_save_gpr_slot == -1)
10661 {
10662 /* If it turns out that for stdarg nothing went into the reg
10663 save area we also do not need the return address
10664 pointer. */
10665 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10666 return 0;
10667
10668 gcc_unreachable ();
10669 }
10670
10671 /* In order to make the following work it is not necessary for
10672 r14 to have a save slot. It is sufficient if one other GPR
10673 got one. Since the GPRs are always stored without gaps we
10674 are able to calculate where the r14 save slot would
10675 reside. */
10676 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10677 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10678 UNITS_PER_LONG);
10679 break;
10680
10681 case BASE_REGNUM:
10682 offset = 0;
10683 break;
10684
10685 default:
10686 gcc_unreachable ();
10687 }
10688
10689 return offset;
10690 }
10692 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10693 to register BASE. Return generated insn. */
10694
10695 static rtx
10696 save_fpr (rtx base, int offset, int regnum)
10697 {
10698 rtx addr;
10699 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10700
10701 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10702 set_mem_alias_set (addr, get_varargs_alias_set ());
10703 else
10704 set_mem_alias_set (addr, get_frame_alias_set ());
10706 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
10707 }
10708
10709 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10710 to register BASE. Return generated insn. */
10711
10712 static rtx
10713 restore_fpr (rtx base, int offset, int regnum)
10714 {
10715 rtx addr;
10716 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10717 set_mem_alias_set (addr, get_frame_alias_set ());
10719 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10720 }
10721
10722 /* Return true if REGNO is a global register, but not one
10723 of the special ones that need to be saved/restored anyway. */
10724
10725 static bool
10726 global_not_special_regno_p (int regno)
10727 {
10728 return (global_regs[regno]
10729 /* These registers are special and need to be
10730 restored in any case. */
10731 && !(regno == STACK_POINTER_REGNUM
10732 || regno == RETURN_REGNUM
10733 || regno == BASE_REGNUM
10734 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10737 /* Generate insn to save registers FIRST to LAST into
10738 the register save area located at offset OFFSET
10739 relative to register BASE. */
10740
10741 static rtx_insn *
10742 save_gprs (rtx base, int offset, int first, int last)
10743 {
10744 rtx addr, insn, note;
10745 int i;
10746
10747 addr = plus_constant (Pmode, base, offset);
10748 addr = gen_rtx_MEM (Pmode, addr);
10750 set_mem_alias_set (addr, get_frame_alias_set ());
10752 /* Special-case single register. */
10753 if (first == last)
10754 {
10755 if (TARGET_64BIT)
10756 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10757 else
10758 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10759
10760 if (!global_not_special_regno_p (first))
10761 RTX_FRAME_RELATED_P (insn) = 1;
10762 return insn;
10763 }
10764
10766 insn = gen_store_multiple (addr,
10767 gen_rtx_REG (Pmode, first),
10768 GEN_INT (last - first + 1));
10770 if (first <= 6 && cfun->stdarg)
10771 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10772 {
10773 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10774
10775 if (first + i <= 6)
10776 set_mem_alias_set (mem, get_varargs_alias_set ());
10777 }
10779 /* We need to set the FRAME_RELATED flag on all SETs
10780 inside the store-multiple pattern.
10782 However, we must not emit DWARF records for registers 2..5
10783 if they are stored for use by variable arguments ...
10785 ??? Unfortunately, it is not enough to simply not set the
10786 FRAME_RELATED flags for those SETs, because the first SET
10787 of the PARALLEL is always treated as if it had the flag
10788 set, even if it does not. Therefore we emit a new pattern
10789 without those registers as REG_FRAME_RELATED_EXPR note. */
10791 if (first >= 6 && !global_not_special_regno_p (first))
10792 {
10793 rtx pat = PATTERN (insn);
10794
10795 for (i = 0; i < XVECLEN (pat, 0); i++)
10796 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10797 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10798 0, i)))))
10799 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10800
10801 RTX_FRAME_RELATED_P (insn) = 1;
10802 }
10803 else if (last >= 6)
10804 {
10805 int start;
10806
10807 for (start = first >= 6 ? first : 6; start <= last; start++)
10808 if (!global_not_special_regno_p (start))
10809 break;
10810
10811 if (start > last)
10812 return insn;
10813
10814 addr = plus_constant (Pmode, base,
10815 offset + (start - first) * UNITS_PER_LONG);
10816
10817 if (start == last)
10818 {
10819 if (TARGET_64BIT)
10820 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10821 gen_rtx_REG (Pmode, start));
10822 else
10823 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10824 gen_rtx_REG (Pmode, start));
10825 note = PATTERN (note);
10827 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10828 RTX_FRAME_RELATED_P (insn) = 1;
10829 }
10830 else
10831 {
10833 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10834 gen_rtx_REG (Pmode, start),
10835 GEN_INT (last - start + 1));
10836 note = PATTERN (note);
10838 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10840 for (i = 0; i < XVECLEN (note, 0); i++)
10841 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10842 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10843 0, i)))))
10844 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10845
10846 RTX_FRAME_RELATED_P (insn) = 1;
10847 }
10848 }
10849
10850 return insn;
10851 }
10852 /* Generate insn to restore registers FIRST to LAST from
10853 the register save area located at offset OFFSET
10854 relative to register BASE. */
10855
10856 static rtx_insn *
10857 restore_gprs (rtx base, int offset, int first, int last)
10858 {
10859 rtx addr, insn;
10860
10861 addr = plus_constant (Pmode, base, offset);
10862 addr = gen_rtx_MEM (Pmode, addr);
10863 set_mem_alias_set (addr, get_frame_alias_set ());
10865 /* Special-case single register. */
10866 if (first == last)
10867 {
10868 if (TARGET_64BIT)
10869 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10870 else
10871 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10872
10873 RTX_FRAME_RELATED_P (insn) = 1;
10874 return insn;
10875 }
10877 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10878 addr,
10879 GEN_INT (last - first + 1));
10880 RTX_FRAME_RELATED_P (insn) = 1;
10881 return insn;
10882 }
10884 /* Return insn sequence to load the GOT register. */
10885
10886 rtx_insn *
10887 s390_load_got (void)
10888 {
10889 rtx_insn *insns;
10890
10891 /* We cannot use pic_offset_table_rtx here since we use this
10892 function also for non-pic if __tls_get_offset is called and in
10893 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
10894 aren't usable. */
10895 rtx got_rtx = gen_rtx_REG (Pmode, 12);
10896
10897 start_sequence ();
10898
10899 if (TARGET_CPU_ZARCH)
10900 {
10901 emit_move_insn (got_rtx, s390_got_symbol ());
10902 }
10903 else
10904 {
10905 rtx offset;
10906
10907 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, s390_got_symbol ()),
10908 UNSPEC_LTREL_OFFSET);
10909 offset = gen_rtx_CONST (Pmode, offset);
10910 offset = force_const_mem (Pmode, offset);
10912 emit_move_insn (got_rtx, offset);
10914 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
10915 UNSPEC_LTREL_BASE);
10916 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
10918 emit_move_insn (got_rtx, offset);
10919 }
10920
10921 insns = get_insns ();
10922 end_sequence ();
10923 return insns;
10924 }
10926 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
10927 and the change to the stack pointer. */
10928
10929 static void
10930 s390_emit_stack_tie (void)
10931 {
10932 rtx mem = gen_frame_mem (BLKmode,
10933 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
10935 emit_insn (gen_stack_tie (mem));
10936 }
10938 /* Copy GPRS into FPR save slots. */
10939
10940 static void
10941 s390_save_gprs_to_fprs (void)
10942 {
10943 int i;
10944
10945 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10946 return;
10947
10948 for (i = 6; i < 16; i++)
10949 {
10950 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
10951 {
10952 rtx_insn *insn =
10953 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
10954 gen_rtx_REG (DImode, i));
10955 RTX_FRAME_RELATED_P (insn) = 1;
10956 /* This prevents dwarf2cfi from interpreting the set. Doing
10957 so it might emit def_cfa_register infos setting an FPR as
10958 new CFA. */
10959 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
10960 }
10961 }
10962 }
10964 /* Restore GPRs from FPR save slots. */
10965
10966 static void
10967 s390_restore_gprs_from_fprs (void)
10968 {
10969 int i;
10970
10971 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
10972 return;
10973
10974 for (i = 6; i < 16; i++)
10975 {
10976 rtx_insn *insn;
10977
10978 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
10979 continue;
10980
10981 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
10982
10983 if (i == STACK_POINTER_REGNUM)
10984 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
10985 else
10986 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
10987
10988 df_set_regs_ever_live (i, true);
10989 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
10990 if (i == STACK_POINTER_REGNUM)
10991 add_reg_note (insn, REG_CFA_DEF_CFA,
10992 plus_constant (Pmode, stack_pointer_rtx,
10993 STACK_POINTER_OFFSET));
10994 RTX_FRAME_RELATED_P (insn) = 1;
10995 }
10996 }
10997
10998
10999 /* A pass run immediately before shrink-wrapping and prologue and epilogue
11000 generation. */
11001
11002 namespace {
11003
11004 const pass_data pass_data_s390_early_mach =
11005 {
11006 RTL_PASS, /* type */
11007 "early_mach", /* name */
11008 OPTGROUP_NONE, /* optinfo_flags */
11009 TV_MACH_DEP, /* tv_id */
11010 0, /* properties_required */
11011 0, /* properties_provided */
11012 0, /* properties_destroyed */
11013 0, /* todo_flags_start */
11014 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
11015 };
11016
11017 class pass_s390_early_mach : public rtl_opt_pass
11018 {
11019 public:
11020 pass_s390_early_mach (gcc::context *ctxt)
11021 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
11022 {}
11023
11024 /* opt_pass methods: */
11025 virtual unsigned int execute (function *);
11026
11027 }; // class pass_s390_early_mach
11028
11029 unsigned int
11030 pass_s390_early_mach::execute (function *fun)
11031 {
11032 rtx_insn *insn;
11033
11034 /* Try to get rid of the FPR clobbers. */
11035 s390_optimize_nonescaping_tx ();
11037 /* Re-compute register info. */
11038 s390_register_info ();
11040 /* If we're using a base register, ensure that it is always valid for
11041 the first non-prologue instruction. */
11042 if (fun->machine->base_reg)
11043 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
11045 /* Annotate all constant pool references to let the scheduler know
11046 they implicitly use the base register. */
11047 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11048 if (INSN_P (insn))
11049 {
11050 annotate_constant_pool_refs (&PATTERN (insn));
11051 df_insn_rescan (insn);
11052 }
11053 return 0;
11054 }
11055
11056 } // anon namespace
11058 /* Expand the prologue into a bunch of separate insns. */
11059
11060 void
11061 s390_emit_prologue (void)
11062 {
11063 rtx insn, addr;
11064 rtx temp_reg;
11065 int i;
11066 int offset;
11067 int next_fpr = 0;
11069 /* Choose best register to use for temp use within prologue.
11070 TPF with profiling must avoid the register 14 - the tracing function
11071 needs the original contents of r14 to be preserved. */
11073 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
11074 && !crtl->is_leaf
11075 && !TARGET_TPF_PROFILING)
11076 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11077 else if (flag_split_stack && cfun->stdarg)
11078 temp_reg = gen_rtx_REG (Pmode, 12);
11079 else
11080 temp_reg = gen_rtx_REG (Pmode, 1);
11082 s390_save_gprs_to_fprs ();
11084 /* Save call saved gprs. */
11085 if (cfun_frame_layout.first_save_gpr != -1)
11086 {
11087 insn = save_gprs (stack_pointer_rtx,
11088 cfun_frame_layout.gprs_offset +
11089 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11090 - cfun_frame_layout.first_save_gpr_slot),
11091 cfun_frame_layout.first_save_gpr,
11092 cfun_frame_layout.last_save_gpr);
11093 emit_insn (insn);
11094 }
11095
11096 /* Dummy insn to mark literal pool slot. */
11097
11098 if (cfun->machine->base_reg)
11099 emit_insn (gen_main_pool (cfun->machine->base_reg));
11101 offset = cfun_frame_layout.f0_offset;
11103 /* Save f0 and f2. */
11104 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
11105 {
11106 if (cfun_fpr_save_p (i))
11107 {
11108 save_fpr (stack_pointer_rtx, offset, i);
11109 offset += 8;
11110 }
11111 else if (!TARGET_PACKED_STACK || cfun->stdarg)
11112 offset += 8;
11113 }
11114
11115 /* Save f4 and f6. */
11116 offset = cfun_frame_layout.f4_offset;
11117 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11118 {
11119 if (cfun_fpr_save_p (i))
11120 {
11121 insn = save_fpr (stack_pointer_rtx, offset, i);
11122 offset += 8;
11123
11124 /* If f4 and f6 are call clobbered they are saved due to
11125 stdargs and therefore are not frame related. */
11126 if (!call_really_used_regs[i])
11127 RTX_FRAME_RELATED_P (insn) = 1;
11128 }
11129 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
11130 offset += 8;
11131 }
11132
11133 if (TARGET_PACKED_STACK
11134 && cfun_save_high_fprs_p
11135 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
11136 {
11137 offset = (cfun_frame_layout.f8_offset
11138 + (cfun_frame_layout.high_fprs - 1) * 8);
11139
11140 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
11141 if (cfun_fpr_save_p (i))
11142 {
11143 insn = save_fpr (stack_pointer_rtx, offset, i);
11144
11145 RTX_FRAME_RELATED_P (insn) = 1;
11146 offset -= 8;
11147 }
11148 if (offset >= cfun_frame_layout.f8_offset)
11149 next_fpr = i;
11150 }
11151
11152 if (!TARGET_PACKED_STACK)
11153 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
11155 if (flag_stack_usage_info)
11156 current_function_static_stack_size = cfun_frame_layout.frame_size;
11158 /* Decrement stack pointer. */
11160 if (cfun_frame_layout.frame_size > 0)
11161 {
11162 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11163 rtx real_frame_off;
11165 if (s390_stack_size)
11166 {
11167 HOST_WIDE_INT stack_guard;
11168
11169 if (s390_stack_guard)
11170 stack_guard = s390_stack_guard;
11171 else
11172 {
11173 /* If no value for stack guard is provided the smallest power of 2
11174 larger than the current frame size is chosen. */
11175 stack_guard = 1;
11176 while (stack_guard < cfun_frame_layout.frame_size)
11177 stack_guard <<= 1;
11178 }
11180 if (cfun_frame_layout.frame_size >= s390_stack_size)
11181 {
11182 warning (0, "frame size of function %qs is %wd"
11183 " bytes exceeding user provided stack limit of "
11184 "%d bytes. "
11185 "An unconditional trap is added.",
11186 current_function_name(), cfun_frame_layout.frame_size,
11187 s390_stack_size);
11188 emit_insn (gen_trap ());
11189 emit_barrier ();
11190 }
11191 else
11192 {
11193 /* stack_guard has to be smaller than s390_stack_size.
11194 Otherwise we would emit an AND with zero which would
11195 not match the test under mask pattern. */
11196 if (stack_guard >= s390_stack_size)
11197 {
11198 warning (0, "frame size of function %qs is %wd"
11199 " bytes which is more than half the stack size. "
11200 "The dynamic check would not be reliable. "
11201 "No check emitted for this function.",
11202 current_function_name(),
11203 cfun_frame_layout.frame_size);
11204 }
11205 else
11206 {
11207 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
11208 & ~(stack_guard - 1));
11209
11210 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
11211 GEN_INT (stack_check_mask));
11212 if (TARGET_64BIT)
11213 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
11214 t, const0_rtx),
11215 t, const0_rtx, const0_rtx));
11216 else
11217 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
11218 t, const0_rtx),
11219 t, const0_rtx, const0_rtx));
11220 }
11221 }
11222 }
11224 if (s390_warn_framesize > 0
11225 && cfun_frame_layout.frame_size >= s390_warn_framesize)
11226 warning (0, "frame size of %qs is %wd bytes",
11227 current_function_name (), cfun_frame_layout.frame_size);
11229 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
11230 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
11232 /* Save incoming stack pointer into temp reg. */
11233 if (TARGET_BACKCHAIN || next_fpr)
11234 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
11236 /* Subtract frame size from stack pointer. */
11238 if (DISP_IN_RANGE (INTVAL (frame_off)))
11239 {
11240 insn = gen_rtx_SET (stack_pointer_rtx,
11241 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11242 frame_off));
11243 insn = emit_insn (insn);
11244 }
11245 else
11246 {
11247 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11248 frame_off = force_const_mem (Pmode, frame_off);
11250 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
11251 annotate_constant_pool_refs (&PATTERN (insn));
11252 }
11253
11254 RTX_FRAME_RELATED_P (insn) = 1;
11255 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11256 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11257 gen_rtx_SET (stack_pointer_rtx,
11258 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11259 real_frame_off)));
11260
11261 /* Set backchain. */
11262
11263 if (TARGET_BACKCHAIN)
11264 {
11265 if (cfun_frame_layout.backchain_offset)
11266 addr = gen_rtx_MEM (Pmode,
11267 plus_constant (Pmode, stack_pointer_rtx,
11268 cfun_frame_layout.backchain_offset));
11269 else
11270 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
11271 set_mem_alias_set (addr, get_frame_alias_set ());
11272 insn = emit_insn (gen_move_insn (addr, temp_reg));
11273 }
11274
11275 /* If we support non-call exceptions (e.g. for Java),
11276 we need to make sure the backchain pointer is set up
11277 before any possibly trapping memory access. */
11278 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
11279 {
11280 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
11281 emit_clobber (addr);
11282 }
11285 /* Save fprs 8 - 15 (64 bit ABI). */
11287 if (cfun_save_high_fprs_p && next_fpr)
11288 {
11289 /* If the stack might be accessed through a different register
11290 we have to make sure that the stack pointer decrement is not
11291 moved below the use of the stack slots. */
11292 s390_emit_stack_tie ();
11294 insn = emit_insn (gen_add2_insn (temp_reg,
11295 GEN_INT (cfun_frame_layout.f8_offset)));
11296
11297 offset = 0;
11298
11299 for (i = FPR8_REGNUM; i <= next_fpr; i++)
11300 if (cfun_fpr_save_p (i))
11301 {
11302 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
11303 cfun_frame_layout.frame_size
11304 + cfun_frame_layout.f8_offset
11305 + offset);
11306
11307 insn = save_fpr (temp_reg, offset, i);
11308 offset += 8;
11309 RTX_FRAME_RELATED_P (insn) = 1;
11310 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11311 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
11312 gen_rtx_REG (DFmode, i)));
11313 }
11314 }
11316 /* Set frame pointer, if needed. */
11318 if (frame_pointer_needed)
11319 {
11320 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11321 RTX_FRAME_RELATED_P (insn) = 1;
11322 }
11323
11324 /* Set up got pointer, if needed. */
11326 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
11327 {
11328 rtx_insn *insns = s390_load_got ();
11329
11330 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
11331 annotate_constant_pool_refs (&PATTERN (insn));
11332
11333 emit_insn (insns);
11334 }
11335
11336 if (TARGET_TPF_PROFILING)
11337 {
11338 /* Generate a BAS instruction to serve as a function
11339 entry intercept to facilitate the use of tracing
11340 algorithms located at the branch target. */
11341 emit_insn (gen_prologue_tpf ());
11343 /* Emit a blockage here so that all code
11344 lies between the profiling mechanisms. */
11345 emit_insn (gen_blockage ());
11346 }
11347 }
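
/* Illustrative sketch (not part of GCC): the default stack-guard
   choice in the prologue above -- when -mstack-guard is not given,
   the guard becomes the smallest power of two larger than the frame
   size, so the test-under-mask check covers the whole frame.  The
   helper name is hypothetical.  */
#if 0 /* example only */
static long
example_default_stack_guard (long frame_size)
{
  long guard = 1;
  while (guard < frame_size)
    guard <<= 1;
  return guard;
}
#endif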
11349 /* Expand the epilogue into a bunch of separate insns. */
11350
11351 void
11352 s390_emit_epilogue (bool sibcall)
11353 {
11354 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
11355 int area_bottom, area_top, offset = 0;
11356 int next_offset;
11357 rtvec p;
11358 int i;
11359
11360 if (TARGET_TPF_PROFILING)
11361 {
11363 /* Generate a BAS instruction to serve as a function
11364 entry intercept to facilitate the use of tracing
11365 algorithms located at the branch target. */
11367 /* Emit a blockage here so that all code
11368 lies between the profiling mechanisms. */
11369 emit_insn (gen_blockage ());
11370
11371 emit_insn (gen_epilogue_tpf ());
11372 }
11373
11374 /* Check whether to use frame or stack pointer for restore. */
11376 frame_pointer = (frame_pointer_needed
11377 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11379 s390_frame_area (&area_bottom, &area_top);
11381 /* Check whether we can access the register save area.
11382 If not, increment the frame pointer as required. */
11384 if (area_top <= area_bottom)
11385 {
11386 /* Nothing to restore. */
11387 }
11388 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11389 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11390 {
11391 /* Area is in range. */
11392 offset = cfun_frame_layout.frame_size;
11393 }
11394 else
11395 {
11396 rtx insn, frame_off, cfa;
11398 offset = area_bottom < 0 ? -area_bottom : 0;
11399 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11401 cfa = gen_rtx_SET (frame_pointer,
11402 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11403 if (DISP_IN_RANGE (INTVAL (frame_off)))
11404 {
11405 insn = gen_rtx_SET (frame_pointer,
11406 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11407 insn = emit_insn (insn);
11408 }
11409 else
11410 {
11411 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11412 frame_off = force_const_mem (Pmode, frame_off);
11414 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11415 annotate_constant_pool_refs (&PATTERN (insn));
11416 }
11417 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11418 RTX_FRAME_RELATED_P (insn) = 1;
11419 }
11420
11421 /* Restore call saved fprs. */
11423 if (TARGET_64BIT)
11424 {
11425 if (cfun_save_high_fprs_p)
11426 {
11427 next_offset = cfun_frame_layout.f8_offset;
11428 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11429 {
11430 if (cfun_fpr_save_p (i))
11431 {
11432 restore_fpr (frame_pointer,
11433 offset + next_offset, i);
11434 cfa_restores
11435 = alloc_reg_note (REG_CFA_RESTORE,
11436 gen_rtx_REG (DFmode, i), cfa_restores);
11437 next_offset += 8;
11438 }
11439 }
11440 }
11441 }
11442 else
11443 {
11445 next_offset = cfun_frame_layout.f4_offset;
11447 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11448 {
11449 if (cfun_fpr_save_p (i))
11450 {
11451 restore_fpr (frame_pointer,
11452 offset + next_offset, i);
11453 cfa_restores
11454 = alloc_reg_note (REG_CFA_RESTORE,
11455 gen_rtx_REG (DFmode, i), cfa_restores);
11456 next_offset += 8;
11457 }
11458 else if (!TARGET_PACKED_STACK)
11459 next_offset += 8;
11460 }
11461 }
11462
11464 /* Return register. */
11466 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11468 /* Restore call saved gprs. */
11470 if (cfun_frame_layout.first_restore_gpr != -1)
11471 {
11472 rtx insn, addr;
11473 int i;
11474
11475 /* Check for global register and save them
11476 to stack location from where they get restored. */
11477
11478 for (i = cfun_frame_layout.first_restore_gpr;
11479 i <= cfun_frame_layout.last_restore_gpr;
11480 i++)
11481 {
11482 if (global_not_special_regno_p (i))
11483 {
11484 addr = plus_constant (Pmode, frame_pointer,
11485 offset + cfun_frame_layout.gprs_offset
11486 + (i - cfun_frame_layout.first_save_gpr_slot)
11487 * UNITS_PER_LONG);
11488 addr = gen_rtx_MEM (Pmode, addr);
11489 set_mem_alias_set (addr, get_frame_alias_set ());
11490 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11491 }
11492 else
11493 cfa_restores
11494 = alloc_reg_note (REG_CFA_RESTORE,
11495 gen_rtx_REG (Pmode, i), cfa_restores);
11496 }
11497
11498 /* Fetch return address from stack before load multiple,
11499 this will do good for scheduling.
11501 Only do this if we already decided that r14 needs to be
11502 saved to a stack slot. (And not just because r14 happens to
11503 be in between two GPRs which need saving.) Otherwise it
11504 would be difficult to take that decision back in
11505 s390_optimize_prologue.
11507 This optimization is only helpful on in-order machines. */
11508 if (! sibcall
11509 && cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK
11510 && s390_tune <= PROCESSOR_2097_Z10)
11511 {
11512 int return_regnum = find_unused_clobbered_reg ();
11513 if (!return_regnum)
11515 return_reg = gen_rtx_REG (Pmode, return_regnum);
11517 addr = plus_constant (Pmode, frame_pointer,
11518 offset + cfun_frame_layout.gprs_offset
11520 - cfun_frame_layout.first_save_gpr_slot)
11522 addr = gen_rtx_MEM (Pmode, addr);
11523 set_mem_alias_set (addr, get_frame_alias_set ());
11524 emit_move_insn (return_reg, addr);
11526 /* Once we did that optimization we have to make sure
11527 s390_optimize_prologue does not try to remove the store
11528 of r14 since we will not be able to find the load issued
11529 here. */
11530 cfun_frame_layout.save_return_addr_p = true;
11533 insn = restore_gprs (frame_pointer,
11534 offset + cfun_frame_layout.gprs_offset
11535 + (cfun_frame_layout.first_restore_gpr
11536 - cfun_frame_layout.first_save_gpr_slot)
11538 cfun_frame_layout.first_restore_gpr,
11539 cfun_frame_layout.last_restore_gpr);
11540 insn = emit_insn (insn);
11541 REG_NOTES (insn) = cfa_restores;
11542 add_reg_note (insn, REG_CFA_DEF_CFA,
11543 plus_constant (Pmode, stack_pointer_rtx,
11544 STACK_POINTER_OFFSET));
11545 RTX_FRAME_RELATED_P (insn) = 1;
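/* Note: once the GPRs (including the stack pointer) are reloaded, the
   unwinder can no longer derive the CFA from the saved registers, so
   REG_CFA_DEF_CFA re-expresses it as r15 + STACK_POINTER_OFFSET, the
   value it had on function entry.  */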
11548 s390_restore_gprs_from_fprs ();
11553 /* Return to caller. */
11555 p = rtvec_alloc (2);
11557 RTVEC_ELT (p, 0) = ret_rtx;
11558 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11559 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11563 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11566 s390_set_up_by_prologue (hard_reg_set_container *regs)
11568 if (cfun->machine->base_reg
11569 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11570 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11573 /* -fsplit-stack support. */
11575 /* A SYMBOL_REF for __morestack. */
11576 static GTY(()) rtx morestack_ref;
11578 /* When using -fsplit-stack, the allocation routines set a field in
11579 the TCB to the bottom of the stack plus this much space, measured
11582 #define SPLIT_STACK_AVAILABLE 1024
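/* A minimal sketch of the check emitted below, assuming the TCB field
   __private_ss holds "stack bottom + SPLIT_STACK_AVAILABLE":

     r1 = *(thread_pointer + psso);      load the guard from the TCB
     if (frame_size > 1024)
       r1 += frame_size;                 adjust the guard for large frames
     if (r15 < r1)
       call __morestack;                 not enough room left

   Frames needing no more than SPLIT_STACK_AVAILABLE bytes can thus
   compare against the unadjusted guard.  */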
11584 /* Emit -fsplit-stack prologue, which goes before the regular function
11585 prologue. */
11588 s390_expand_split_stack_prologue (void)
11590 rtx r1, guard, cc = NULL;
11592 /* Offset from thread pointer to __private_ss. */
11593 int psso = TARGET_64BIT ? 0x38 : 0x20;
11594 /* Pointer size in bytes. */
11595 /* Frame size and argument size - the two parameters to __morestack. */
11596 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11597 /* Align argument size to 8 bytes - simplifies __morestack code. */
11598 HOST_WIDE_INT args_size = crtl->args.size >= 0
11599 ? ((crtl->args.size + 7) & ~7)
11601 /* Label to be called by __morestack. */
11602 rtx_code_label *call_done = NULL;
11603 rtx_code_label *parm_base = NULL;
11606 gcc_assert (flag_split_stack && reload_completed);
11607 if (!TARGET_CPU_ZARCH)
11609 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11613 r1 = gen_rtx_REG (Pmode, 1);
11615 /* If no stack frame will be allocated, don't do anything. */
11618 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11620 /* If va_start is used, just use r15. */
11621 emit_move_insn (r1,
11622 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11623 GEN_INT (STACK_POINTER_OFFSET)));
11629 if (morestack_ref == NULL_RTX)
11631 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11632 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11633 | SYMBOL_FLAG_FUNCTION);
11636 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11638 /* If frame_size will fit in an add instruction, do a stack space
11639 check, and only call __morestack if there's not enough space. */
11641 /* Get thread pointer. r1 is the only register we can always destroy - r0
11642 could contain a static chain (and cannot be used to address memory
11643 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11644 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11645 /* Aim at __private_ss. */
11646 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11648 /* If less than 1kiB is used, skip the addition and compare the
11649 stack pointer directly with the guard. */
11650 if (frame_size > SPLIT_STACK_AVAILABLE)
11652 emit_move_insn (r1, guard);
11654 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11656 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11660 /* Compare the (maybe adjusted) guard with the stack pointer. */
11661 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
11664 call_done = gen_label_rtx ();
11665 parm_base = gen_label_rtx ();
11667 /* Emit the parameter block. */
11668 tmp = gen_split_stack_data (parm_base, call_done,
11669 GEN_INT (frame_size),
11670 GEN_INT (args_size));
11671 insn = emit_insn (tmp);
11672 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
11673 LABEL_NUSES (call_done)++;
11674 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11675 LABEL_NUSES (parm_base)++;
11677 /* %r1 = litbase. */
11678 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
11679 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
11680 LABEL_NUSES (parm_base)++;
11682 /* Now, we need to call __morestack. It has very special calling
11683 conventions: it preserves param/return/static chain registers for
11684 calling main function body, and looks for its own parameters at %r1. */
11688 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
11690 insn = emit_jump_insn (tmp);
11691 JUMP_LABEL (insn) = call_done;
11692 LABEL_NUSES (call_done)++;
11694 /* Mark the jump as very unlikely to be taken. */
11695 add_reg_br_prob_note (insn,
11696 profile_probability::very_unlikely ());
11698 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11700 /* If va_start is used, and __morestack was not called, just use
11701 r15. */
11702 emit_move_insn (r1,
11703 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11704 GEN_INT (STACK_POINTER_OFFSET)));
11709 tmp = gen_split_stack_call (morestack_ref, call_done);
11710 insn = emit_jump_insn (tmp);
11711 JUMP_LABEL (insn) = call_done;
11712 LABEL_NUSES (call_done)++;
11716 /* __morestack will call us here. */
11718 emit_label (call_done);
11721 /* We may have to tell the dataflow pass that the split stack prologue
11722 is initializing a register. */
11725 s390_live_on_entry (bitmap regs)
11727 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11729 gcc_assert (flag_split_stack);
11730 bitmap_set_bit (regs, 1);
11734 /* Return true if the function can use simple_return to return outside
11735 of a shrink-wrapped region. At present shrink-wrapping is supported
11736 in all cases. */
11739 s390_can_use_simple_return_insn (void)
11744 /* Return true if the epilogue is guaranteed to contain only a return
11745 instruction and if a direct return can therefore be used instead.
11746 One of the main advantages of using direct return instructions
11747 is that we can then use conditional returns. */
11750 s390_can_use_return_insn (void)
11754 if (!reload_completed)
11760 if (TARGET_TPF_PROFILING)
11763 for (i = 0; i < 16; i++)
11764 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
11767 /* For 31 bit this is not covered by the frame_size check below
11768 since f4, f6 are saved in the register save area without needing
11769 additional stack space. */
11771 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
11774 if (cfun->machine->base_reg
11775 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11778 return cfun_frame_layout.frame_size == 0;
11781 /* The VX ABI differs for vararg functions. Therefore we need the
11782 prototype of the callee to be available when passing vector type
11784 static const char *
11785 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
11787 return ((TARGET_VX_ABI
11789 && VECTOR_TYPE_P (TREE_TYPE (val))
11790 && (funcdecl == NULL_TREE
11791 || (TREE_CODE (funcdecl) == FUNCTION_DECL
11792 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
11793 ? N_("vector argument passed to unprototyped function")
11798 /* Return the size in bytes of a function argument of
11799 type TYPE and/or mode MODE. At least one of TYPE or
11800 MODE must be specified. */
11803 s390_function_arg_size (machine_mode mode, const_tree type)
11806 return int_size_in_bytes (type);
11808 /* No type info available for some library calls ... */
11809 if (mode != BLKmode)
11810 return GET_MODE_SIZE (mode);
11812 /* If we have neither type nor mode, abort. */
11813 gcc_unreachable ();
11816 /* Return true if a function argument of type TYPE and mode MODE
11817 is to be passed in a vector register, if available. */
11820 s390_function_arg_vector (machine_mode mode, const_tree type)
11822 if (!TARGET_VX_ABI)
11825 if (s390_function_arg_size (mode, type) > 16)
11828 /* No type info available for some library calls ... */
11830 return VECTOR_MODE_P (mode);
11832 /* The ABI says that record types with a single member are treated
11833 just like that member would be. */
11834 while (TREE_CODE (type) == RECORD_TYPE)
11836 tree field, single = NULL_TREE;
11838 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11840 if (TREE_CODE (field) != FIELD_DECL)
11843 if (single == NULL_TREE)
11844 single = TREE_TYPE (field);
11849 if (single == NULL_TREE)
11853 /* If the field declaration adds extra bytes due to
11854 e.g. padding, this is not accepted as a vector type. */
11855 if (int_size_in_bytes (single) <= 0
11856 || int_size_in_bytes (single) != int_size_in_bytes (type))
11862 return VECTOR_TYPE_P (type);
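/* Example (illustrative, not from the ABI document): with the vector
   ABI a type like

     struct s { __vector int x; };

   is passed exactly like a plain __vector int, i.e. in a vector
   register, while a record with a second member or with tail padding
   falls through to the normal aggregate rules.  */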
11865 /* Return true if a function argument of type TYPE and mode MODE
11866 is to be passed in a floating-point register, if available. */
11869 s390_function_arg_float (machine_mode mode, const_tree type)
11871 if (s390_function_arg_size (mode, type) > 8)
11874 /* Soft-float changes the ABI: no floating-point registers are used. */
11875 if (TARGET_SOFT_FLOAT)
11878 /* No type info available for some library calls ... */
11880 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
11882 /* The ABI says that record types with a single member are treated
11883 just like that member would be. */
11884 while (TREE_CODE (type) == RECORD_TYPE)
11886 tree field, single = NULL_TREE;
11888 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
11890 if (TREE_CODE (field) != FIELD_DECL)
11893 if (single == NULL_TREE)
11894 single = TREE_TYPE (field);
11899 if (single == NULL_TREE)
11905 return TREE_CODE (type) == REAL_TYPE;
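/* Example (illustrative): struct s { double d; }; is passed like a
   plain double, i.e. in a floating-point register, whereas
   struct t { double d; int i; }; is not, since the second member
   changes the overall size.  */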
11908 /* Return true if a function argument of type TYPE and mode MODE
11909 is to be passed in an integer register, or a pair of integer
11910 registers, if available. */
11913 s390_function_arg_integer (machine_mode mode, const_tree type)
11915 int size = s390_function_arg_size (mode, type);
11919 /* No type info available for some library calls ... */
11921 return GET_MODE_CLASS (mode) == MODE_INT
11922 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
11924 /* We accept small integral (and similar) types. */
11925 if (INTEGRAL_TYPE_P (type)
11926 || POINTER_TYPE_P (type)
11927 || TREE_CODE (type) == NULLPTR_TYPE
11928 || TREE_CODE (type) == OFFSET_TYPE
11929 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
11932 /* We also accept structs of size 1, 2, 4, 8 that are not
11933 passed in floating-point registers. */
11934 if (AGGREGATE_TYPE_P (type)
11935 && exact_log2 (size) >= 0
11936 && !s390_function_arg_float (mode, type))
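/* Examples (illustrative): a 4-byte struct travels in a single GPR;
   on 31 bit an 8-byte struct or a long long occupies a register pair;
   a 12-byte struct fails the exact_log2 test and is passed by
   reference instead.  */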
11942 /* Return 1 if a function argument of type TYPE and mode MODE
11943 is to be passed by reference. The ABI specifies that only
11944 structures of size 1, 2, 4, or 8 bytes are passed by value,
11945 all other structures (and complex numbers) are passed by
11946 reference. */
11949 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
11950 machine_mode mode, const_tree type,
11951 bool named ATTRIBUTE_UNUSED)
11953 int size = s390_function_arg_size (mode, type);
11955 if (s390_function_arg_vector (mode, type))
11963 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
11966 if (TREE_CODE (type) == COMPLEX_TYPE
11967 || TREE_CODE (type) == VECTOR_TYPE)
11974 /* Update the data in CUM to advance over an argument of mode MODE and
11975 data type TYPE. (TYPE is null for libcalls where that information
11976 may not be available.) The boolean NAMED specifies whether the
11977 argument is a named argument (as opposed to an unnamed argument
11978 matching an ellipsis). */
11981 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
11982 const_tree type, bool named)
11984 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11986 if (s390_function_arg_vector (mode, type))
11988 /* We are called for unnamed vector stdarg arguments which are
11989 passed on the stack. In this case this hook does not have to
11990 do anything since stack arguments are tracked by common
11991 code. */
11996 else if (s390_function_arg_float (mode, type))
12000 else if (s390_function_arg_integer (mode, type))
12002 int size = s390_function_arg_size (mode, type);
12003 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
12006 gcc_unreachable ();
12009 /* Define where to put the arguments to a function.
12010 Value is zero to push the argument on the stack,
12011 or a hard register in which to store the argument.
12013 MODE is the argument's machine mode.
12014 TYPE is the data type of the argument (as a tree).
12015 This is null for libcalls where that information may
12017 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12018 the preceding args and about the function being called.
12019 NAMED is nonzero if this argument is a named parameter
12020 (otherwise it is an extra parameter matching an ellipsis).
12022 On S/390, we use general purpose registers 2 through 6 to
12023 pass integer, pointer, and certain structure arguments, and
12024 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
12025 to pass floating point arguments. All remaining arguments
12026 are pushed to the stack. */
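/* Example (illustrative, 64-bit ABI): for f (int a, double b, int c),
   A is passed in r2, B in f0, and C in r3 -- the GPR and FPR counters
   advance independently of each other.  */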
12029 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
12030 const_tree type, bool named)
12032 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12035 s390_check_type_for_vector_abi (type, true, false);
12037 if (s390_function_arg_vector (mode, type))
12039 /* Vector arguments being part of the ellipsis are passed on the
12040 stack. */
12041 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
12044 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
12046 else if (s390_function_arg_float (mode, type))
12048 if (cum->fprs + 1 > FP_ARG_NUM_REG)
12051 return gen_rtx_REG (mode, cum->fprs + 16);
12053 else if (s390_function_arg_integer (mode, type))
12055 int size = s390_function_arg_size (mode, type);
12056 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12058 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
12060 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
12061 return gen_rtx_REG (mode, cum->gprs + 2);
12062 else if (n_gprs == 2)
12064 rtvec p = rtvec_alloc (2);
12067 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
12070 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
12073 return gen_rtx_PARALLEL (mode, p);
12077 /* After the real arguments, expand_call calls us once again
12078 with a void_type_node type. Whatever we return here is
12079 passed as operand 2 to the call expanders.
12081 We don't need this feature ... */
12082 else if (type == void_type_node)
12085 gcc_unreachable ();
12088 /* Return true if return values of type TYPE should be returned
12089 in a memory buffer whose address is passed by the caller as
12090 hidden first argument. */
12093 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
12095 /* We accept small integral (and similar) types. */
12096 if (INTEGRAL_TYPE_P (type)
12097 || POINTER_TYPE_P (type)
12098 || TREE_CODE (type) == OFFSET_TYPE
12099 || TREE_CODE (type) == REAL_TYPE)
12100 return int_size_in_bytes (type) > 8;
12102 /* vector types which fit into a VR. */
12104 && VECTOR_TYPE_P (type)
12105 && int_size_in_bytes (type) <= 16)
12108 /* Aggregates and similar constructs are always returned
12110 if (AGGREGATE_TYPE_P (type)
12111 || TREE_CODE (type) == COMPLEX_TYPE
12112 || VECTOR_TYPE_P (type))
12115 /* ??? We get called on all sorts of random stuff from
12116 aggregate_value_p. We can't abort, but it's not clear
12117 what's safe to return. Pretend it's a struct I guess. */
12121 /* Function arguments and return values are promoted to word size. */
12123 static machine_mode
12124 s390_promote_function_mode (const_tree type, machine_mode mode,
12126 const_tree fntype ATTRIBUTE_UNUSED,
12127 int for_return ATTRIBUTE_UNUSED)
12129 if (INTEGRAL_MODE_P (mode)
12130 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
12132 if (type != NULL_TREE && POINTER_TYPE_P (type))
12133 *punsignedp = POINTERS_EXTEND_UNSIGNED;
12140 /* Define where to return a (scalar) value of type RET_TYPE.
12141 If RET_TYPE is null, define where to return a (scalar)
12142 value of mode MODE from a libcall. */
12145 s390_function_and_libcall_value (machine_mode mode,
12146 const_tree ret_type,
12147 const_tree fntype_or_decl,
12148 bool outgoing ATTRIBUTE_UNUSED)
12150 /* For vector return types it is important to use the RET_TYPE
12151 argument whenever available since the middle-end might have
12152 changed the mode to a scalar mode. */
12153 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
12154 || (!ret_type && VECTOR_MODE_P (mode)));
12156 /* For normal functions perform the promotion as
12157 promote_function_mode would do. */
12160 int unsignedp = TYPE_UNSIGNED (ret_type);
12161 mode = promote_function_mode (ret_type, mode, &unsignedp,
12162 fntype_or_decl, 1);
12165 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
12166 || SCALAR_FLOAT_MODE_P (mode)
12167 || (TARGET_VX_ABI && vector_ret_type_p));
12168 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
12170 if (TARGET_VX_ABI && vector_ret_type_p)
12171 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
12172 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
12173 return gen_rtx_REG (mode, 16);
12174 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
12175 || UNITS_PER_LONG == UNITS_PER_WORD)
12176 return gen_rtx_REG (mode, 2);
12177 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
12179 /* This case is triggered when returning a 64 bit value with
12180 -m31 -mzarch. Although the value would fit into a single
12181 register it has to be forced into a 32 bit register pair in
12182 order to match the ABI. */
12183 rtvec p = rtvec_alloc (2);
12186 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
12188 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
12190 return gen_rtx_PARALLEL (mode, p);
12193 gcc_unreachable ();
12196 /* Define where to return a scalar return value of type RET_TYPE. */
12199 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
12202 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
12203 fn_decl_or_type, outgoing);
12206 /* Define where to return a scalar libcall return value of mode
12210 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
12212 return s390_function_and_libcall_value (mode, NULL_TREE,
12217 /* Create and return the va_list datatype.
12219 On S/390, va_list is an array type equivalent to
12221 typedef struct __va_list_tag
12222 {
12223 long __gpr;
12224 long __fpr;
12225 void *__overflow_arg_area;
12226 void *__reg_save_area;
12227 } va_list[1];
12229 where __gpr and __fpr hold the number of general purpose
12230 or floating point arguments used up to now, respectively,
12231 __overflow_arg_area points to the stack location of the
12232 next argument passed on the stack, and __reg_save_area
12233 always points to the start of the register area in the
12234 call frame of the current function. The function prologue
12235 saves all registers used for argument passing into this
12236 area if the function uses variable arguments. */
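/* Illustrative figures (not normative): on 64 bit the structure is
   8+8+8+8 = 32 bytes; __gpr can count up to GP_ARG_NUM_REG (5) and
   __fpr up to FP_ARG_NUM_REG (4 on 64 bit, 2 on 31 bit) before
   arguments overflow to the stack.  */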
12239 s390_build_builtin_va_list (void)
12241 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12243 record = lang_hooks.types.make_type (RECORD_TYPE);
12246 build_decl (BUILTINS_LOCATION,
12247 TYPE_DECL, get_identifier ("__va_list_tag"), record);
12249 f_gpr = build_decl (BUILTINS_LOCATION,
12250 FIELD_DECL, get_identifier ("__gpr"),
12251 long_integer_type_node);
12252 f_fpr = build_decl (BUILTINS_LOCATION,
12253 FIELD_DECL, get_identifier ("__fpr"),
12254 long_integer_type_node);
12255 f_ovf = build_decl (BUILTINS_LOCATION,
12256 FIELD_DECL, get_identifier ("__overflow_arg_area"),
12258 f_sav = build_decl (BUILTINS_LOCATION,
12259 FIELD_DECL, get_identifier ("__reg_save_area"),
12262 va_list_gpr_counter_field = f_gpr;
12263 va_list_fpr_counter_field = f_fpr;
12265 DECL_FIELD_CONTEXT (f_gpr) = record;
12266 DECL_FIELD_CONTEXT (f_fpr) = record;
12267 DECL_FIELD_CONTEXT (f_ovf) = record;
12268 DECL_FIELD_CONTEXT (f_sav) = record;
12270 TYPE_STUB_DECL (record) = type_decl;
12271 TYPE_NAME (record) = type_decl;
12272 TYPE_FIELDS (record) = f_gpr;
12273 DECL_CHAIN (f_gpr) = f_fpr;
12274 DECL_CHAIN (f_fpr) = f_ovf;
12275 DECL_CHAIN (f_ovf) = f_sav;
12277 layout_type (record);
12279 /* The correct type is an array type of one element. */
12280 return build_array_type (record, build_index_type (size_zero_node));
12283 /* Implement va_start by filling the va_list structure VALIST.
12284 STDARG_P is always true, and ignored.
12285 NEXTARG points to the first anonymous stack argument.
12287 The following global variables are used to initialize
12288 the va_list structure:
12290 crtl->args.info:
12291 holds the number of gprs and fprs used for named arguments.
12292 crtl->args.arg_offset_rtx:
12293 holds the offset of the first anonymous stack argument
12294 (relative to the virtual arg pointer). */
12297 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
12299 HOST_WIDE_INT n_gpr, n_fpr;
12301 tree f_gpr, f_fpr, f_ovf, f_sav;
12302 tree gpr, fpr, ovf, sav, t;
12304 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12305 f_fpr = DECL_CHAIN (f_gpr);
12306 f_ovf = DECL_CHAIN (f_fpr);
12307 f_sav = DECL_CHAIN (f_ovf);
12309 valist = build_simple_mem_ref (valist);
12310 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12311 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12312 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12313 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12315 /* Count number of gp and fp argument registers used. */
12317 n_gpr = crtl->args.info.gprs;
12318 n_fpr = crtl->args.info.fprs;
12320 if (cfun->va_list_gpr_size)
12322 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12323 build_int_cst (NULL_TREE, n_gpr));
12324 TREE_SIDE_EFFECTS (t) = 1;
12325 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12328 if (cfun->va_list_fpr_size)
12330 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12331 build_int_cst (NULL_TREE, n_fpr));
12332 TREE_SIDE_EFFECTS (t) = 1;
12333 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12336 if (flag_split_stack
12337 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12339 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12344 reg = gen_reg_rtx (Pmode);
12345 cfun->machine->split_stack_varargs_pointer = reg;
12348 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12349 seq = get_insns ();
12352 push_topmost_sequence ();
12353 emit_insn_after (seq, entry_of_function ());
12354 pop_topmost_sequence ();
12357 /* Find the overflow area.
12358 FIXME: This currently is too pessimistic when the vector ABI is
12359 enabled. In that case we *always* set up the overflow area
12360 pointer. */
12361 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
12362 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12365 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12366 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12368 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12370 off = INTVAL (crtl->args.arg_offset_rtx);
12371 off = off < 0 ? 0 : off;
12372 if (TARGET_DEBUG_ARG)
12373 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12374 (int)n_gpr, (int)n_fpr, off);
12376 t = fold_build_pointer_plus_hwi (t, off);
12378 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12379 TREE_SIDE_EFFECTS (t) = 1;
12380 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12383 /* Find the register save area. */
12384 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12385 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12387 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12388 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12390 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12391 TREE_SIDE_EFFECTS (t) = 1;
12392 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12396 /* Implement va_arg by updating the va_list structure
12397 VALIST as required to retrieve an argument of type
12398 TYPE, and returning that argument.
12400 Generates code equivalent to:
12402 if (integral value) {
12403 if (size <= 4 && args.gpr < 5 ||
12404 size > 4 && args.gpr < 4 )
12405 ret = args.reg_save_area[args.gpr+8]
12407 ret = *args.overflow_arg_area++;
12408 } else if (vector value) {
12409 ret = *args.overflow_arg_area;
12410 args.overflow_arg_area += size / 8;
12411 } else if (float value) {
12413 ret = args.reg_save_area[args.fpr+64]
12415 ret = *args.overflow_arg_area++;
12416 } else if (aggregate value) {
12418 ret = *args.reg_save_area[args.gpr]
12420 ret = **args.overflow_arg_area++;
12424 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12425 gimple_seq *post_p ATTRIBUTE_UNUSED)
12427 tree f_gpr, f_fpr, f_ovf, f_sav;
12428 tree gpr, fpr, ovf, sav, reg, t, u;
12429 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12430 tree lab_false, lab_over = NULL_TREE;
12431 tree addr = create_tmp_var (ptr_type_node, "addr");
12432 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12433 a stack slot. */
12435 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12436 f_fpr = DECL_CHAIN (f_gpr);
12437 f_ovf = DECL_CHAIN (f_fpr);
12438 f_sav = DECL_CHAIN (f_ovf);
12440 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12441 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12442 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12444 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12445 both appear on a lhs. */
12446 valist = unshare_expr (valist);
12447 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12449 size = int_size_in_bytes (type);
12451 s390_check_type_for_vector_abi (type, true, false);
12453 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12455 if (TARGET_DEBUG_ARG)
12457 fprintf (stderr, "va_arg: aggregate type");
12461 /* Aggregates are passed by reference. */
12466 /* Kernel stack layout on 31 bit: it is assumed here that no padding
12467 will be added by s390_frame_info because for va_args an even
12468 number of gprs always has to be saved (r15-r2 = 14 regs). */
12469 sav_ofs = 2 * UNITS_PER_LONG;
12470 sav_scale = UNITS_PER_LONG;
12471 size = UNITS_PER_LONG;
12472 max_reg = GP_ARG_NUM_REG - n_reg;
12473 left_align_p = false;
12475 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12477 if (TARGET_DEBUG_ARG)
12479 fprintf (stderr, "va_arg: vector type");
12489 left_align_p = true;
12491 else if (s390_function_arg_float (TYPE_MODE (type), type))
12493 if (TARGET_DEBUG_ARG)
12495 fprintf (stderr, "va_arg: float type");
12499 /* FP args go in FP registers, if present. */
12503 sav_ofs = 16 * UNITS_PER_LONG;
12505 max_reg = FP_ARG_NUM_REG - n_reg;
12506 left_align_p = false;
12510 if (TARGET_DEBUG_ARG)
12512 fprintf (stderr, "va_arg: other type");
12516 /* Otherwise into GP registers. */
12519 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12521 /* Kernel stack layout on 31 bit: it is assumed here that no padding
12522 will be added by s390_frame_info because for va_args an even
12523 number of gprs always has to be saved (r15-r2 = 14 regs). */
12524 sav_ofs = 2 * UNITS_PER_LONG;
12526 if (size < UNITS_PER_LONG)
12527 sav_ofs += UNITS_PER_LONG - size;
12529 sav_scale = UNITS_PER_LONG;
12530 max_reg = GP_ARG_NUM_REG - n_reg;
12531 left_align_p = false;
12534 /* Pull the value out of the saved registers ... */
12536 if (reg != NULL_TREE)
12539 if (reg > ((typeof (reg))max_reg))
12542 addr = sav + sav_ofs + reg * sav_scale;
12549 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12550 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12552 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12553 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12554 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12555 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12556 gimplify_and_add (t, pre_p);
12558 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12559 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12560 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12561 t = fold_build_pointer_plus (t, u);
12563 gimplify_assign (addr, t, pre_p);
12565 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12567 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12570 /* ... Otherwise out of the overflow area. */
12573 if (size < UNITS_PER_LONG && !left_align_p)
12574 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12576 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12578 gimplify_assign (addr, t, pre_p);
12580 if (size < UNITS_PER_LONG && left_align_p)
12581 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12583 t = fold_build_pointer_plus_hwi (t, size);
12585 gimplify_assign (ovf, t, pre_p);
12587 if (reg != NULL_TREE)
12588 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12591 /* Increment register save count. */
12595 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12596 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12597 gimplify_and_add (u, pre_p);
12602 t = build_pointer_type_for_mode (build_pointer_type (type),
12604 addr = fold_convert (t, addr);
12605 addr = build_va_arg_indirect_ref (addr);
12609 t = build_pointer_type_for_mode (type, ptr_mode, true);
12610 addr = fold_convert (t, addr);
12613 return build_va_arg_indirect_ref (addr);
12616 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12618 DEST - Register location where CC will be stored.
12619 TDB - Pointer to a 256 byte area where to store the transaction
12620 diagnostic block. NULL if TDB is not needed.
12621 RETRY - Retry count value. If non-NULL a retry loop for CC2
12622 is generated.
12623 CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
12624 of the tbegin instruction pattern. */
12627 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12629 rtx retry_plus_two = gen_reg_rtx (SImode);
12630 rtx retry_reg = gen_reg_rtx (SImode);
12631 rtx_code_label *retry_label = NULL;
12633 if (retry != NULL_RTX)
12635 emit_move_insn (retry_reg, retry);
12636 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12637 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12638 retry_label = gen_label_rtx ();
12639 emit_label (retry_label);
12642 if (clobber_fprs_p)
12645 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12648 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12652 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12655 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12656 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12658 UNSPEC_CC_TO_INT));
12659 if (retry != NULL_RTX)
12661 const int CC0 = 1 << 3;
12662 const int CC1 = 1 << 2;
12663 const int CC3 = 1 << 0;
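/* These constants follow the 4-bit condition-code mask encoding used
   by branch instructions: mask bit 3 selects CC0, bit 2 CC1, bit 1
   CC2 and bit 0 CC3.  CC0 | CC1 | CC3 below therefore matches every
   outcome except CC2, the transient-failure case.  */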
12665 rtx count = gen_reg_rtx (SImode);
12666 rtx_code_label *leave_label = gen_label_rtx ();
12668 /* Exit for success and permanent failures. */
12669 jump = s390_emit_jump (leave_label,
12670 gen_rtx_EQ (VOIDmode,
12671 gen_rtx_REG (CCRAWmode, CC_REGNUM),
12672 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
12673 LABEL_NUSES (leave_label) = 1;
12675 /* CC2 - transient failure. Perform retry with ppa. */
12676 emit_move_insn (count, retry_plus_two);
12677 emit_insn (gen_subsi3 (count, count, retry_reg));
12678 emit_insn (gen_tx_assist (count));
12679 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
12682 JUMP_LABEL (jump) = retry_label;
12683 LABEL_NUSES (retry_label) = 1;
12684 emit_label (leave_label);
12689 /* Return the decl for the target specific builtin with the function
12690 code FCODE. */
12693 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
12695 if (fcode >= S390_BUILTIN_MAX)
12696 return error_mark_node;
12698 return s390_builtin_decls[fcode];
12701 /* We call mcount before the function prologue. So a profiled leaf
12702 function should stay a leaf function. */
12705 s390_keep_leaf_when_profiled ()
12710 /* Output assembly code for the trampoline template to
12711 the stdio stream FILE.
12713 On S/390, we use gpr 1 internally in the trampoline code;
12714 gpr 0 is used to hold the static chain. */
12717 s390_asm_trampoline_template (FILE *file)
12720 op[0] = gen_rtx_REG (Pmode, 0);
12721 op[1] = gen_rtx_REG (Pmode, 1);
12725 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12726 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
12727 output_asm_insn ("br\t%1", op); /* 2 byte */
12728 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
12732 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
12733 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
12734 output_asm_insn ("br\t%1", op); /* 2 byte */
12735 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
12739 /* Emit RTL insns to initialize the variable parts of a trampoline.
12740 FNADDR is an RTX for the address of the function's pure code.
12741 CXT is an RTX for the static chain value for the function. */
12744 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
12746 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
12749 emit_block_move (m_tramp, assemble_trampoline_template (),
12750 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
12752 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
12753 emit_move_insn (mem, cxt);
12754 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
12755 emit_move_insn (mem, fnaddr);
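/* Resulting trampoline layout, as a sketch (64-bit offsets, with the
   31-bit ones in parentheses):

     0:        basr %r1,0             %r1 = address of next insn
     2:        lmg %r0,%r1,14(%r1)    (lm %r0,%r1,6(%r1) on 31 bit)
     8 (6):    br %r1                 jump to target, chain in %r0
     16 (8):   static chain value     written by the code above
     24 (12):  target function addr   written by the code above  */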
12758 /* Output assembler code to FILE to increment profiler label # LABELNO
12759 for profiling a function entry. */
12762 s390_function_profiler (FILE *file, int labelno)
12767 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
12769 fprintf (file, "# function profiler \n");
12771 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
12772 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
12773 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
12775 op[2] = gen_rtx_REG (Pmode, 1);
12776 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
12777 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
12779 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
12782 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
12783 op[4] = gen_rtx_CONST (Pmode, op[4]);
12788 output_asm_insn ("stg\t%0,%1", op);
12789 output_asm_insn ("larl\t%2,%3", op);
12790 output_asm_insn ("brasl\t%0,%4", op);
12791 output_asm_insn ("lg\t%0,%1", op);
12793 else if (TARGET_CPU_ZARCH)
12795 output_asm_insn ("st\t%0,%1", op);
12796 output_asm_insn ("larl\t%2,%3", op);
12797 output_asm_insn ("brasl\t%0,%4", op);
12798 output_asm_insn ("l\t%0,%1", op);
12800 else if (!flag_pic)
12802 op[6] = gen_label_rtx ();
12804 output_asm_insn ("st\t%0,%1", op);
12805 output_asm_insn ("bras\t%2,%l6", op);
12806 output_asm_insn (".long\t%4", op);
12807 output_asm_insn (".long\t%3", op);
12808 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12809 output_asm_insn ("l\t%0,0(%2)", op);
12810 output_asm_insn ("l\t%2,4(%2)", op);
12811 output_asm_insn ("basr\t%0,%0", op);
12812 output_asm_insn ("l\t%0,%1", op);
12816 op[5] = gen_label_rtx ();
12817 op[6] = gen_label_rtx ();
12819 output_asm_insn ("st\t%0,%1", op);
12820 output_asm_insn ("bras\t%2,%l6", op);
12821 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
12822 output_asm_insn (".long\t%4-%l5", op);
12823 output_asm_insn (".long\t%3-%l5", op);
12824 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
12825 output_asm_insn ("lr\t%0,%2", op);
12826 output_asm_insn ("a\t%0,0(%2)", op);
12827 output_asm_insn ("a\t%2,4(%2)", op);
12828 output_asm_insn ("basr\t%0,%0", op);
12829 output_asm_insn ("l\t%0,%1", op);
12833 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
12834 into its SYMBOL_REF_FLAGS. */
12837 s390_encode_section_info (tree decl, rtx rtl, int first)
12839 default_encode_section_info (decl, rtl, first);
12841 if (TREE_CODE (decl) == VAR_DECL)
12843 /* Store the alignment to be able to check if we can use
12844 a larl/load-relative instruction. We only handle the cases
12845 that can go wrong (i.e. no FUNC_DECLs). */
12846 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
12847 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12848 else if (DECL_ALIGN (decl) % 32)
12849 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12850 else if (DECL_ALIGN (decl) % 64)
12851 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
12854 /* Literal pool references don't have a decl so they are handled
12855 differently here. We rely on the information in the MEM_ALIGN
12856 entry to decide upon the alignment. */
12858 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
12859 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
12861 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
12862 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
12863 else if (MEM_ALIGN (rtl) % 32)
12864 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
12865 else if (MEM_ALIGN (rtl) % 64)
12866 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
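/* Background (assumed from the instruction set): larl forms addresses
   in halfword steps, so its target must be at least 2-byte aligned,
   and the load/store-relative-long instructions require 4- or 8-byte
   alignment depending on operand size.  The NOTALIGN* flags recorded
   above let the backend reject those instructions for under-aligned
   symbols.  */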
12870 /* Output thunk to FILE that implements a C++ virtual function call (with
12871 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
12872 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
12873 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
12874 relative to the resulting this pointer. */
12877 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
12878 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
12884 /* Make sure unwind info is emitted for the thunk if needed. */
12885 final_start_function (emit_barrier (), file, 1);
12887 /* Operand 0 is the target function. */
12888 op[0] = XEXP (DECL_RTL (function), 0);
12889 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
12892 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
12893 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
12894 op[0] = gen_rtx_CONST (Pmode, op[0]);
12897 /* Operand 1 is the 'this' pointer. */
12898 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
12899 op[1] = gen_rtx_REG (Pmode, 3);
12901 op[1] = gen_rtx_REG (Pmode, 2);
12903 /* Operand 2 is the delta. */
12904 op[2] = GEN_INT (delta);
12906 /* Operand 3 is the vcall_offset. */
12907 op[3] = GEN_INT (vcall_offset);
12909 /* Operand 4 is the temporary register. */
12910 op[4] = gen_rtx_REG (Pmode, 1);
12912 /* Operands 5 to 8 can be used as labels. */
12918 /* Operand 9 can be used as a temporary register. */
12921 /* Generate code. */
12924 /* Setup literal pool pointer if required. */
12925 if ((!DISP_IN_RANGE (delta)
12926 && !CONST_OK_FOR_K (delta)
12927 && !CONST_OK_FOR_Os (delta))
12928 || (!DISP_IN_RANGE (vcall_offset)
12929 && !CONST_OK_FOR_K (vcall_offset)
12930 && !CONST_OK_FOR_Os (vcall_offset)))
12932 op[5] = gen_label_rtx ();
12933 output_asm_insn ("larl\t%4,%5", op);
12936 /* Add DELTA to this pointer. */
12939 if (CONST_OK_FOR_J (delta))
12940 output_asm_insn ("la\t%1,%2(%1)", op);
12941 else if (DISP_IN_RANGE (delta))
12942 output_asm_insn ("lay\t%1,%2(%1)", op);
12943 else if (CONST_OK_FOR_K (delta))
12944 output_asm_insn ("aghi\t%1,%2", op);
12945 else if (CONST_OK_FOR_Os (delta))
12946 output_asm_insn ("agfi\t%1,%2", op);
12949 op[6] = gen_label_rtx ();
12950 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
12954 /* Perform vcall adjustment. */
12957 if (DISP_IN_RANGE (vcall_offset))
12959 output_asm_insn ("lg\t%4,0(%1)", op);
12960 output_asm_insn ("ag\t%1,%3(%4)", op);
12962 else if (CONST_OK_FOR_K (vcall_offset))
12964 output_asm_insn ("lghi\t%4,%3", op);
12965 output_asm_insn ("ag\t%4,0(%1)", op);
12966 output_asm_insn ("ag\t%1,0(%4)", op);
12968 else if (CONST_OK_FOR_Os (vcall_offset))
12970 output_asm_insn ("lgfi\t%4,%3", op);
12971 output_asm_insn ("ag\t%4,0(%1)", op);
12972 output_asm_insn ("ag\t%1,0(%4)", op);
12976 op[7] = gen_label_rtx ();
12977 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
12978 output_asm_insn ("ag\t%4,0(%1)", op);
12979 output_asm_insn ("ag\t%1,0(%4)", op);
12983 /* Jump to target. */
12984 output_asm_insn ("jg\t%0", op);
12986 /* Output literal pool if required. */
12989 output_asm_insn (".align\t4", op);
12990 targetm.asm_out.internal_label (file, "L",
12991 CODE_LABEL_NUMBER (op[5]));
12995 targetm.asm_out.internal_label (file, "L",
12996 CODE_LABEL_NUMBER (op[6]));
12997 output_asm_insn (".long\t%2", op);
13001 targetm.asm_out.internal_label (file, "L",
13002 CODE_LABEL_NUMBER (op[7]));
13003 output_asm_insn (".long\t%3", op);
13008 /* Setup base pointer if required. */
13010 || (!DISP_IN_RANGE (delta)
13011 && !CONST_OK_FOR_K (delta)
13012 && !CONST_OK_FOR_Os (delta))
13013 || (!DISP_IN_RANGE (vcall_offset)
13014 && !CONST_OK_FOR_K (vcall_offset)
13015 && !CONST_OK_FOR_Os (vcall_offset)))
13017 op[5] = gen_label_rtx ();
13018 output_asm_insn ("basr\t%4,0", op);
13019 targetm.asm_out.internal_label (file, "L",
13020 CODE_LABEL_NUMBER (op[5]));
13023 /* Add DELTA to this pointer. */
13026 if (CONST_OK_FOR_J (delta))
13027 output_asm_insn ("la\t%1,%2(%1)", op);
13028 else if (DISP_IN_RANGE (delta))
13029 output_asm_insn ("lay\t%1,%2(%1)", op);
13030 else if (CONST_OK_FOR_K (delta))
13031 output_asm_insn ("ahi\t%1,%2", op);
13032 else if (CONST_OK_FOR_Os (delta))
13033 output_asm_insn ("afi\t%1,%2", op);
13036 op[6] = gen_label_rtx ();
13037 output_asm_insn ("a\t%1,%6-%5(%4)", op);
13041 /* Perform vcall adjustment. */
13044 if (CONST_OK_FOR_J (vcall_offset))
13046 output_asm_insn ("l\t%4,0(%1)", op);
13047 output_asm_insn ("a\t%1,%3(%4)", op);
13049 else if (DISP_IN_RANGE (vcall_offset))
13051 output_asm_insn ("l\t%4,0(%1)", op);
13052 output_asm_insn ("ay\t%1,%3(%4)", op);
13054 else if (CONST_OK_FOR_K (vcall_offset))
13056 output_asm_insn ("lhi\t%4,%3", op);
13057 output_asm_insn ("a\t%4,0(%1)", op);
13058 output_asm_insn ("a\t%1,0(%4)", op);
13060 else if (CONST_OK_FOR_Os (vcall_offset))
13062 output_asm_insn ("iilf\t%4,%3", op);
13063 output_asm_insn ("a\t%4,0(%1)", op);
13064 output_asm_insn ("a\t%1,0(%4)", op);
13068 op[7] = gen_label_rtx ();
13069 output_asm_insn ("l\t%4,%7-%5(%4)", op);
13070 output_asm_insn ("a\t%4,0(%1)", op);
13071 output_asm_insn ("a\t%1,0(%4)", op);
13074 /* We had to clobber the base pointer register.
13075 Re-setup the base pointer (with a different base). */
13076 op[5] = gen_label_rtx ();
13077 output_asm_insn ("basr\t%4,0", op);
13078 targetm.asm_out.internal_label (file, "L",
13079 CODE_LABEL_NUMBER (op[5]));
13082 /* Jump to target. */
13083 op[8] = gen_label_rtx ();
13086 output_asm_insn ("l\t%4,%8-%5(%4)", op);
13087 else if (!nonlocal)
13088 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13089 /* We cannot call through .plt, since .plt requires %r12 loaded. */
13090 else if (flag_pic == 1)
13092 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13093 output_asm_insn ("l\t%4,%0(%4)", op);
13095 else if (flag_pic == 2)
13097 op[9] = gen_rtx_REG (Pmode, 0);
13098 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
13099 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13100 output_asm_insn ("ar\t%4,%9", op);
13101 output_asm_insn ("l\t%4,0(%4)", op);
13104 output_asm_insn ("br\t%4", op);
13106 /* Output literal pool. */
13107 output_asm_insn (".align\t4", op);
13109 if (nonlocal && flag_pic == 2)
13110 output_asm_insn (".long\t%0", op);
13113 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
13114 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
13117 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
13119 output_asm_insn (".long\t%0", op);
13121 output_asm_insn (".long\t%0-%5", op);
13125 targetm.asm_out.internal_label (file, "L",
13126 CODE_LABEL_NUMBER (op[6]));
13127 output_asm_insn (".long\t%2", op);
13131 targetm.asm_out.internal_label (file, "L",
13132 CODE_LABEL_NUMBER (op[7]));
13133 output_asm_insn (".long\t%3", op);
13136 final_end_function ();
13140 s390_valid_pointer_mode (machine_mode mode)
13142 return (mode == SImode || (TARGET_64BIT && mode == DImode));
13145 /* Checks whether the given CALL_EXPR would use a call-saved
13146 register. This is used to decide whether sibling call
13147 optimization could be performed on the respective function
13148 call. */
13151 s390_call_saved_register_used (tree call_expr)
13153 CUMULATIVE_ARGS cum_v;
13154 cumulative_args_t cum;
13161 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
13162 cum = pack_cumulative_args (&cum_v);
13164 for (i = 0; i < call_expr_nargs (call_expr); i++)
13166 parameter = CALL_EXPR_ARG (call_expr, i);
13167 gcc_assert (parameter);
13169 /* For an undeclared variable passed as parameter we will get
13170 an ERROR_MARK node here. */
13171 if (TREE_CODE (parameter) == ERROR_MARK)
13174 type = TREE_TYPE (parameter);
13177 mode = TYPE_MODE (type);
13180 /* We assume that in the target function all parameters are
13181 named. This only has an impact on vector argument register
13182 usage, none of which is call-saved. */
13183 if (pass_by_reference (&cum_v, mode, type, true))
13186 type = build_pointer_type (type);
13189 parm_rtx = s390_function_arg (cum, mode, type, true);
13191 s390_function_arg_advance (cum, mode, type, true);
13196 if (REG_P (parm_rtx))
13199 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
13201 if (!call_used_regs[reg + REGNO (parm_rtx)])
13205 if (GET_CODE (parm_rtx) == PARALLEL)
13209 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
13211 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
13213 gcc_assert (REG_P (r));
13216 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
13218 if (!call_used_regs[reg + REGNO (r)])
13227 /* Return true if the given call expression can be
13228 turned into a sibling call.
13229 DECL holds the declaration of the function to be called whereas
13230 EXP is the call expression itself. */
13233 s390_function_ok_for_sibcall (tree decl, tree exp)
13235 /* The TPF epilogue uses register 1. */
13236 if (TARGET_TPF_PROFILING)
13239 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
13240 which would have to be restored before the sibcall. */
13241 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
13244 /* Register 6 on s390 is available as an argument register but is
13245 call-saved. This makes functions needing this register for arguments
13246 not suitable for sibcalls. */
13247 return !s390_call_saved_register_used (exp);
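/* Example (illustrative): a function taking five integer arguments
   receives the fifth one in r6; since r6 must be preserved across the
   call, such a call cannot be emitted as a sibcall.  */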
13250 /* Return the fixed registers used for condition codes. */
13253 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13256 *p2 = INVALID_REGNUM;
13261 /* This function is used by the call expanders of the machine description.
13262 It emits the call insn itself together with the necessary operations
13263 to adjust the target address and returns the emitted insn.
13264 ADDR_LOCATION is the target address rtx
13265 TLS_CALL the location of the thread-local symbol
13266 RESULT_REG the register where the result of the call should be stored
13267 RETADDR_REG the register where the return address should be stored
13268 If this parameter is NULL_RTX the call is considered
13269 to be a sibling call. */
13272 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
13275 bool plt_call = false;
13281 /* Direct function calls need special treatment. */
13282 if (GET_CODE (addr_location) == SYMBOL_REF)
13284 /* When calling a global routine in PIC mode, we must
13285 replace the symbol itself with the PLT stub. */
13286 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13288 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
13290 addr_location = gen_rtx_UNSPEC (Pmode,
13291 gen_rtvec (1, addr_location),
13293 addr_location = gen_rtx_CONST (Pmode, addr_location);
13297 /* For -fpic code the PLT entries might use r12 which is
13298 call-saved. Therefore we cannot do a sibcall when
13299 calling directly using a symbol ref. When reaching
13300 this point we decided (in s390_function_ok_for_sibcall)
13301 to do a sibcall for a function pointer but one of the
13302 optimizers was able to get rid of the function pointer
13303 by propagating the symbol ref into the call. This
13304 optimization is illegal for S/390 so we turn the direct
13305 call into an indirect call again. */
13306 addr_location = force_reg (Pmode, addr_location);
13309 /* Unless we can use the bras(l) insn, force the
13310 routine address into a register. */
13311 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13314 addr_location = legitimize_pic_address (addr_location, 0);
13316 addr_location = force_reg (Pmode, addr_location);
13320 /* If it is already an indirect call or the code above moved the
13321 SYMBOL_REF to somewhere else make sure the address can be found in
13322 register 1. */
13323 if (retaddr_reg == NULL_RTX
13324 && GET_CODE (addr_location) != SYMBOL_REF
13327 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13328 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13331 addr_location = gen_rtx_MEM (QImode, addr_location);
13332 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13334 if (result_reg != NULL_RTX)
13335 call = gen_rtx_SET (result_reg, call);
13337 if (retaddr_reg != NULL_RTX)
13339 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13341 if (tls_call != NULL_RTX)
13342 vec = gen_rtvec (3, call, clobber,
13343 gen_rtx_USE (VOIDmode, tls_call));
13345 vec = gen_rtvec (2, call, clobber);
13347 call = gen_rtx_PARALLEL (VOIDmode, vec);
13350 insn = emit_call_insn (call);
13352 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13353 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13355 /* s390_function_ok_for_sibcall should
13356 have denied sibcalls in this case. */
13357 gcc_assert (retaddr_reg != NULL_RTX);
13358 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
13363 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
13366 s390_conditional_register_usage (void)
13372 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13373 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13375 if (TARGET_CPU_ZARCH)
13377 fixed_regs[BASE_REGNUM] = 0;
13378 call_used_regs[BASE_REGNUM] = 0;
13379 fixed_regs[RETURN_REGNUM] = 0;
13380 call_used_regs[RETURN_REGNUM] = 0;
13384 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13385 call_used_regs[i] = call_really_used_regs[i] = 0;
13389 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13390 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13393 if (TARGET_SOFT_FLOAT)
13395 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13396 call_used_regs[i] = fixed_regs[i] = 1;
13399 /* Disable v16 - v31 for non-vector target. */
13402 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13403 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13407 /* Corresponding function to eh_return expander. */
13409 static GTY(()) rtx s390_tpf_eh_return_symbol;
13411 s390_emit_tpf_eh_return (rtx target)
13416 if (!s390_tpf_eh_return_symbol)
13417 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13419 reg = gen_rtx_REG (Pmode, 2);
13420 orig_ra = gen_rtx_REG (Pmode, 3);
13422 emit_move_insn (reg, target);
13423 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13424 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13425 gen_rtx_REG (Pmode, RETURN_REGNUM));
13426 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13427 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13429 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13432 /* Rework the prologue/epilogue to avoid saving/restoring
13433 registers unnecessarily. */
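/* For instance (illustrative): if register allocation ended up using
   fewer call-saved GPRs than the prologue conservatively assumed, a
   stmg/lmg pair covering a whole register range can be narrowed to
   cover only the registers still live, or removed entirely when
   nothing needs saving.  */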
13436 s390_optimize_prologue (void)
13438 rtx_insn *insn, *new_insn, *next_insn;
13440 /* Do a final recompute of the frame-related data. */
13441 s390_optimize_register_info ();
13443 /* If all special registers are in fact used, there's nothing we
13444 can do, so no point in walking the insn list. */
13446 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13447 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13448 && (TARGET_CPU_ZARCH
13449 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13450 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13453 /* Search for prologue/epilogue insns and replace them. */
13455 for (insn = get_insns (); insn; insn = next_insn)
13457 int first, last, off;
13458 rtx set, base, offset;
13461 next_insn = NEXT_INSN (insn);
13463 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13466 pat = PATTERN (insn);
13468 /* Remove ldgr/lgdr instructions used for saving and restoring
13469 GPRs if possible. */
13474 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13475 tmp_pat = XVECEXP (pat, 0, 0);
13477 if (GET_CODE (tmp_pat) == SET
13478 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13479 && REG_P (SET_SRC (tmp_pat))
13480 && REG_P (SET_DEST (tmp_pat)))
13482 int src_regno = REGNO (SET_SRC (tmp_pat));
13483 int dest_regno = REGNO (SET_DEST (tmp_pat));
13487 if (!((GENERAL_REGNO_P (src_regno)
13488 && FP_REGNO_P (dest_regno))
13489 || (FP_REGNO_P (src_regno)
13490 && GENERAL_REGNO_P (dest_regno))))
13493 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13494 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13496 /* GPR must be call-saved, FPR must be call-clobbered. */
13497 if (!call_really_used_regs[fpr_regno]
13498 || call_really_used_regs[gpr_regno])
13501 /* It must not happen that what we once saved in an FPR now
13502 needs a stack slot. */
13503 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13505 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13507 remove_insn (insn);
13513 if (GET_CODE (pat) == PARALLEL
13514 && store_multiple_operation (pat, VOIDmode))
13516 set = XVECEXP (pat, 0, 0);
13517 first = REGNO (SET_SRC (set));
13518 last = first + XVECLEN (pat, 0) - 1;
13519 offset = const0_rtx;
13520 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13521 off = INTVAL (offset);
13523 if (GET_CODE (base) != REG || off < 0)
13525 if (cfun_frame_layout.first_save_gpr != -1
13526 && (cfun_frame_layout.first_save_gpr < first
13527 || cfun_frame_layout.last_save_gpr > last))
13529 if (REGNO (base) != STACK_POINTER_REGNUM
13530 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13532 if (first > BASE_REGNUM || last < BASE_REGNUM)
13535 if (cfun_frame_layout.first_save_gpr != -1)
13537 rtx s_pat = save_gprs (base,
13538 off + (cfun_frame_layout.first_save_gpr
13539 - first) * UNITS_PER_LONG,
13540 cfun_frame_layout.first_save_gpr,
13541 cfun_frame_layout.last_save_gpr);
13542 new_insn = emit_insn_before (s_pat, insn);
13543 INSN_ADDRESSES_NEW (new_insn, -1);
13546 remove_insn (insn);
13550 if (cfun_frame_layout.first_save_gpr == -1
13551 && GET_CODE (pat) == SET
13552 && GENERAL_REG_P (SET_SRC (pat))
13553 && GET_CODE (SET_DEST (pat)) == MEM)
13556 first = REGNO (SET_SRC (set));
13557 offset = const0_rtx;
13558 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13559 off = INTVAL (offset);
13561 if (GET_CODE (base) != REG || off < 0)
13563 if (REGNO (base) != STACK_POINTER_REGNUM
13564 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13567 remove_insn (insn);
13571 if (GET_CODE (pat) == PARALLEL
13572 && load_multiple_operation (pat, VOIDmode))
13574 set = XVECEXP (pat, 0, 0);
13575 first = REGNO (SET_DEST (set));
13576 last = first + XVECLEN (pat, 0) - 1;
13577 offset = const0_rtx;
13578 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13579 off = INTVAL (offset);
13581 if (GET_CODE (base) != REG || off < 0)
13584 if (cfun_frame_layout.first_restore_gpr != -1
13585 && (cfun_frame_layout.first_restore_gpr < first
13586 || cfun_frame_layout.last_restore_gpr > last))
13588 if (REGNO (base) != STACK_POINTER_REGNUM
13589 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13591 if (first > BASE_REGNUM || last < BASE_REGNUM)
13594 if (cfun_frame_layout.first_restore_gpr != -1)
13596 rtx rpat = restore_gprs (base,
13597 off + (cfun_frame_layout.first_restore_gpr
13598 - first) * UNITS_PER_LONG,
13599 cfun_frame_layout.first_restore_gpr,
13600 cfun_frame_layout.last_restore_gpr);
13602 /* Remove REG_CFA_RESTOREs for registers that we no
13603 longer need to save. */
13604 REG_NOTES (rpat) = REG_NOTES (insn);
13605 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13606 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13607 && ((int) REGNO (XEXP (*ptr, 0))
13608 < cfun_frame_layout.first_restore_gpr))
13609 *ptr = XEXP (*ptr, 1);
13611 ptr = &XEXP (*ptr, 1);
13612 new_insn = emit_insn_before (rpat, insn);
13613 RTX_FRAME_RELATED_P (new_insn) = 1;
13614 INSN_ADDRESSES_NEW (new_insn, -1);
13617 remove_insn (insn);
13621 if (cfun_frame_layout.first_restore_gpr == -1
13622 && GET_CODE (pat) == SET
13623 && GENERAL_REG_P (SET_DEST (pat))
13624 && GET_CODE (SET_SRC (pat)) == MEM)
13627 first = REGNO (SET_DEST (set));
13628 offset = const0_rtx;
13629 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13630 off = INTVAL (offset);
13632 if (GET_CODE (base) != REG || off < 0)
13635 if (REGNO (base) != STACK_POINTER_REGNUM
13636 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13639 remove_insn (insn);
13645 /* On z10 and later the dynamic branch prediction must see the
13646 backward jump within a certain window. If not, it falls back to
13647 the static prediction. This function rearranges the loop backward
13648 branch in a way which makes the static prediction always correct.
13649 The function returns true if it added an instruction. */
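/* A sketch of the rewrite on a simplified instruction stream
   (illustrative only, labels invented for the example):

       Lcode: ...
              ...
              if (cond) goto Lcode    ; backward branch, outside the
                                      ; prediction window

   becomes

       Lcode: ...
              ...
              if (!cond) goto Lnew    ; short forward branch
              goto Lcode              ; unconditional backward jump,
       Lnew:  ...                     ; statically predicted taken  */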
13651 s390_fix_long_loop_prediction (rtx_insn *insn)
13653 rtx set = single_set (insn);
13654 rtx code_label, label_ref;
13655 rtx_insn *uncond_jump;
13656 rtx_insn *cur_insn;
13660 /* This will exclude branch on count and branch on index patterns
13661 since these are correctly statically predicted. */
13663 || SET_DEST (set) != pc_rtx
13664 || GET_CODE (SET_SRC (set)) != IF_THEN_ELSE)
13667 /* Skip conditional returns. */
13668 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
13669 && XEXP (SET_SRC (set), 2) == pc_rtx)
13672 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
13673 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
13675 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
13677 code_label = XEXP (label_ref, 0);
13679 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
13680 || INSN_ADDRESSES (INSN_UID (insn)) == -1
13681 || (INSN_ADDRESSES (INSN_UID (insn))
13682 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
13685 for (distance = 0, cur_insn = PREV_INSN (insn);
13686 distance < PREDICT_DISTANCE - 6;
13687 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
13688 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
13691 rtx_code_label *new_label = gen_label_rtx ();
13692 uncond_jump = emit_jump_insn_after (
13693 gen_rtx_SET (pc_rtx,
13694 gen_rtx_LABEL_REF (VOIDmode, code_label)),
13696 emit_label_after (new_label, uncond_jump);
13698 tmp = XEXP (SET_SRC (set), 1);
13699 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
13700 XEXP (SET_SRC (set), 2) = tmp;
13701 INSN_CODE (insn) = -1;
13703 XEXP (label_ref, 0) = new_label;
13704 JUMP_LABEL (insn) = new_label;
13705 JUMP_LABEL (uncond_jump) = code_label;
13710 /* Returns 1 if INSN reads the value of REG for purposes not related
13711 to addressing of memory, and 0 otherwise. */
13713 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
13715 return reg_referenced_p (reg, PATTERN (insn))
13716 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
13719 /* Starting from INSN find_cond_jump looks downwards in the insn
13720 stream for a single jump insn which is the last user of the
13721 condition code set in INSN. */
13723 find_cond_jump (rtx_insn *insn)
13725 for (; insn; insn = NEXT_INSN (insn))
13729 if (LABEL_P (insn))
13732 if (!JUMP_P (insn))
13734 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
13739 /* This will be triggered by a return. */
13740 if (GET_CODE (PATTERN (insn)) != SET)
13743 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
13744 ite = SET_SRC (PATTERN (insn));
13746 if (GET_CODE (ite) != IF_THEN_ELSE)
13749 cc = XEXP (XEXP (ite, 0), 0);
13750 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
13753 if (find_reg_note (insn, REG_DEAD, cc))
13761 /* Swap the condition in COND and the operands in OP0 and OP1 so that
13762 the semantics do not change. If NULL_RTX is passed as COND the
13763 function tries to find the conditional jump starting with INSN. */
13765 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
13769 if (cond == NULL_RTX)
13771 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
13772 rtx set = jump ? single_set (jump) : NULL_RTX;
13774 if (set == NULL_RTX)
13777 cond = XEXP (SET_SRC (set), 0);
13782 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
13785 /* On z10, instructions of the compare-and-branch family have the
13786 property of accessing the register occurring as the second operand with
13787 its bits complemented. If such a compare is grouped with a second
13788 instruction that accesses the same register non-complemented, and
13789 if that register's value is delivered via a bypass, then the
13790 pipeline recycles, thereby causing significant performance decline.
13791 This function locates such situations and exchanges the two
13792 operands of the compare. The function returns true whenever it added an insn. */
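/* A sketch of the exchange (illustrative only; register numbers and
   mnemonics invented for the example).  The compare-and-branch reads
   its second operand complemented, so

       crj   %r1,%r2,...,label    ; %r2 accessed complemented
       ar    %r3,%r2              ; %r2 accessed normally -> recycle

   is rewritten, by swapping the operands and the condition, into

       crj   %r2,%r1,...,label    ; %r2 now the first operand
       ar    %r3,%r2              ; no conflicting access left  */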
13795 s390_z10_optimize_cmp (rtx_insn *insn)
13797 rtx_insn *prev_insn, *next_insn;
13798 bool insn_added_p = false;
13799 rtx cond, *op0, *op1;
13801 if (GET_CODE (PATTERN (insn)) == PARALLEL)
13803 /* Handle compare and branch and branch on count insns. */
13805 rtx pattern = single_set (insn);
13808 || SET_DEST (pattern) != pc_rtx
13809 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
13812 cond = XEXP (SET_SRC (pattern), 0);
13813 op0 = &XEXP (cond, 0);
13814 op1 = &XEXP (cond, 1);
13816 else if (GET_CODE (PATTERN (insn)) == SET)
13820 /* Handle normal compare instructions. */
13821 src = SET_SRC (PATTERN (insn));
13822 dest = SET_DEST (PATTERN (insn));
13825 || !CC_REGNO_P (REGNO (dest))
13826 || GET_CODE (src) != COMPARE)
13829 /* s390_swap_cmp will try to find the conditional
13830 jump when passing NULL_RTX as condition. */
13832 op0 = &XEXP (src, 0);
13833 op1 = &XEXP (src, 1);
13838 if (!REG_P (*op0) || !REG_P (*op1))
13841 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
13844 /* Swap the COMPARE arguments and its mask if there is a
13845 conflicting access in the previous insn. */
13846 prev_insn = prev_active_insn (insn);
13847 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13848 && reg_referenced_p (*op1, PATTERN (prev_insn)))
13849 s390_swap_cmp (cond, op0, op1, insn);
13851 /* Check if there is a conflict with the next insn. If there
13852 was no conflict with the previous insn, then swap the
13853 COMPARE arguments and its mask. If we already swapped
13854 the operands, or if swapping them would cause a conflict
13855 with the previous insn, issue a NOP after the COMPARE in
13856 order to separate the two instructions. */
13857 next_insn = next_active_insn (insn);
13858 if (next_insn != NULL_RTX && INSN_P (next_insn)
13859 && s390_non_addr_reg_read_p (*op1, next_insn))
13861 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
13862 && s390_non_addr_reg_read_p (*op0, prev_insn))
13864 if (REGNO (*op1) == 0)
13865 emit_insn_after (gen_nop1 (), insn);
13867 emit_insn_after (gen_nop (), insn);
13868 insn_added_p = true;
13871 s390_swap_cmp (cond, op0, op1, insn);
13873 return insn_added_p;
13876 /* Number of INSNs to be scanned backward in the last BB of the loop
13877 and forward in the first BB of the loop. This usually should be a
13878 bit more than the number of INSNs which could go into one group. */
13880 #define S390_OSC_SCAN_INSN_NUM 5
13882 /* Scan LOOP for static OSC collisions and return true if an osc_break
13883 should be issued for this loop. */
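/* An example, in C, of the operand-store-compare situation the scan
   looks for (illustrative only): a store in the loop latch whose
   address is reloaded unmodified in the loop header.

       void
       osc_prone (int *p, int n)
       {
         while (*p < n)    <-- load from *p in the loop header
           *p += 1;        <-- store to the same address in the latch
       }
*/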
13885 s390_adjust_loop_scan_osc (struct loop* loop)
13888 HARD_REG_SET modregs, newregs;
13889 rtx_insn *insn, *store_insn = NULL;
13891 struct s390_address addr_store, addr_load;
13892 subrtx_iterator::array_type array;
13895 CLEAR_HARD_REG_SET (modregs);
13898 FOR_BB_INSNS_REVERSE (loop->latch, insn)
13900 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13904 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13907 find_all_hard_reg_sets (insn, &newregs, true);
13908 IOR_HARD_REG_SET (modregs, newregs);
13910 set = single_set (insn);
13914 if (MEM_P (SET_DEST (set))
13915 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
13922 if (store_insn == NULL_RTX)
13926 FOR_BB_INSNS (loop->header, insn)
13928 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
13931 if (insn == store_insn)
13935 if (insn_count > S390_OSC_SCAN_INSN_NUM)
13938 find_all_hard_reg_sets (insn, &newregs, true);
13939 IOR_HARD_REG_SET (modregs, newregs);
13941 set = single_set (insn);
13945 /* An intermediate store disrupts static OSC checking anyway. */
13947 if (MEM_P (SET_DEST (set))
13948 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
13951 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
13953 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
13954 && rtx_equal_p (addr_load.base, addr_store.base)
13955 && rtx_equal_p (addr_load.indx, addr_store.indx)
13956 && rtx_equal_p (addr_load.disp, addr_store.disp))
13958 if ((addr_load.base != NULL_RTX
13959 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
13960 || (addr_load.indx != NULL_RTX
13961 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
13968 /* Look for adjustments which can be done on simple innermost loops. */
13971 s390_adjust_loops ()
13973 struct loop *loop = NULL;
13976 compute_bb_for_insn ();
13978 /* Find the loops. */
13979 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
13981 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
13985 flow_loop_dump (loop, dump_file, NULL, 0);
13986 fprintf (dump_file, ";; OSC loop scan Loop: ");
13988 if (loop->latch == NULL
13989 || pc_set (BB_END (loop->latch)) == NULL_RTX
13990 || !s390_adjust_loop_scan_osc (loop))
13994 if (loop->latch == NULL)
13995 fprintf (dump_file, " multiple backward jumps\n");
13998 fprintf (dump_file, " header insn: %d latch insn: %d ",
13999 INSN_UID (BB_HEAD (loop->header)),
14000 INSN_UID (BB_END (loop->latch)));
14001 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
14002 fprintf (dump_file, " loop does not end with jump\n");
14004 fprintf (dump_file, " not instrumented\n");
14010 rtx_insn *new_insn;
14013 fprintf (dump_file, " adding OSC break insn: ");
14014 new_insn = emit_insn_before (gen_osc_break (),
14015 BB_END (loop->latch));
14016 INSN_ADDRESSES_NEW (new_insn, -1);
14020 loop_optimizer_finalize ();
14022 df_finish_pass (false);
14025 /* Perform machine-dependent processing. */
14030 bool pool_overflow = false;
14031 int hw_before, hw_after;
14033 if (s390_tune == PROCESSOR_2964_Z13)
14034 s390_adjust_loops ();
14036 /* Make sure all splits have been performed; splits after
14037 machine_dependent_reorg might confuse insn length counts. */
14038 split_all_insns_noflow ();
14040 /* Install the main literal pool and the associated base
14041 register load insns.
14043 In addition, there are two problematic situations we need to correct:
14046 - the literal pool might be > 4096 bytes in size, so that
14047 some of its elements cannot be directly accessed
14049 - a branch target might be > 64K away from the branch, so that
14050 it is not possible to use a PC-relative instruction.
14052 To fix those, we split the single literal pool into multiple
14053 pool chunks, reloading the pool base register at various
14054 points throughout the function to ensure it always points to
14055 the pool chunk the following code expects, and / or replace
14056 PC-relative branches by absolute branches.
14058 However, the two problems are interdependent: splitting the
14059 literal pool can move a branch further away from its target,
14060 causing the 64K limit to overflow, and on the other hand,
14061 replacing a PC-relative branch by an absolute branch means
14062 we need to put the branch target address into the literal
14063 pool, possibly causing it to overflow.
14065 So, we loop trying to fix up both problems until we manage
14066 to satisfy both conditions at the same time. Note that the
14067 loop is guaranteed to terminate as every pass of the loop
14068 strictly decreases the total number of PC-relative branches
14069 in the function. (This is not completely true as there
14070 might be branch-over-pool insns introduced by chunkify_start.
14071 Those never need to be split, however.) */
14075 struct constant_pool *pool = NULL;
14077 /* Collect the literal pool. */
14078 if (!pool_overflow)
14080 pool = s390_mainpool_start ();
14082 pool_overflow = true;
14085 /* If literal pool overflowed, start to chunkify it. */
14087 pool = s390_chunkify_start ();
14089 /* Split out-of-range branches. If this has created new
14090 literal pool entries, cancel current chunk list and
14091 recompute it. zSeries machines have large branch
14092 instructions, so we never need to split a branch. */
14093 if (!TARGET_CPU_ZARCH && s390_split_branches ())
14096 s390_chunkify_cancel (pool);
14098 s390_mainpool_cancel (pool);
14103 /* If we made it up to here, both conditions are satisfied.
14104 Finish up literal pool related changes. */
14106 s390_chunkify_finish (pool);
14108 s390_mainpool_finish (pool);
14110 /* We're done splitting branches. */
14111 cfun->machine->split_branches_pending_p = false;
14115 /* Generate out-of-pool execute target insns. */
14116 if (TARGET_CPU_ZARCH)
14118 rtx_insn *insn, *target;
14121 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14123 label = s390_execute_label (insn);
14127 gcc_assert (label != const0_rtx);
14129 target = emit_label (XEXP (label, 0));
14130 INSN_ADDRESSES_NEW (target, -1);
14132 target = emit_insn (s390_execute_target (insn));
14133 INSN_ADDRESSES_NEW (target, -1);
14137 /* Try to optimize prologue and epilogue further. */
14138 s390_optimize_prologue ();
14140 /* Walk over the insns and do some >=z10 specific changes. */
14141 if (s390_tune >= PROCESSOR_2097_Z10)
14144 bool insn_added_p = false;
14146 /* The insn lengths and addresses have to be up to date for the
14147 following manipulations. */
14148 shorten_branches (get_insns ());
14150 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14152 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14156 insn_added_p |= s390_fix_long_loop_prediction (insn);
14158 if ((GET_CODE (PATTERN (insn)) == PARALLEL
14159 || GET_CODE (PATTERN (insn)) == SET)
14160 && s390_tune == PROCESSOR_2097_Z10)
14161 insn_added_p |= s390_z10_optimize_cmp (insn);
14164 /* Adjust branches if we added new instructions. */
14166 shorten_branches (get_insns ());
14169 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
14174 /* Insert NOPs for hotpatching. */
14175 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14177 1. inside the area covered by debug information to allow setting
14178 breakpoints at the NOPs,
14179 2. before any insn which results in an asm instruction,
14180 3. before in-function labels to avoid jumping to the NOPs, for
14181 example as part of a loop,
14182 4. before any barrier in case the function is completely empty
14183 (__builtin_unreachable ()) and has neither internal labels nor active insns. */
14186 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
14188 /* Output a series of NOPs before the first active insn. */
14189 while (insn && hw_after > 0)
14191 if (hw_after >= 3 && TARGET_CPU_ZARCH)
14193 emit_insn_before (gen_nop_6_byte (), insn);
14196 else if (hw_after >= 2)
14198 emit_insn_before (gen_nop_4_byte (), insn);
14203 emit_insn_before (gen_nop_2_byte (), insn);
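/* Illustrative sketch of the greedy NOP selection above (hypothetical
   helper, not part of the backend): hw_after counts 2-byte halfwords,
   and e.g. hw_after == 5 on zarch yields one 6-byte plus one 4-byte
   NOP.  */
#if 0
static int
hotpatch_nop_bytes (int hw, int zarch_p)
{
  int bytes = 0;

  while (hw > 0)
    {
      if (hw >= 3 && zarch_p)
        {
          bytes += 6;  /* 6-byte NOP covers 3 halfwords.  */
          hw -= 3;
        }
      else if (hw >= 2)
        {
          bytes += 4;  /* 4-byte NOP covers 2 halfwords.  */
          hw -= 2;
        }
      else
        {
          bytes += 2;  /* 2-byte NOP covers 1 halfword.  */
          hw -= 1;
        }
    }
  return bytes;
}
#endif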
14210 /* Return true if INSN is an fp load insn writing register REGNO. */
14212 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
14215 enum attr_type flag = s390_safe_attr_type (insn);
14217 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
14220 set = single_set (insn);
14222 if (set == NULL_RTX)
14225 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
14228 if (REGNO (SET_DEST (set)) != regno)
14234 /* This value describes the distance to be avoided between an
14235 arithmetic fp instruction and an fp load writing the same register.
14236 Both Z10_EARLYLOAD_DISTANCE - 1 and Z10_EARLYLOAD_DISTANCE + 1 are
14237 fine; only the exact value has to be avoided. Otherwise the FP
14238 pipeline will throw an exception causing a major penalty. */
14239 #define Z10_EARLYLOAD_DISTANCE 7
14241 /* Rearrange the ready list in order to avoid the situation described
14242 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14243 moved to the very end of the ready list. */
14245 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
14247 unsigned int regno;
14248 int nready = *nready_p;
14253 enum attr_type flag;
14256 /* Skip DISTANCE - 1 active insns. */
14257 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14258 distance > 0 && insn != NULL_RTX;
14259 distance--, insn = prev_active_insn (insn))
14260 if (CALL_P (insn) || JUMP_P (insn))
14263 if (insn == NULL_RTX)
14266 set = single_set (insn);
14268 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14269 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14272 flag = s390_safe_attr_type (insn);
14274 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14277 regno = REGNO (SET_DEST (set));
14280 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14287 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
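/* Illustrative sketch of the rotation performed by the memmove above
   (hypothetical helper, not part of the backend; assumes <string.h>).
   Element I moves to index 0 -- the end of the ready list, since the
   scheduler issues from the highest index -- while the other elements
   keep their relative order.  */
#if 0
static void
ready_list_move_to_end (int *ready, int i)
{
  int tmp = ready[i];

  /* Shift elements 0 .. i-1 up by one slot, then reinsert.  */
  memmove (&ready[1], &ready[0], sizeof (int) * i);
  ready[0] = tmp;
}
#endif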
14292 /* The s390_sched_state variable tracks the state of the current or
14293 the last instruction group.
14295 0,1,2 number of instructions scheduled in the current group
14296 3 the last group is complete - normal insns
14297 4 the last group was a cracked/expanded insn */
14299 static int s390_sched_state;
14301 #define S390_SCHED_STATE_NORMAL 3
14302 #define S390_SCHED_STATE_CRACKED 4
14304 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14305 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14306 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14307 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
14309 static unsigned int
14310 s390_get_sched_attrmask (rtx_insn *insn)
14312 unsigned int mask = 0;
14316 case PROCESSOR_2827_ZEC12:
14317 if (get_attr_zEC12_cracked (insn))
14318 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14319 if (get_attr_zEC12_expanded (insn))
14320 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14321 if (get_attr_zEC12_endgroup (insn))
14322 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14323 if (get_attr_zEC12_groupalone (insn))
14324 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14326 case PROCESSOR_2964_Z13:
14327 case PROCESSOR_3906_Z14:
14328 if (get_attr_z13_cracked (insn))
14329 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14330 if (get_attr_z13_expanded (insn))
14331 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14332 if (get_attr_z13_endgroup (insn))
14333 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14334 if (get_attr_z13_groupalone (insn))
14335 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14338 gcc_unreachable ();
14343 static unsigned int
14344 s390_get_unit_mask (rtx_insn *insn, int *units)
14346 unsigned int mask = 0;
14350 case PROCESSOR_2964_Z13:
14351 case PROCESSOR_3906_Z14:
14353 if (get_attr_z13_unit_lsu (insn))
14355 if (get_attr_z13_unit_fxu (insn))
14357 if (get_attr_z13_unit_vfu (insn))
14361 gcc_unreachable ();
14366 /* Return the scheduling score for INSN. The higher the score the
14367 better. The score is calculated from the OOO scheduling attributes
14368 of INSN and the scheduling state s390_sched_state. */
14370 s390_sched_score (rtx_insn *insn)
14372 unsigned int mask = s390_get_sched_attrmask (insn);
14375 switch (s390_sched_state)
14378 /* Try to put insns into the first slot which would otherwise break a group. */
14380 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14381 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14383 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14387 /* Prefer not cracked insns while trying to put together a group. */
14389 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14390 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14391 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14393 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14397 /* Prefer not cracked insns while trying to put together a group. */
14399 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14400 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14401 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14403 /* Prefer endgroup insns in the last slot. */
14404 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14407 case S390_SCHED_STATE_NORMAL:
14408 /* Prefer not cracked insns if the last was not cracked. */
14409 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14410 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14412 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14415 case S390_SCHED_STATE_CRACKED:
14416 /* Try to keep cracked insns together to prevent them from
14417 interrupting groups. */
14418 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14419 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14424 if (s390_tune >= PROCESSOR_2964_Z13)
14427 unsigned unit_mask, m = 1;
14429 unit_mask = s390_get_unit_mask (insn, &units);
14430 gcc_assert (units <= MAX_SCHED_UNITS);
14432 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14433 ago the last insn of this unit type got scheduled. This is
14434 supposed to help provide a proper instruction mix to the CPU. */
14436 for (i = 0; i < units; i++, m <<= 1)
14438 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14439 MAX_SCHED_MIX_DISTANCE);
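/* A worked example (with hypothetical values MAX_SCHED_MIX_SCORE == 8
   and MAX_SCHED_MIX_DISTANCE == 100, for illustration only): a unit
   last used 50 insns ago contributes 50 * 8 / 100 == 4 to the score,
   so insns needing long-idle units come out ahead.  */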
14444 /* This function is called via hook TARGET_SCHED_REORDER before
14445 issuing one insn from list READY which contains *NREADYP entries.
14446 For target z10 it reorders load instructions to avoid early load
14447 conflicts in the floating point pipeline. */
14449 s390_sched_reorder (FILE *file, int verbose,
14450 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
14452 if (s390_tune == PROCESSOR_2097_Z10
14453 && reload_completed
14455 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
14457 if (s390_tune >= PROCESSOR_2827_ZEC12
14458 && reload_completed
14462 int last_index = *nreadyp - 1;
14463 int max_index = -1;
14464 int max_score = -1;
14467 /* Just move the insn with the highest score to the top (the
14468 end) of the list. A full sort is not needed since a conflict
14469 in the hazard recognition cannot happen. So the top insn in
14470 the ready list will always be taken. */
14471 for (i = last_index; i >= 0; i--)
14475 if (recog_memoized (ready[i]) < 0)
14478 score = s390_sched_score (ready[i]);
14479 if (score > max_score)
14486 if (max_index != -1)
14488 if (max_index != last_index)
14490 tmp = ready[max_index];
14491 ready[max_index] = ready[last_index];
14492 ready[last_index] = tmp;
14496 ";;\t\tBACKEND: move insn %d to the top of list\n",
14497 INSN_UID (ready[last_index]));
14499 else if (verbose > 5)
14501 ";;\t\tBACKEND: best insn %d already on top\n",
14502 INSN_UID (ready[last_index]));
14507 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14510 for (i = last_index; i >= 0; i--)
14512 unsigned int sched_mask;
14513 rtx_insn *insn = ready[i];
14515 if (recog_memoized (insn) < 0)
14518 sched_mask = s390_get_sched_attrmask (insn);
14519 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14521 s390_sched_score (insn));
14522 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14523 ((M) & sched_mask) ? #ATTR : "");
14524 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14525 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14526 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14527 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14528 #undef PRINT_SCHED_ATTR
14529 if (s390_tune >= PROCESSOR_2964_Z13)
14531 unsigned int unit_mask, m = 1;
14534 unit_mask = s390_get_unit_mask (insn, &units);
14535 fprintf (file, "(units:");
14536 for (j = 0; j < units; j++, m <<= 1)
14538 fprintf (file, " u%d", j);
14539 fprintf (file, ")");
14541 fprintf (file, "\n");
14546 return s390_issue_rate ();
14550 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14551 the scheduler has issued INSN. It stores the last issued insn into
14552 last_scheduled_insn in order to make it available for
14553 s390_sched_reorder. */
14555 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14557 last_scheduled_insn = insn;
14559 if (s390_tune >= PROCESSOR_2827_ZEC12
14560 && reload_completed
14561 && recog_memoized (insn) >= 0)
14563 unsigned int mask = s390_get_sched_attrmask (insn);
14565 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14566 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14567 s390_sched_state = S390_SCHED_STATE_CRACKED;
14568 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14569 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14570 s390_sched_state = S390_SCHED_STATE_NORMAL;
14573 /* Only normal insns are left (mask == 0). */
14574 switch (s390_sched_state)
14579 case S390_SCHED_STATE_NORMAL:
14580 if (s390_sched_state == S390_SCHED_STATE_NORMAL)
14581 s390_sched_state = 1;
14583 s390_sched_state++;
14586 case S390_SCHED_STATE_CRACKED:
14587 s390_sched_state = S390_SCHED_STATE_NORMAL;
14592 if (s390_tune >= PROCESSOR_2964_Z13)
14595 unsigned unit_mask, m = 1;
14597 unit_mask = s390_get_unit_mask (insn, &units);
14598 gcc_assert (units <= MAX_SCHED_UNITS);
14600 for (i = 0; i < units; i++, m <<= 1)
14602 last_scheduled_unit_distance[i] = 0;
14603 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14604 last_scheduled_unit_distance[i]++;
14609 unsigned int sched_mask;
14611 sched_mask = s390_get_sched_attrmask (insn);
14613 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
14614 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
14615 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14616 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14617 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14618 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14619 #undef PRINT_SCHED_ATTR
14621 if (s390_tune >= PROCESSOR_2964_Z13)
14623 unsigned int unit_mask, m = 1;
14626 unit_mask = s390_get_unit_mask (insn, &units);
14627 fprintf (file, "(units:");
14628 for (j = 0; j < units; j++, m <<= 1)
14630 fprintf (file, " %d", j);
14631 fprintf (file, ")");
14633 fprintf (file, " sched state: %d\n", s390_sched_state);
14635 if (s390_tune >= PROCESSOR_2964_Z13)
14639 s390_get_unit_mask (insn, &units);
14641 fprintf (file, ";;\t\tBACKEND: units unused for: ");
14642 for (j = 0; j < units; j++)
14643 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
14644 fprintf (file, "\n");
14649 if (GET_CODE (PATTERN (insn)) != USE
14650 && GET_CODE (PATTERN (insn)) != CLOBBER)
14657 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
14658 int verbose ATTRIBUTE_UNUSED,
14659 int max_ready ATTRIBUTE_UNUSED)
14661 last_scheduled_insn = NULL;
14662 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
14663 s390_sched_state = 0;
14666 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
14667 a new unroll count for struct loop *loop when tuning for CPUs with
14668 a built-in stride prefetcher.
14669 The loop is analyzed for memory accesses by calling check_dpu for
14670 each rtx of the loop. Depending on the loop_depth and the number of
14671 memory accesses a new number <=nunroll is returned to improve the
14672 behavior of the hardware prefetch unit. */
14674 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
14679 unsigned mem_count = 0;
14681 if (s390_tune < PROCESSOR_2097_Z10)
14684 /* Count the number of memory references within the loop body. */
14685 bbs = get_loop_body (loop);
14686 subrtx_iterator::array_type array;
14687 for (i = 0; i < loop->num_nodes; i++)
14688 FOR_BB_INSNS (bbs[i], insn)
14689 if (INSN_P (insn) && INSN_CODE (insn) != -1)
14690 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
14695 /* Prevent division by zero; nunroll needs no adjustment in this case. */
14696 if (mem_count == 0)
14699 switch (loop_depth (loop))
14702 return MIN (nunroll, 28 / mem_count);
14704 return MIN (nunroll, 22 / mem_count);
14706 return MIN (nunroll, 16 / mem_count);
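/* A worked example (illustrative only), assuming the depth-1 case
   above: with nunroll == 8 and 4 memory references in the loop body,
   the result is MIN (8, 28 / 4) == 7 unrolled copies at most.  */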
14710 /* Restore the current options. This is a hook function and also called internally. */
14714 s390_function_specific_restore (struct gcc_options *opts,
14715 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
14717 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
14721 s390_option_override_internal (bool main_args_p,
14722 struct gcc_options *opts,
14723 const struct gcc_options *opts_set)
14725 const char *prefix;
14726 const char *suffix;
14728 /* Set up prefix/suffix so the error messages refer to either the command
14729 line argument or the attribute(target). */
14737 prefix = "option(\"";
14742 /* Architecture mode defaults according to ABI. */
14743 if (!(opts_set->x_target_flags & MASK_ZARCH))
14746 opts->x_target_flags |= MASK_ZARCH;
14748 opts->x_target_flags &= ~MASK_ZARCH;
14751 /* Set the march default in case it hasn't been specified on cmdline. */
14752 if (!opts_set->x_s390_arch)
14753 opts->x_s390_arch = PROCESSOR_2064_Z900;
14754 else if (opts->x_s390_arch == PROCESSOR_9672_G5
14755 || opts->x_s390_arch == PROCESSOR_9672_G6)
14756 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
14757 "in future releases; use at least %sarch=z900%s",
14758 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
14759 suffix, prefix, suffix);
14761 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
14763 /* Determine processor to tune for. */
14764 if (!opts_set->x_s390_tune)
14765 opts->x_s390_tune = opts->x_s390_arch;
14766 else if (opts->x_s390_tune == PROCESSOR_9672_G5
14767 || opts->x_s390_tune == PROCESSOR_9672_G6)
14768 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
14769 "in future releases; use at least %stune=z900%s",
14770 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
14771 suffix, prefix, suffix);
14773 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
14775 /* Sanity checks. */
14776 if (opts->x_s390_arch == PROCESSOR_NATIVE
14777 || opts->x_s390_tune == PROCESSOR_NATIVE)
14778 gcc_unreachable ();
14779 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
14780 error ("z/Architecture mode not supported on %s",
14781 processor_table[(int)opts->x_s390_arch].name);
14782 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
14783 error ("64-bit ABI not supported in ESA/390 mode");
14785 /* Enable hardware transactions if available and not explicitly
14786 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
14787 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
14789 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
14790 opts->x_target_flags |= MASK_OPT_HTM;
14792 opts->x_target_flags &= ~MASK_OPT_HTM;
14795 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
14797 if (TARGET_OPT_VX_P (opts->x_target_flags))
14799 if (!TARGET_CPU_VX_P (opts))
14800 error ("hardware vector support not available on %s",
14801 processor_table[(int)opts->x_s390_arch].name);
14802 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14803 error ("hardware vector support not available with -msoft-float");
14808 if (TARGET_CPU_VX_P (opts))
14809 /* Enable vector support if available and not explicitly disabled
14810 by user. E.g. with -m31 -march=z13 -mzarch */
14811 opts->x_target_flags |= MASK_OPT_VX;
14813 opts->x_target_flags &= ~MASK_OPT_VX;
14816 /* Use hardware DFP if available and not explicitly disabled by
14817 user. E.g. with -m31 -march=z10 -mzarch */
14818 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
14820 if (TARGET_DFP_P (opts))
14821 opts->x_target_flags |= MASK_HARD_DFP;
14823 opts->x_target_flags &= ~MASK_HARD_DFP;
14826 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
14828 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
14830 if (!TARGET_CPU_DFP_P (opts))
14831 error ("hardware decimal floating point instructions"
14832 " not available on %s",
14833 processor_table[(int)opts->x_s390_arch].name);
14834 if (!TARGET_ZARCH_P (opts->x_target_flags))
14835 error ("hardware decimal floating point instructions"
14836 " not available in ESA/390 mode");
14839 opts->x_target_flags &= ~MASK_HARD_DFP;
14842 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
14843 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
14845 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
14846 && TARGET_HARD_DFP_P (opts->x_target_flags))
14847 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
14849 opts->x_target_flags &= ~MASK_HARD_DFP;
14852 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
14853 && TARGET_PACKED_STACK_P (opts->x_target_flags)
14854 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
14855 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
14858 if (opts->x_s390_stack_size)
14860 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
14861 error ("stack size must be greater than the stack guard value");
14862 else if (opts->x_s390_stack_size > 1 << 16)
14863 error ("stack size must not be greater than 64k");
14865 else if (opts->x_s390_stack_guard)
14866 error ("-mstack-guard implies use of -mstack-size");
14868 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
14869 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
14870 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
14873 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
14875 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
14876 opts->x_param_values,
14877 opts_set->x_param_values);
14878 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
14879 opts->x_param_values,
14880 opts_set->x_param_values);
14881 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
14882 opts->x_param_values,
14883 opts_set->x_param_values);
14884 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
14885 opts->x_param_values,
14886 opts_set->x_param_values);
14889 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
14890 opts->x_param_values,
14891 opts_set->x_param_values);
14892 /* values for loop prefetching */
14893 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
14894 opts->x_param_values,
14895 opts_set->x_param_values);
14896 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
14897 opts->x_param_values,
14898 opts_set->x_param_values);
14899 /* s390 has more than 2 levels and the size is much larger. Since
14900 we are always running virtualized, assume that we only get a small
14901 part of the caches above l1. */
14902 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
14903 opts->x_param_values,
14904 opts_set->x_param_values);
14905 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
14906 opts->x_param_values,
14907 opts_set->x_param_values);
14908 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
14909 opts->x_param_values,
14910 opts_set->x_param_values);
14912 /* Use the alternative scheduling-pressure algorithm by default. */
14913 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
14914 opts->x_param_values,
14915 opts_set->x_param_values);
14917 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
14918 opts->x_param_values,
14919 opts_set->x_param_values);
14921 /* Call target specific restore function to do post-init work. At the moment,
14922 this just sets opts->x_s390_cost_pointer. */
14923 s390_function_specific_restore (opts, NULL);
14927 s390_option_override (void)
14930 cl_deferred_option *opt;
14931 vec<cl_deferred_option> *v =
14932 (vec<cl_deferred_option> *) s390_deferred_options;
14935 FOR_EACH_VEC_ELT (*v, i, opt)
14937 switch (opt->opt_index)
14939 case OPT_mhotpatch_:
14946 strncpy (s, opt->arg, 256);
14948 t = strchr (s, ',');
14953 val1 = integral_argument (s);
14954 val2 = integral_argument (t);
14961 if (val1 == -1 || val2 == -1)
14963 /* argument is not a plain number */
14964 error ("arguments to %qs should be non-negative integers",
14968 else if (val1 > s390_hotpatch_hw_max
14969 || val2 > s390_hotpatch_hw_max)
14971 error ("argument to %qs is too large (max. %d)",
14972 "-mhotpatch=n,m", s390_hotpatch_hw_max);
14975 s390_hotpatch_hw_before_label = val1;
14976 s390_hotpatch_hw_after_label = val2;
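/* Usage example (illustrative only): -mhotpatch=1,2 parses to
   val1 == 1 and val2 == 2, i.e. one halfword of NOPs before and two
   halfwords after the function label.  */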
14980 gcc_unreachable ();
14984 /* Set up function hooks. */
14985 init_machine_status = s390_init_machine_status;
14987 s390_option_override_internal (true, &global_options, &global_options_set);
14989 /* Save the initial options in case the user does function specific options. */
14991 target_option_default_node = build_target_option_node (&global_options);
14992 target_option_current_node = target_option_default_node;
14994 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
14995 requires the arch flags to be evaluated already. Since prefetching
14996 is beneficial on s390, we enable it if available. */
14997 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
14998 flag_prefetch_loop_arrays = 1;
15000 if (!s390_pic_data_is_text_relative && !flag_pic)
15001 error ("-mno-pic-data-is-text-relative cannot be used without -fpic/-fPIC");
15005 /* Don't emit DWARF3/4 unless specifically selected. The TPF
15006 debuggers do not yet support DWARF 3/4. */
15007 if (!global_options_set.x_dwarf_strict)
15009 if (!global_options_set.x_dwarf_version)
15013 /* Register a target-specific optimization-and-lowering pass
15014 to run immediately before prologue and epilogue generation.
15016 Registering the pass must be done at start up. It's
15017 convenient to do it here. */
15018 opt_pass *new_pass = new pass_s390_early_mach (g);
15019 struct register_pass_info insert_pass_s390_early_mach =
15021 new_pass, /* pass */
15022 "pro_and_epilogue", /* reference_pass_name */
15023 1, /* ref_pass_instance_number */
15024 PASS_POS_INSERT_BEFORE /* po_op */
15026 register_pass (&insert_pass_s390_early_mach);
15029 #if S390_USE_TARGET_ATTRIBUTE
15030 /* Inner function to process the attribute((target(...))), take an argument and
15031 set the current options from the argument. If we have a list, recursively go over the arguments. */
15035 s390_valid_target_attribute_inner_p (tree args,
15036 struct gcc_options *opts,
15037 struct gcc_options *new_opts_set,
15043 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
15044 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
15045 static const struct
15047 const char *string;
15051 int only_as_pragma;
15054 S390_ATTRIB ("arch=", OPT_march_, 1),
15055 S390_ATTRIB ("tune=", OPT_mtune_, 1),
15056 /* uinteger options */
15057 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
15058 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
15059 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
15060 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
15062 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
15063 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
15064 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
15065 S390_ATTRIB ("htm", OPT_mhtm, 0),
15066 S390_ATTRIB ("vx", OPT_mvx, 0),
15067 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
15068 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
15069 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
15070 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
15071 S390_PRAGMA ("zvector", OPT_mzvector, 0),
15072 /* boolean options */
15073 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
15078 /* If this is a list, recurse to get the options. */
15079 if (TREE_CODE (args) == TREE_LIST)
15082 int num_pragma_values;
15085 /* Note: attribs.c:decl_attributes prepends the values from
15086 current_target_pragma to the list of target attributes. To determine
15087 whether we're looking at a value of the attribute or the pragma we
15088 assume that the first [list_length (current_target_pragma)] values in
15089 the list are the values from the pragma. */
15090 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
15091 ? list_length (current_target_pragma) : 0;
15092 for (i = 0; args; args = TREE_CHAIN (args), i++)
15096 is_pragma = (force_pragma || i < num_pragma_values);
15097 if (TREE_VALUE (args)
15098 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
15099 opts, new_opts_set,
15108 else if (TREE_CODE (args) != STRING_CST)
15110 error ("attribute %<target%> argument not a string");
15114 /* Handle multiple arguments separated by commas. */
15115 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
15117 while (next_optstr && *next_optstr != '\0')
15119 char *p = next_optstr;
15121 char *comma = strchr (next_optstr, ',');
15122 size_t len, opt_len;
15128 enum cl_var_type var_type;
15134 len = comma - next_optstr;
15135 next_optstr = comma + 1;
15140 next_optstr = NULL;
15143 /* Recognize no-xxx. */
15144 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
15153 /* Find the option. */
15156 for (i = 0; i < ARRAY_SIZE (attrs); i++)
15158 opt_len = attrs[i].len;
15159 if (ch == attrs[i].string[0]
15160 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
15161 && memcmp (p, attrs[i].string, opt_len) == 0)
15163 opt = attrs[i].opt;
15164 if (!opt_set_p && cl_options[opt].cl_reject_negative)
15166 mask = cl_options[opt].var_value;
15167 var_type = cl_options[opt].var_type;
15173 /* Process the option. */
15176 error ("attribute(target(\"%s\")) is unknown", orig_p);
15179 else if (attrs[i].only_as_pragma && !force_pragma)
15181 /* Value is not allowed for the target attribute. */
15182 error ("value %qs is not supported by attribute %<target%>",
15187 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
15189 if (var_type == CLVC_BIT_CLEAR)
15190 opt_set_p = !opt_set_p;
15193 opts->x_target_flags |= mask;
15195 opts->x_target_flags &= ~mask;
15196 new_opts_set->x_target_flags |= mask;
15199 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
15203 if (cl_options[opt].cl_uinteger)
15205 /* Unsigned integer argument. Code based on the function
15206 decode_cmdline_option () in opts-common.c. */
15207 value = integral_argument (p + opt_len);
15210 value = (opt_set_p) ? 1 : 0;
15214 struct cl_decoded_option decoded;
15216 /* Value range check; only implemented for numeric and boolean
15217 options at the moment. */
15218 generate_option (opt, NULL, value, CL_TARGET, &decoded);
15219 s390_handle_option (opts, new_opts_set, &decoded, input_location);
15220 set_option (opts, new_opts_set, opt, value,
15221 p + opt_len, DK_UNSPECIFIED, input_location,
15226 error ("attribute(target(\"%s\")) is unknown", orig_p);
15231 else if (cl_options[opt].var_type == CLVC_ENUM)
15236 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
15238 set_option (opts, new_opts_set, opt, value,
15239 p + opt_len, DK_UNSPECIFIED, input_location,
15243 error ("attribute(target(\"%s\")) is unknown", orig_p);
15249 gcc_unreachable ();
15254 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15257 s390_valid_target_attribute_tree (tree args,
15258 struct gcc_options *opts,
15259 const struct gcc_options *opts_set,
15262 tree t = NULL_TREE;
15263 struct gcc_options new_opts_set;
15265 memset (&new_opts_set, 0, sizeof (new_opts_set));
15267 /* Process each of the options on the chain. */
15268 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
15270 return error_mark_node;
15272 /* If some option was set (even if it has not changed), rerun
15273 s390_option_override_internal, and then save the options away. */
15274 if (new_opts_set.x_target_flags
15275 || new_opts_set.x_s390_arch
15276 || new_opts_set.x_s390_tune
15277 || new_opts_set.x_s390_stack_guard
15278 || new_opts_set.x_s390_stack_size
15279 || new_opts_set.x_s390_branch_cost
15280 || new_opts_set.x_s390_warn_framesize
15281 || new_opts_set.x_s390_warn_dynamicstack_p)
15283 const unsigned char *src = (const unsigned char *)opts_set;
15284 unsigned char *dest = (unsigned char *)&new_opts_set;
15287 /* Merge the original option flags into the new ones. */
15288 for (i = 0; i < sizeof(*opts_set); i++)
15291 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
15292 s390_option_override_internal (false, opts, &new_opts_set);
15293 /* Save the current options unless we are validating options for #pragma. */
15295 t = build_target_option_node (opts);
15300 /* Hook to validate attribute((target("string"))). */
15303 s390_valid_target_attribute_p (tree fndecl,
15304 tree ARG_UNUSED (name),
15306 int ARG_UNUSED (flags))
15308 struct gcc_options func_options;
15309 tree new_target, new_optimize;
15312 /* attribute((target("default"))) does nothing, beyond
15313 affecting multi-versioning. */
15314 if (TREE_VALUE (args)
15315 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
15316 && TREE_CHAIN (args) == NULL_TREE
15317 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
15320 tree old_optimize = build_optimization_node (&global_options);
15322 /* Get the optimization options of the current function. */
15323 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
15325 if (!func_optimize)
15326 func_optimize = old_optimize;
15328 /* Init func_options. */
15329 memset (&func_options, 0, sizeof (func_options));
15330 init_options_struct (&func_options, NULL);
15331 lang_hooks.init_options_struct (&func_options);
15333 cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));
15335 /* Initialize func_options to the default before its target options can be set. */
15337 cl_target_option_restore (&func_options,
15338 TREE_TARGET_OPTION (target_option_default_node));
15340 new_target = s390_valid_target_attribute_tree (args, &func_options,
15341 &global_options_set,
15343 current_target_pragma));
15344 new_optimize = build_optimization_node (&func_options);
15345 if (new_target == error_mark_node)
15347 else if (fndecl && new_target)
15349 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
15350 if (old_optimize != new_optimize)
15351 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
15356 /* Restore targets globals from NEW_TREE and invalidate the s390_previous_fndecl cache. */
15360 s390_activate_target_options (tree new_tree)
15362 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
15363 if (TREE_TARGET_GLOBALS (new_tree))
15364 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
15365 else if (new_tree == target_option_default_node)
15366 restore_target_globals (&default_target_globals);
15368 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
15369 s390_previous_fndecl = NULL_TREE;
15372 /* Establish appropriate back-end context for processing the function
15373 FNDECL. The argument might be NULL to indicate processing at top
15374 level, outside of any function scope. */
15376 s390_set_current_function (tree fndecl)
15378 /* Only change the context if the function changes. This hook is called
15379 several times in the course of compiling a function, and we don't want to
15380 slow things down too much or call target_reinit when it isn't safe. */
15381 if (fndecl == s390_previous_fndecl)
15385 if (s390_previous_fndecl == NULL_TREE)
15386 old_tree = target_option_current_node;
15387 else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
15388 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
15390 old_tree = target_option_default_node;
15392 if (fndecl == NULL_TREE)
15394 if (old_tree != target_option_current_node)
15395 s390_activate_target_options (target_option_current_node);
15399 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
15400 if (new_tree == NULL_TREE)
15401 new_tree = target_option_default_node;
15403 if (old_tree != new_tree)
15404 s390_activate_target_options (new_tree);
15405 s390_previous_fndecl = fndecl;
15409 /* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P. */
15412 s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
15413 unsigned int align ATTRIBUTE_UNUSED,
15414 enum by_pieces_operation op ATTRIBUTE_UNUSED,
15415 bool speed_p ATTRIBUTE_UNUSED)
15417 return (size == 1 || size == 2
15418 || size == 4 || (TARGET_ZARCH && size == 8));
15421 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
15424 s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
15426 tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
15427 tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
15428 tree call_efpc = build_call_expr (efpc, 0);
15429 tree fenv_var = create_tmp_var_raw (unsigned_type_node);
15431 #define FPC_EXCEPTION_MASK HOST_WIDE_INT_UC (0xf8000000)
15432 #define FPC_FLAGS_MASK HOST_WIDE_INT_UC (0x00f80000)
15433 #define FPC_DXC_MASK HOST_WIDE_INT_UC (0x0000ff00)
15434 #define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
15435 #define FPC_FLAGS_SHIFT HOST_WIDE_INT_UC (16)
15436 #define FPC_DXC_SHIFT HOST_WIDE_INT_UC (8)
15438 /* Generates the equivalent of feholdexcept (&fenv_var)
15440 fenv_var = __builtin_s390_efpc ();
15441 __builtin_s390_sfpc (fenv_var & mask) */
15442 tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
15444 build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
15445 build_int_cst (unsigned_type_node,
15446 ~(FPC_DXC_MASK | FPC_FLAGS_MASK |
15447 FPC_EXCEPTION_MASK)));
15448 tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
15449 *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);
15451 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT)
15453 __builtin_s390_sfpc (__builtin_s390_efpc () & mask) */
15454 new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
15455 build_int_cst (unsigned_type_node,
15456 ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
15457 *clear = build_call_expr (sfpc, 1, new_fpc);
15459 /* Generates the equivalent of feupdateenv (fenv_var)
15461 old_fpc = __builtin_s390_efpc ();
15462 __builtin_s390_sfpc (fenv_var);
15463 __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT); */
15465 old_fpc = create_tmp_var_raw (unsigned_type_node);
15466 tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
15467 old_fpc, call_efpc);
15469 set_new_fpc = build_call_expr (sfpc, 1, fenv_var);
15471 tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
15472 build_int_cst (unsigned_type_node,
15474 raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
15475 build_int_cst (unsigned_type_node,
15477 tree atomic_feraiseexcept
15478 = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
15479 raise_old_except = build_call_expr (atomic_feraiseexcept,
15480 1, raise_old_except);
15482 *update = build2 (COMPOUND_EXPR, void_type_node,
15483 build2 (COMPOUND_EXPR, void_type_node,
15484 store_old_fpc, set_new_fpc),
15487 #undef FPC_EXCEPTION_MASK
15488 #undef FPC_FLAGS_MASK
15489 #undef FPC_DXC_MASK
15490 #undef FPC_EXCEPTION_MASK_SHIFT
15491 #undef FPC_FLAGS_SHIFT
15492 #undef FPC_DXC_SHIFT
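/* Taken together, the three trees built above behave like the
   following C sketch (illustrative only; efpc/sfpc stand for the FPC
   read/write builtins used above):

     unsigned fenv_var, old_fpc;

     hold:   fenv_var = efpc ();
             sfpc (fenv_var & ~(FPC_DXC_MASK | FPC_FLAGS_MASK
                                | FPC_EXCEPTION_MASK));
     clear:  sfpc (efpc () & ~(FPC_DXC_MASK | FPC_FLAGS_MASK));
     update: old_fpc = efpc ();
             sfpc (fenv_var);
             __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK)
                                     >> FPC_FLAGS_SHIFT);             */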
15495 /* Return the vector mode to be used for inner mode MODE when doing vectorization. */
15497 static machine_mode
15498 s390_preferred_simd_mode (machine_mode mode)
15518 /* Our hardware does not require vectors to be strictly aligned. */
15520 s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
15521 const_tree type ATTRIBUTE_UNUSED,
15522 int misalignment ATTRIBUTE_UNUSED,
15523 bool is_packed ATTRIBUTE_UNUSED)
15528 return default_builtin_support_vector_misalignment (mode, type, misalignment,
15532 /* The vector ABI requires vector types to be aligned on an 8 byte
15533 boundary (our stack alignment). However, we allow this to be
15534 overridden by the user, even though this definitely breaks the ABI. */
15535 static HOST_WIDE_INT
15536 s390_vector_alignment (const_tree type)
15538 if (!TARGET_VX_ABI)
15539 return default_vector_alignment (type);
15541 if (TYPE_USER_ALIGN (type))
15542 return TYPE_ALIGN (type);
15544 return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
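/* E.g. a 16-byte vector type has TYPE_SIZE == 128 bits, so it gets
   MIN (64, 128) == 64 bits, i.e. the 8-byte ABI alignment.  */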
15547 #ifdef HAVE_AS_MACHINE_MACHINEMODE
15548 /* Implement TARGET_ASM_FILE_START. */
15550 s390_asm_file_start (void)
15552 default_file_start ();
15553 s390_asm_output_machine_for_arch (asm_out_file);
15557 /* Implement TARGET_ASM_FILE_END. */
15559 s390_asm_file_end (void)
15561 #ifdef HAVE_AS_GNU_ATTRIBUTE
15562 varpool_node *vnode;
15563 cgraph_node *cnode;
15565 FOR_EACH_VARIABLE (vnode)
15566 if (TREE_PUBLIC (vnode->decl))
15567 s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);
15569 FOR_EACH_FUNCTION (cnode)
15570 if (TREE_PUBLIC (cnode->decl))
15571 s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);
15574 if (s390_vector_abi != 0)
15575 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
15578 file_end_indicate_exec_stack ();
15580 if (flag_split_stack)
15581 file_end_indicate_split_stack ();
15584 /* Return true if TYPE is a vector bool type. */
15586 s390_vector_bool_type_p (const_tree type)
15588 return TYPE_VECTOR_OPAQUE (type);
15591 /* Return the diagnostic message string if the binary operation OP is
15592 not permitted on TYPE1 and TYPE2, NULL otherwise. */
15594 s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
15596 bool bool1_p, bool2_p;
15600 machine_mode mode1, mode2;
15602 if (!TARGET_ZVECTOR)
15605 if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
15608 bool1_p = s390_vector_bool_type_p (type1);
15609 bool2_p = s390_vector_bool_type_p (type2);
15611 /* Mixing signed and unsigned types is forbidden for all vector operations. */
15613 if (!bool1_p && !bool2_p
15614 && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
15615 return N_("types differ in signedness");
15617 plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
15618 muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
15619 || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
15620 || op == ROUND_DIV_EXPR);
15621 compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
15622 || op == EQ_EXPR || op == NE_EXPR);
15624 if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
15625 return N_("binary operator does not support two vector bool operands");
15627 if (bool1_p != bool2_p && (muldiv_p || compare_p))
15628 return N_("binary operator does not support vector bool operand");
15630 mode1 = TYPE_MODE (type1);
15631 mode2 = TYPE_MODE (type2);
15633 if (bool1_p != bool2_p && plusminus_p
15634 && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
15635 || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
15636 return N_("binary operator does not support mixing vector "
15637 "bool with floating point vector operands");
15642 /* Implement TARGET_C_EXCESS_PRECISION.
15644 FIXME: For historical reasons, float_t and double_t are typedef'ed to
15645 double on s390, causing operations on float_t to operate in a higher
15646 precision than is necessary. However, it is not the case that SFmode
15647 operations have implicit excess precision, and we generate more optimal
15648 code if we let the compiler know no implicit extra precision is added.
15650 That means when we are compiling with -fexcess-precision=fast, the value
15651 we set for FLT_EVAL_METHOD will be out of line with the actual precision of
15652 float_t (though they would be correct for -fexcess-precision=standard).
15654 A complete fix would modify glibc to remove the unnecessary typedef
15655 of float_t to double. */
15657 static enum flt_eval_method
15658 s390_excess_precision (enum excess_precision_type type)
15662 case EXCESS_PRECISION_TYPE_IMPLICIT:
15663 case EXCESS_PRECISION_TYPE_FAST:
15664 /* The fastest type to promote to will always be the native type,
15665 whether that occurs with implicit excess precision or otherwise. */
15667 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
15668 case EXCESS_PRECISION_TYPE_STANDARD:
15669 /* Otherwise, when we are in a standards compliant mode, to
15670 ensure consistency with the implementation in glibc, report that
15671 float is evaluated to the range and precision of double. */
15672 return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
15674 gcc_unreachable ();
15676 return FLT_EVAL_METHOD_UNPREDICTABLE;
15679 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
15681 static unsigned HOST_WIDE_INT
15682 s390_asan_shadow_offset (void)
15684 return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
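/* Illustrative sketch (hypothetical helper, not part of the backend):
   ASan locates the shadow byte of an address as (addr >> 3) + offset,
   so with the 64-bit offset returned above:  */
#if 0
static unsigned long long
asan_shadow_addr (unsigned long long addr)
{
  return (addr >> 3) + (1ULL << 52);  /* 64-bit s390 offset.  */
}
#endif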
15687 /* Initialize GCC target structure. */
15689 #undef TARGET_ASM_ALIGNED_HI_OP
15690 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
15691 #undef TARGET_ASM_ALIGNED_DI_OP
15692 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
15693 #undef TARGET_ASM_INTEGER
15694 #define TARGET_ASM_INTEGER s390_assemble_integer
15696 #undef TARGET_ASM_OPEN_PAREN
15697 #define TARGET_ASM_OPEN_PAREN ""
15699 #undef TARGET_ASM_CLOSE_PAREN
15700 #define TARGET_ASM_CLOSE_PAREN ""
15702 #undef TARGET_OPTION_OVERRIDE
15703 #define TARGET_OPTION_OVERRIDE s390_option_override
15705 #ifdef TARGET_THREAD_SSP_OFFSET
15706 #undef TARGET_STACK_PROTECT_GUARD
15707 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
15710 #undef TARGET_ENCODE_SECTION_INFO
15711 #define TARGET_ENCODE_SECTION_INFO s390_encode_section_info
15713 #undef TARGET_SCALAR_MODE_SUPPORTED_P
15714 #define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL s390_builtin_decl

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_C_EXCESS_PRECISION
#define TARGET_C_EXCESS_PRECISION s390_excess_precision

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  s390_builtin_vectorization_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_ASAN_SHADOW_OFFSET
#define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true

#undef TARGET_KEEP_LEAF_WHEN_PROFILED
#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel

#undef TARGET_DWARF_FRAME_REG_MODE
#define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif
#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE s390_attribute_table

#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE s390_set_up_by_prologue
#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  s390_use_by_pieces_infrastructure_p

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode

#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment

#undef TARGET_VECTOR_ALIGNMENT
#define TARGET_VECTOR_ALIGNMENT s390_vector_alignment

#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP s390_invalid_binary_op
#ifdef HAVE_AS_MACHINE_MACHINEMODE
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START s390_asm_file_start
#endif
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END s390_asm_file_end
#if S390_USE_TARGET_ATTRIBUTE
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION s390_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p
#endif
#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE s390_function_specific_restore

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"