/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2018 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
		  Ulrich Weigand (uweigand@de.ibm.com) and
		  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "coretypes.h"
#include "target-globals.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "conditions.h"
#include "insn-attr.h"
#include "cfgcleanup.h"
#include "langhooks.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-pass.h"
#include "tm-constrs.h"
#include "symbol-summary.h"
#include "ipa-fnsummary.h"
#include "sched-int.h"

/* This file should be included last.  */
#include "target-def.h"
static bool s390_hard_regno_mode_ok (unsigned int, machine_mode);

/* Remember the last target of s390_set_current_function.  */
static GTY(()) tree s390_previous_fndecl;

/* Define the specific costs for a given cpu.  */
struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int mxbr;     /* cost of an MXBR instruction.  */
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
  /* division */
  const int dxbr;     /* cost of a DXBR instruction.  */
  const int ddbr;     /* cost of a DDBR instruction.  */
  const int debr;     /* cost of a DEBR instruction.  */
  const int dlgr;     /* cost of a DLGR instruction.  */
  const int dlr;      /* cost of a DLR instruction.  */
  const int dr;       /* cost of a DR instruction.  */
  const int dsgfr;    /* cost of a DSGFR instruction.  */
  const int dsgr;     /* cost of a DSGR instruction.  */
};

#define s390_cost ((const struct processor_costs *)(s390_cost_pointer))
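
/* Note (illustrative, not part of the original comments): COSTS_N_INSNS (N)
   is GCC's standard way of expressing a cost of N simple instructions, so
   e.g. COSTS_N_INSNS (5) for M in the z900 table below models an M multiply
   as five times the cost of a basic instruction.  */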
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),   /* M */
  COSTS_N_INSNS (10),  /* MGHI */
  COSTS_N_INSNS (5),   /* MH */
  COSTS_N_INSNS (4),   /* MHI */
  COSTS_N_INSNS (5),   /* ML */
  COSTS_N_INSNS (5),   /* MR */
  COSTS_N_INSNS (4),   /* MS */
  COSTS_N_INSNS (15),  /* MSG */
  COSTS_N_INSNS (7),   /* MSGF */
  COSTS_N_INSNS (7),   /* MSGFR */
  COSTS_N_INSNS (10),  /* MSGR */
  COSTS_N_INSNS (4),   /* MSR */
  COSTS_N_INSNS (7),   /* multiplication in DFmode */
  COSTS_N_INSNS (13),  /* MXBR */
  COSTS_N_INSNS (136), /* SQXBR */
  COSTS_N_INSNS (44),  /* SQDBR */
  COSTS_N_INSNS (35),  /* SQEBR */
  COSTS_N_INSNS (18),  /* MADBR */
  COSTS_N_INSNS (13),  /* MAEBR */
  COSTS_N_INSNS (134), /* DXBR */
  COSTS_N_INSNS (30),  /* DDBR */
  COSTS_N_INSNS (27),  /* DEBR */
  COSTS_N_INSNS (220), /* DLGR */
  COSTS_N_INSNS (34),  /* DLR */
  COSTS_N_INSNS (34),  /* DR */
  COSTS_N_INSNS (32),  /* DSGFR */
  COSTS_N_INSNS (32),  /* DSGR */
};
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),   /* M */
  COSTS_N_INSNS (2),   /* MGHI */
  COSTS_N_INSNS (2),   /* MH */
  COSTS_N_INSNS (2),   /* MHI */
  COSTS_N_INSNS (4),   /* ML */
  COSTS_N_INSNS (4),   /* MR */
  COSTS_N_INSNS (5),   /* MS */
  COSTS_N_INSNS (6),   /* MSG */
  COSTS_N_INSNS (4),   /* MSGF */
  COSTS_N_INSNS (4),   /* MSGFR */
  COSTS_N_INSNS (4),   /* MSGR */
  COSTS_N_INSNS (4),   /* MSR */
  COSTS_N_INSNS (1),   /* multiplication in DFmode */
  COSTS_N_INSNS (28),  /* MXBR */
  COSTS_N_INSNS (130), /* SQXBR */
  COSTS_N_INSNS (66),  /* SQDBR */
  COSTS_N_INSNS (38),  /* SQEBR */
  COSTS_N_INSNS (1),   /* MADBR */
  COSTS_N_INSNS (1),   /* MAEBR */
  COSTS_N_INSNS (60),  /* DXBR */
  COSTS_N_INSNS (40),  /* DDBR */
  COSTS_N_INSNS (26),  /* DEBR */
  COSTS_N_INSNS (176), /* DLGR */
  COSTS_N_INSNS (31),  /* DLR */
  COSTS_N_INSNS (31),  /* DR */
  COSTS_N_INSNS (31),  /* DSGFR */
  COSTS_N_INSNS (31),  /* DSGR */
};
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),   /* M */
  COSTS_N_INSNS (2),   /* MGHI */
  COSTS_N_INSNS (2),   /* MH */
  COSTS_N_INSNS (2),   /* MHI */
  COSTS_N_INSNS (4),   /* ML */
  COSTS_N_INSNS (4),   /* MR */
  COSTS_N_INSNS (5),   /* MS */
  COSTS_N_INSNS (6),   /* MSG */
  COSTS_N_INSNS (4),   /* MSGF */
  COSTS_N_INSNS (4),   /* MSGFR */
  COSTS_N_INSNS (4),   /* MSGR */
  COSTS_N_INSNS (4),   /* MSR */
  COSTS_N_INSNS (1),   /* multiplication in DFmode */
  COSTS_N_INSNS (28),  /* MXBR */
  COSTS_N_INSNS (130), /* SQXBR */
  COSTS_N_INSNS (66),  /* SQDBR */
  COSTS_N_INSNS (38),  /* SQEBR */
  COSTS_N_INSNS (1),   /* MADBR */
  COSTS_N_INSNS (1),   /* MAEBR */
  COSTS_N_INSNS (60),  /* DXBR */
  COSTS_N_INSNS (40),  /* DDBR */
  COSTS_N_INSNS (26),  /* DEBR */
  COSTS_N_INSNS (30),  /* DLGR */
  COSTS_N_INSNS (23),  /* DLR */
  COSTS_N_INSNS (23),  /* DR */
  COSTS_N_INSNS (24),  /* DSGFR */
  COSTS_N_INSNS (24),  /* DSGR */
};
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),  /* M */
  COSTS_N_INSNS (10),  /* MGHI */
  COSTS_N_INSNS (10),  /* MH */
  COSTS_N_INSNS (10),  /* MHI */
  COSTS_N_INSNS (10),  /* ML */
  COSTS_N_INSNS (10),  /* MR */
  COSTS_N_INSNS (10),  /* MS */
  COSTS_N_INSNS (10),  /* MSG */
  COSTS_N_INSNS (10),  /* MSGF */
  COSTS_N_INSNS (10),  /* MSGFR */
  COSTS_N_INSNS (10),  /* MSGR */
  COSTS_N_INSNS (10),  /* MSR */
  COSTS_N_INSNS (1),   /* multiplication in DFmode */
  COSTS_N_INSNS (50),  /* MXBR */
  COSTS_N_INSNS (120), /* SQXBR */
  COSTS_N_INSNS (52),  /* SQDBR */
  COSTS_N_INSNS (38),  /* SQEBR */
  COSTS_N_INSNS (1),   /* MADBR */
  COSTS_N_INSNS (1),   /* MAEBR */
  COSTS_N_INSNS (111), /* DXBR */
  COSTS_N_INSNS (39),  /* DDBR */
  COSTS_N_INSNS (32),  /* DEBR */
  COSTS_N_INSNS (160), /* DLGR */
  COSTS_N_INSNS (71),  /* DLR */
  COSTS_N_INSNS (71),  /* DR */
  COSTS_N_INSNS (71),  /* DSGFR */
  COSTS_N_INSNS (71),  /* DSGR */
};
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7),   /* M */
  COSTS_N_INSNS (5),   /* MGHI */
  COSTS_N_INSNS (5),   /* MH */
  COSTS_N_INSNS (5),   /* MHI */
  COSTS_N_INSNS (7),   /* ML */
  COSTS_N_INSNS (7),   /* MR */
  COSTS_N_INSNS (6),   /* MS */
  COSTS_N_INSNS (8),   /* MSG */
  COSTS_N_INSNS (6),   /* MSGF */
  COSTS_N_INSNS (6),   /* MSGFR */
  COSTS_N_INSNS (8),   /* MSGR */
  COSTS_N_INSNS (6),   /* MSR */
  COSTS_N_INSNS (1),   /* multiplication in DFmode */
  COSTS_N_INSNS (40),  /* MXBR B+40 */
  COSTS_N_INSNS (100), /* SQXBR B+100 */
  COSTS_N_INSNS (42),  /* SQDBR B+42 */
  COSTS_N_INSNS (28),  /* SQEBR B+28 */
  COSTS_N_INSNS (1),   /* MADBR B */
  COSTS_N_INSNS (1),   /* MAEBR B */
  COSTS_N_INSNS (101), /* DXBR B+101 */
  COSTS_N_INSNS (29),  /* DDBR */
  COSTS_N_INSNS (22),  /* DEBR */
  COSTS_N_INSNS (160), /* DLGR cracked */
  COSTS_N_INSNS (160), /* DLR cracked */
  COSTS_N_INSNS (160), /* DR expanded */
  COSTS_N_INSNS (160), /* DSGFR cracked */
  COSTS_N_INSNS (160), /* DSGR cracked */
};
struct processor_costs zEC12_cost =
{
  COSTS_N_INSNS (7),   /* M */
  COSTS_N_INSNS (5),   /* MGHI */
  COSTS_N_INSNS (5),   /* MH */
  COSTS_N_INSNS (5),   /* MHI */
  COSTS_N_INSNS (7),   /* ML */
  COSTS_N_INSNS (7),   /* MR */
  COSTS_N_INSNS (6),   /* MS */
  COSTS_N_INSNS (8),   /* MSG */
  COSTS_N_INSNS (6),   /* MSGF */
  COSTS_N_INSNS (6),   /* MSGFR */
  COSTS_N_INSNS (8),   /* MSGR */
  COSTS_N_INSNS (6),   /* MSR */
  COSTS_N_INSNS (1),   /* multiplication in DFmode */
  COSTS_N_INSNS (40),  /* MXBR B+40 */
  COSTS_N_INSNS (100), /* SQXBR B+100 */
  COSTS_N_INSNS (42),  /* SQDBR B+42 */
  COSTS_N_INSNS (28),  /* SQEBR B+28 */
  COSTS_N_INSNS (1),   /* MADBR B */
  COSTS_N_INSNS (1),   /* MAEBR B */
  COSTS_N_INSNS (131), /* DXBR B+131 */
  COSTS_N_INSNS (29),  /* DDBR */
  COSTS_N_INSNS (22),  /* DEBR */
  COSTS_N_INSNS (160), /* DLGR cracked */
  COSTS_N_INSNS (160), /* DLR cracked */
  COSTS_N_INSNS (160), /* DR expanded */
  COSTS_N_INSNS (160), /* DSGFR cracked */
  COSTS_N_INSNS (160), /* DSGR cracked */
};
static struct
{
  /* The preferred name to be used in user visible output.  */
  const char *const name;

  /* CPU name as it should be passed to Binutils via .machine  */
  const char *const binutils_name;

  const enum processor_type processor;
  const struct processor_costs *cost;
}
const processor_table[] =
{
  { "g5",     "g5",     PROCESSOR_9672_G5,     &z900_cost },
  { "g6",     "g6",     PROCESSOR_9672_G6,     &z900_cost },
  { "z900",   "z900",   PROCESSOR_2064_Z900,   &z900_cost },
  { "z990",   "z990",   PROCESSOR_2084_Z990,   &z990_cost },
  { "z9-109", "z9-109", PROCESSOR_2094_Z9_109, &z9_109_cost },
  { "z9-ec",  "z9-ec",  PROCESSOR_2094_Z9_EC,  &z9_109_cost },
  { "z10",    "z10",    PROCESSOR_2097_Z10,    &z10_cost },
  { "z196",   "z196",   PROCESSOR_2817_Z196,   &z196_cost },
  { "zEC12",  "zEC12",  PROCESSOR_2827_ZEC12,  &zEC12_cost },
  { "z13",    "z13",    PROCESSOR_2964_Z13,    &zEC12_cost },
  { "z14",    "arch12", PROCESSOR_3906_Z14,    &zEC12_cost },
  { "native", "",       PROCESSOR_NATIVE,      NULL }
};
extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx_insn *last_scheduled_insn;
#define MAX_SCHED_UNITS 3
static int last_scheduled_unit_distance[MAX_SCHED_UNITS];

#define NUM_SIDES 2
static int current_side = 1;
#define LONGRUNNING_THRESHOLD 5

/* Estimate of number of cycles a long-running insn occupies an
   execution unit.  */
static unsigned fxu_longrunning[NUM_SIDES];
static unsigned vfu_longrunning[NUM_SIDES];

/* Factor to scale latencies by, determined by measurements.  */
#define LATENCY_FACTOR 4

/* The maximum score added for an instruction whose unit hasn't been
   in use for MAX_SCHED_MIX_DISTANCE steps.  Increase this value to
   give instruction mix scheduling more priority over instruction
   grouping.  */
#define MAX_SCHED_MIX_SCORE 8

/* The maximum distance up to which individual scores will be
   calculated.  Everything beyond this gives MAX_SCHED_MIX_SCORE.
   Increase this with the OOO window size of the machine.  */
#define MAX_SCHED_MIX_DISTANCE 100
/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
	base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */

struct s390_address
{
  rtx base;
  rtx indx;
  rtx disp;
  bool pointer;
  bool literal_pool;
};
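
/* Illustrative example (not from the original comments): the operand
   100(%r3,%r4) of an instruction such as "lg %r1,100(%r3,%r4)"
   decomposes into base = %r4, index = %r3 and displacement = 100.  */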
/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Number of first and last gpr for which slots in the register
     save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
       0 - does not need to be saved at all
      -1 - stack slot  */
#define SAVE_SLOT_NONE   0
#define SAVE_SLOT_STACK -1
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored.  */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;

  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};
/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     the FPRs.  */
  bool tbegin_p;

  /* For -fsplit-stack support: A stack local which holds a pointer to
     the stack arguments for a function with a variable number of
     arguments.  This is set at the start of the function and is used
     to initialize the overflow_arg_area field of the va_list
     structure.  */
  rtx split_stack_varargs_pointer;
};
/* A few accessor macros for struct cfun->machine->s390_frame_layout.  */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_save_arg_fprs_p (!!(TARGET_64BIT				\
				 ? cfun_frame_layout.fpr_bitmap & 0x0f	\
				 : cfun_frame_layout.fpr_bitmap & 0x03))
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
#define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (REGNO - FPR0_REGNUM)))
#define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (REGNO - FPR0_REGNUM))))
#define cfun_gpr_save_slot(REGNO) \
  cfun->machine->frame_layout.gpr_save_slots[REGNO]
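
/* Illustrative use of the accessors above (FPR0_REGNUM + 8 is f8):
     cfun_set_fpr_save (FPR0_REGNUM + 8);
     gcc_assert (cfun_fpr_save_p (FPR0_REGNUM + 8));
   records that f8 must be saved and verifies the bit again.  */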
/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
#define VEC_ARG_NUM_REG 8

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
	CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE)                               \
  (s390_hard_regno_nregs ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
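
/* Illustrative example: a TImode value needs two GPRs on 64-bit
   targets (s390_hard_regno_nregs == 2), so REGNO_PAIR_OK accepts it
   only in an even-numbered starting register.  */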
/* That's the read ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)

/* Indicate which ABI has been used for passing vector args.
   0 - no vector type arguments have been passed where the ABI is relevant
   1 - the old ABI has been used
   2 - a vector type argument has been passed either in a vector register
       or on the stack by value  */
static int s390_vector_abi = 0;
/* Set the vector ABI marker if TYPE is subject to the vector ABI
   switch.  The vector ABI affects only vector data types.  There are
   two aspects of the vector ABI relevant here:

   1. vectors >= 16 bytes have an alignment of 8 bytes with the new
      ABI and natural alignment with the old.

   2. vectors <= 16 bytes are passed in VRs or by value on the stack
      with the new ABI but by reference on the stack with the old.

   If ARG_P is true TYPE is used for a function argument or return
   value.  The ABI marker then is set for all vector data types.  If
   ARG_P is false only type 1 vectors are being checked.  */
static void
s390_check_type_for_vector_abi (const_tree type, bool arg_p, bool in_struct_p)
{
  static hash_set<const_tree> visited_types_hash;

  if (s390_vector_abi)
    return;

  if (type == NULL_TREE || TREE_CODE (type) == ERROR_MARK)
    return;

  if (visited_types_hash.contains (type))
    return;

  visited_types_hash.add (type);

  if (VECTOR_TYPE_P (type))
    {
      int type_size = int_size_in_bytes (type);

      /* Outside arguments only the alignment is changing and this
	 only happens for vector types >= 16 bytes.  */
      if (!arg_p && type_size < 16)
	return;

      /* In arguments vector types > 16 are passed as before (GCC
	 never enforced the bigger alignment for arguments which was
	 required by the old vector ABI).  However, it might still be
	 ABI relevant due to the changed alignment if it is a struct
	 member.  */
      if (arg_p && type_size > 16 && !in_struct_p)
	return;

      s390_vector_abi = TARGET_VX_ABI ? 2 : 1;
    }
  else if (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE)
    {
      /* ARRAY_TYPE: Since with neither of the ABIs we have more than
	 natural alignment there will never be ABI dependent padding
	 in an array type.  That's why we do not set in_struct_p to
	 true here.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), arg_p, in_struct_p);
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
    {
      tree arg_chain;

      /* Check the return type.  */
      s390_check_type_for_vector_abi (TREE_TYPE (type), true, false);

      for (arg_chain = TYPE_ARG_TYPES (type);
	   arg_chain;
	   arg_chain = TREE_CHAIN (arg_chain))
	s390_check_type_for_vector_abi (TREE_VALUE (arg_chain), true, false);
    }
  else if (RECORD_OR_UNION_TYPE_P (type))
    {
      tree field;

      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	{
	  if (TREE_CODE (field) != FIELD_DECL)
	    continue;

	  s390_check_type_for_vector_abi (TREE_TYPE (field), arg_p, true);
	}
    }
}
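
/* Illustrative example of a type that sets the marker: a struct passed
   by value whose member is a 16-byte vector, e.g.
     typedef int v4si __attribute__ ((vector_size (16)));
     struct s { v4si x; };
   Passing "struct s" as an argument reaches the VECTOR_TYPE_P case
   above via the RECORD case and sets s390_vector_abi.  */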
/* System z builtins.  */

#include "s390-builtins.h"

const unsigned int bflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, ...) BFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int opflags_builtin[S390_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, ...) OPFLAGS,
#define OB_DEF(...)
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int bflags_overloaded_builtin[S390_OVERLOADED_BUILTIN_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, ...) BFLAGS,
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
    0
  };

const unsigned int
bflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(...)
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) FLAGS,
#include "s390-builtins.def"
    0
  };

const unsigned int
opflags_overloaded_builtin_var[S390_OVERLOADED_BUILTIN_VAR_MAX + 1] =
  {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(...)
#define OB_DEF(...)
#define OB_DEF_VAR(NAME, PATTERN, FLAGS, OPFLAGS, FNTYPE) OPFLAGS,
#include "s390-builtins.def"
    0
  };

tree s390_builtin_types[BT_MAX];
tree s390_builtin_fn_types[BT_FN_MAX];
tree s390_builtin_decls[S390_BUILTIN_MAX +
			S390_OVERLOADED_BUILTIN_MAX +
			S390_OVERLOADED_BUILTIN_VAR_MAX];

static enum insn_code const code_for_builtin[S390_BUILTIN_MAX + 1] = {
#undef B_DEF
#undef OB_DEF
#undef OB_DEF_VAR
#define B_DEF(NAME, PATTERN, ...) CODE_FOR_##PATTERN,
#define OB_DEF(...)
#define OB_DEF_VAR(...)

#include "s390-builtins.def"
  CODE_FOR_nothing
};
static void
s390_init_builtins (void)
{
  /* These definitions are being used in s390-builtins.def.  */
  tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
				       NULL, NULL);
  tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
  tree c_uint64_type_node;

  /* The uint64_type_node from tree.c is not compatible to the C99
     uint64_t data type.  What we want is c_uint64_type_node from
     c-common.c.  But since backend code is not supposed to interface
     with the frontend we recreate it here.  */
  if (TARGET_64BIT)
    c_uint64_type_node = long_unsigned_type_node;
  else
    c_uint64_type_node = long_long_unsigned_type_node;

#undef DEF_TYPE
#define DEF_TYPE(INDEX, NODE, CONST_P)			\
  if (s390_builtin_types[INDEX] == NULL)		\
    s390_builtin_types[INDEX] = (!CONST_P) ?		\
      (NODE) : build_type_variant ((NODE), 1, 0);

#undef DEF_POINTER_TYPE
#define DEF_POINTER_TYPE(INDEX, INDEX_BASE)			\
  if (s390_builtin_types[INDEX] == NULL)			\
    s390_builtin_types[INDEX] =					\
      build_pointer_type (s390_builtin_types[INDEX_BASE]);

#undef DEF_DISTINCT_TYPE
#define DEF_DISTINCT_TYPE(INDEX, INDEX_BASE)			\
  if (s390_builtin_types[INDEX] == NULL)			\
    s390_builtin_types[INDEX] =					\
      build_distinct_type_copy (s390_builtin_types[INDEX_BASE]);

#undef DEF_VECTOR_TYPE
#define DEF_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS)		\
  if (s390_builtin_types[INDEX] == NULL)			\
    s390_builtin_types[INDEX] =					\
      build_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_OPAQUE_VECTOR_TYPE
#define DEF_OPAQUE_VECTOR_TYPE(INDEX, INDEX_BASE, ELEMENTS)	\
  if (s390_builtin_types[INDEX] == NULL)			\
    s390_builtin_types[INDEX] =					\
      build_opaque_vector_type (s390_builtin_types[INDEX_BASE], ELEMENTS);

#undef DEF_FN_TYPE
#define DEF_FN_TYPE(INDEX, args...)			\
  if (s390_builtin_fn_types[INDEX] == NULL)		\
    s390_builtin_fn_types[INDEX] =			\
      build_function_type_list (args, NULL_TREE);
#undef DEF_OV_TYPE
#define DEF_OV_TYPE(...)
#include "s390-builtin-types.def"

#undef B_DEF
#define B_DEF(NAME, PATTERN, ATTRS, BFLAGS, OPFLAGS, FNTYPE)	\
  if (s390_builtin_decls[S390_BUILTIN_##NAME] == NULL)		\
    s390_builtin_decls[S390_BUILTIN_##NAME] =			\
      add_builtin_function ("__builtin_" #NAME,			\
			    s390_builtin_fn_types[FNTYPE],	\
			    S390_BUILTIN_##NAME,		\
			    BUILT_IN_MD,			\
			    NULL,				\
			    ATTRS);
#undef OB_DEF
#define OB_DEF(NAME, FIRST_VAR_NAME, LAST_VAR_NAME, BFLAGS, FNTYPE) \
  if (s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] \
      == NULL)							\
    s390_builtin_decls[S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX] = \
      add_builtin_function ("__builtin_" #NAME,			\
			    s390_builtin_fn_types[FNTYPE],	\
			    S390_OVERLOADED_BUILTIN_##NAME + S390_BUILTIN_MAX, \
			    BUILT_IN_MD,			\
			    NULL,				\
			    0);
#undef OB_DEF_VAR
#define OB_DEF_VAR(...)
#include "s390-builtins.def"
}
/* Return true if ARG is appropriate as argument number ARGNUM of
   builtin DECL.  The operand flags from s390-builtins.def have to
   be passed as OP_FLAGS.  */
static bool
s390_const_operand_ok (tree arg, int argnum, int op_flags, tree decl)
{
  if (O_UIMM_P (op_flags))
    {
      int bitwidths[] = { 1, 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_U1];

      if (!tree_fits_uhwi_p (arg)
	  || tree_to_uhwi (arg) > (HOST_WIDE_INT_1U << bitwidth) - 1)
	{
	  error ("constant argument %d for builtin %qF is out of range (0.."
		 HOST_WIDE_INT_PRINT_UNSIGNED ")",
		 argnum, decl,
		 (HOST_WIDE_INT_1U << bitwidth) - 1);
	  return false;
	}
    }

  if (O_SIMM_P (op_flags))
    {
      int bitwidths[] = { 2, 3, 4, 5, 8, 12, 16, 32 };
      int bitwidth = bitwidths[op_flags - O_S2];

      if (!tree_fits_shwi_p (arg)
	  || tree_to_shwi (arg) < -(HOST_WIDE_INT_1 << (bitwidth - 1))
	  || tree_to_shwi (arg) > ((HOST_WIDE_INT_1 << (bitwidth - 1)) - 1))
	{
	  error ("constant argument %d for builtin %qF is out of range ("
		 HOST_WIDE_INT_PRINT_DEC ".."
		 HOST_WIDE_INT_PRINT_DEC ")",
		 argnum, decl,
		 -(HOST_WIDE_INT_1 << (bitwidth - 1)),
		 (HOST_WIDE_INT_1 << (bitwidth - 1)) - 1);
	  return false;
	}
    }
  return true;
}
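
/* Illustrative example: an unsigned O_U4 operand gets bitwidth 4 from
   the table above, so the accepted range is 0..15; passing 16 for such
   an argument triggers the out-of-range error.  */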
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		     machine_mode mode ATTRIBUTE_UNUSED,
		     int ignore ATTRIBUTE_UNUSED)
{
#define MAX_ARGS 6

  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  rtx op[MAX_ARGS], pat;
  int arity;
  bool nonvoid;
  tree arg;
  call_expr_arg_iterator iter;
  unsigned int all_op_flags = opflags_for_builtin (fcode);
  machine_mode last_vec_mode = VOIDmode;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr,
	       "s390_expand_builtin, code = %4d, %s, bflags = 0x%x\n",
	       (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)),
	       bflags_for_builtin (fcode));
    }

  if (S390_USE_TARGET_ATTRIBUTE)
    {
      unsigned int bflags;

      bflags = bflags_for_builtin (fcode);
      if ((bflags & B_HTM) && !TARGET_HTM)
	{
	  error ("builtin %qF is not supported without -mhtm "
		 "(default with -march=zEC12 and higher).", fndecl);
	  return const0_rtx;
	}
      if (((bflags & B_VX) || (bflags & B_VXE)) && !TARGET_VX)
	{
	  error ("builtin %qF requires -mvx "
		 "(default with -march=z13 and higher).", fndecl);
	  return const0_rtx;
	}

      if ((bflags & B_VXE) && !TARGET_VXE)
	{
	  error ("builtin %qF requires z14 or higher.", fndecl);
	  return const0_rtx;
	}
    }
  if (fcode >= S390_OVERLOADED_BUILTIN_VAR_OFFSET
      && fcode < S390_ALL_BUILTIN_MAX)
    {
      gcc_unreachable ();
    }
  else if (fcode < S390_OVERLOADED_BUILTIN_OFFSET)
    {
      icode = code_for_builtin[fcode];
      /* Set a flag in the machine specific cfun part in order to support
	 saving/restoring of FPRs.  */
      if (fcode == S390_BUILTIN_tbegin || fcode == S390_BUILTIN_tbegin_retry)
	cfun->machine->tbegin_p = true;
    }
  else if (fcode < S390_OVERLOADED_BUILTIN_VAR_OFFSET)
    {
      error ("unresolved overloaded builtin");
      return const0_rtx;
    }
  else
    internal_error ("bad builtin fcode");

  if (icode == 0)
    internal_error ("bad builtin icode");

  nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;

  if (nonvoid)
    {
      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (!target
	  || GET_MODE (target) != tmode
	  || !(*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      /* There are builtins (e.g. vec_promote) with no vector
	 arguments but an element selector.  So we have to also look
	 at the vector return type when emitting the modulo
	 arithmetic.  */
      if (VECTOR_MODE_P (insn_data[icode].operand[0].mode))
	last_vec_mode = insn_data[icode].operand[0].mode;
    }

  arity = 0;
  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      rtx tmp_rtx;
      const struct insn_operand_data *insn_op;
      unsigned int op_flags = all_op_flags & ((1 << O_SHIFT) - 1);

      all_op_flags = all_op_flags >> O_SHIFT;

      if (arg == error_mark_node)
	return NULL_RTX;
      if (arity >= MAX_ARGS)
	return NULL_RTX;

      if (O_IMM_P (op_flags)
	  && TREE_CODE (arg) != INTEGER_CST)
	{
	  error ("constant value required for builtin %qF argument %d",
		 fndecl, arity + 1);
	  return const0_rtx;
	}

      if (!s390_const_operand_ok (arg, arity + 1, op_flags, fndecl))
	return const0_rtx;

      insn_op = &insn_data[icode].operand[arity + nonvoid];
      op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);

      /* expand_expr truncates constants to the target mode only if it
	 is "convenient".  However, our checks below rely on this
	 fact.  */
      if (CONST_INT_P (op[arity])
	  && SCALAR_INT_MODE_P (insn_op->mode)
	  && GET_MODE (op[arity]) != insn_op->mode)
	op[arity] = GEN_INT (trunc_int_for_mode (INTVAL (op[arity]),
						 insn_op->mode));

      /* Wrap the expanded RTX for pointer types into a MEM expr with
	 the proper mode.  This allows us to use e.g. (match_operand
	 "memory_operand"..) in the insn patterns instead of (mem
	 (match_operand "address_operand)).  This is helpful for
	 patterns not just accepting MEMs.  */
      if (POINTER_TYPE_P (TREE_TYPE (arg))
	  && insn_op->predicate != address_operand)
	op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);

      /* Expand the modulo operation required on element selectors.  */
      if (op_flags == O_ELEM)
	{
	  gcc_assert (last_vec_mode != VOIDmode);
	  op[arity] = simplify_expand_binop (SImode, code_to_optab (AND),
					     op[arity],
					     GEN_INT (GET_MODE_NUNITS (last_vec_mode) - 1),
					     NULL_RTX, 1, OPTAB_DIRECT);
	}

      /* Record the vector mode used for an element selector.  This assumes:
	 1. There is no builtin with two different vector modes and an element selector
	 2. The element selector comes after the vector type it is referring to.
	 This is currently true for all the builtins, but FIXME: we
	 should better check for that.  */
      if (VECTOR_MODE_P (insn_op->mode))
	last_vec_mode = insn_op->mode;
      if (insn_op->predicate (op[arity], insn_op->mode))
	{
	  arity++;
	  continue;
	}

      if (MEM_P (op[arity])
	  && insn_op->predicate == memory_operand
	  && (GET_MODE (XEXP (op[arity], 0)) == Pmode
	      || GET_MODE (XEXP (op[arity], 0)) == VOIDmode))
	{
	  op[arity] = replace_equiv_address (op[arity],
					     copy_to_mode_reg (Pmode,
					       XEXP (op[arity], 0)));
	}
      /* Some of the builtins require different modes/types than the
	 pattern in order to implement a specific API.  Instead of
	 adding many expanders which do the mode change we do it here.
	 E.g. s390_vec_add_u128 required to have vector unsigned char
	 arguments is mapped to addti3.  */
      else if (insn_op->mode != VOIDmode
	       && GET_MODE (op[arity]) != VOIDmode
	       && GET_MODE (op[arity]) != insn_op->mode
	       && ((tmp_rtx = simplify_gen_subreg (insn_op->mode, op[arity],
						   GET_MODE (op[arity]), 0))
		   != NULL_RTX))
	{
	  op[arity] = tmp_rtx;
	}
      else if (GET_MODE (op[arity]) == insn_op->mode
	       || GET_MODE (op[arity]) == VOIDmode
	       || (insn_op->predicate == address_operand
		   && GET_MODE (op[arity]) == Pmode))
	{
	  /* An address_operand usually has VOIDmode in the expander
	     so we cannot use this.  */
	  machine_mode target_mode =
	    (insn_op->predicate == address_operand
	     ? (machine_mode) Pmode : insn_op->mode);
	  op[arity] = copy_to_mode_reg (target_mode, op[arity]);
	}

      if (!insn_op->predicate (op[arity], insn_op->mode))
	{
	  error ("invalid argument %d for builtin %qF", arity + 1, fndecl);
	  return const0_rtx;
	}
      arity++;
    }
  switch (arity)
    {
    case 0:
      pat = GEN_FCN (icode) (target);
      break;
    case 1:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0]);
      else
	pat = GEN_FCN (icode) (op[0]);
      break;
    case 2:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1]);
      else
	pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 3:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
      else
	pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 4:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
      else
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    case 5:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
      else
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
      break;
    case 6:
      if (nonvoid)
	pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
      else
	pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
      break;
    default:
      gcc_unreachable ();
    }
  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  if (nonvoid)
    return target;
  else
    return const0_rtx;
}
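
/* Illustrative example of the element-selector handling above: for a
   builtin whose last vector mode is V4SImode, a selector operand S is
   rewritten as S & 3, so out-of-range selectors wrap around instead of
   naming a nonexistent element.  */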
static const int s390_hotpatch_hw_max = 1000000;
static int s390_hotpatch_hw_before_label = 0;
static int s390_hotpatch_hw_after_label = 0;

/* Check whether the hotpatch attribute is applied to a function and, if it has
   an argument, the argument is valid.  */

static tree
s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
				int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree expr;
  tree expr2;
  int err;

  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  if (args != NULL && TREE_CHAIN (args) != NULL)
    {
      expr = TREE_VALUE (args);
      expr2 = TREE_VALUE (TREE_CHAIN (args));
    }
  if (args == NULL || TREE_CHAIN (args) == NULL)
    err = 1;
  else if (TREE_CODE (expr) != INTEGER_CST
	   || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
	   || wi::gtu_p (wi::to_wide (expr), s390_hotpatch_hw_max))
    err = 1;
  else if (TREE_CODE (expr2) != INTEGER_CST
	   || !INTEGRAL_TYPE_P (TREE_TYPE (expr2))
	   || wi::gtu_p (wi::to_wide (expr2), s390_hotpatch_hw_max))
    err = 1;
  else
    err = 0;
  if (err)
    {
      error ("requested %qE attribute is not a comma separated pair of"
	     " non-negative integer constants or too large (max. %d)", name,
	     s390_hotpatch_hw_max);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Expand the s390_vector_bool type attribute.  */

static tree
s390_handle_vectorbool_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
				  tree args ATTRIBUTE_UNUSED,
				  int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  machine_mode mode;

  while (POINTER_TYPE_P (type)
	 || TREE_CODE (type) == FUNCTION_TYPE
	 || TREE_CODE (type) == METHOD_TYPE
	 || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);
  switch (mode)
    {
    case E_DImode: case E_V2DImode:
      result = s390_builtin_types[BT_BV2DI];
      break;
    case E_SImode: case E_V4SImode:
      result = s390_builtin_types[BT_BV4SI];
      break;
    case E_HImode: case E_V8HImode:
      result = s390_builtin_types[BT_BV8HI];
      break;
    case E_QImode: case E_V16QImode:
      result = s390_builtin_types[BT_BV16QI];
      break;
    default:
      break;
    }

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
static const struct attribute_spec s390_attribute_table[] = {
  { "hotpatch", 2, 2, true, false, false, false,
    s390_handle_hotpatch_attribute, NULL },
  { "s390_vector_bool", 0, 0, false, true, false, true,
    s390_handle_vectorbool_attribute, NULL },
  /* End element.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
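
/* Illustrative use of the attributes declared above:
     void f (void) __attribute__ ((hotpatch (1, 2)));
     typedef int bv4si __attribute__ ((vector_size (16), s390_vector_bool));
   hotpatch (1, 2) reserves one halfword of patch space before and two
   halfwords after the function label.  */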
/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label.  */
static int
s390_label_align (rtx_insn *label)
{
  rtx_insn *prev_insn = prev_active_insn (label);
  rtx set, src;

  if (prev_insn == NULL_RTX)
    goto old;

  set = single_set (prev_insn);

  if (set == NULL_RTX)
    goto old;

  src = SET_SRC (set);

  /* Don't align literal pool base labels.  */
  if (GET_CODE (src) == UNSPEC
      && XINT (src, 1) == UNSPEC_MAIN_BASE)
    return 0;

 old:
  return align_labels_log;
}
static GTY(()) rtx got_symbol;

/* Return the GOT table symbol.  The symbol will be created when the
   function is invoked for the first time.  */

static rtx
s390_got_symbol (void)
{
  if (!got_symbol)
    {
      got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
    }

  return got_symbol;
}
static scalar_int_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static scalar_int_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static scalar_int_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}
/* Return true if the back end supports mode MODE.  */
static bool
s390_scalar_mode_supported_p (scalar_mode mode)
{
  /* In contrast to the default implementation reject TImode constants on 31bit
     TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
}
/* Return true if the back end supports vector mode MODE.  */
static bool
s390_vector_mode_supported_p (machine_mode mode)
{
  machine_mode inner;

  if (!VECTOR_MODE_P (mode)
      || !TARGET_VX
      || GET_MODE_SIZE (mode) > 16)
    return false;

  inner = GET_MODE_INNER (mode);

  switch (inner)
    {
    case E_QImode:
    case E_HImode:
    case E_SImode:
    case E_DImode:
    case E_TImode:
    case E_SFmode:
    case E_DFmode:
    case E_TFmode:
      return true;
    default:
      return false;
    }
}
/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static machine_mode
s390_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;

  switch (m1)
    {
    case E_CCZmode:
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
	  || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
	return m2;
      return VOIDmode;

    case E_CCSmode:
    case E_CCUmode:
    case E_CCTmode:
    case E_CCSRmode:
    case E_CCURmode:
    case E_CCZ1mode:
      if (m2 == CCZmode)
	return m1;

      return VOIDmode;

    default:
      return VOIDmode;
    }
}
/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

static bool
s390_match_ccmode_set (rtx set, machine_mode req_mode)
{
  machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  /* These modes are supposed to be used only in CC consumer
     patterns.  */
  gcc_assert (req_mode != CCVIALLmode && req_mode != CCVIANYmode
	      && req_mode != CCVFALLmode && req_mode != CCVFANYmode);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return true;

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case E_CCZ1mode:
    case E_CCSmode:
    case E_CCSRmode:
    case E_CCUmode:
    case E_CCURmode:
    case E_CCLmode:
    case E_CCL1mode:
    case E_CCL2mode:
    case E_CCL3mode:
    case E_CCT1mode:
    case E_CCT2mode:
    case E_CCT3mode:
    case E_CCVEQmode:
    case E_CCVIHmode:
    case E_CCVIHUmode:
    case E_CCVFHmode:
    case E_CCVFHEmode:
      if (req_mode != set_mode)
	return false;
      break;

    case E_CCZmode:
      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
	  && req_mode != CCSRmode && req_mode != CCURmode
	  && req_mode != CCZ1mode)
	return false;
      break;

    case E_CCAPmode:
    case E_CCANmode:
      if (req_mode != CCAmode)
	return false;
      break;

    default:
      gcc_unreachable ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

static bool
s390_match_ccmode (rtx_insn *insn, machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
	rtx set = XVECEXP (PATTERN (insn), 0, i);
	if (GET_CODE (set) == SET)
	  if (!s390_match_ccmode_set (set, req_mode))
	    return false;
      }

  return true;
}
/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx), it is false
   if the instruction cannot (TM).  */

machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_WIDE_INT as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     int a;
     if ((a & (16 + 128)) == 16)         -> CCT1
     if ((a & (16 + 128)) == 128)        -> CCT2  */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
	return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
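
/* Worked example for the two-bit case above (illustrative): with
   OP1 = 144 (16 + 128) and OP2 = 16 we get bit1 = exact_log2 (16) = 4
   and bit0 = exact_log2 (144 ^ 16) = exact_log2 (128) = 7; since
   bit0 > bit1 the result is CCT1mode.  */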
/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
  switch (code)
    {
    case EQ:
    case NE:
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCAPmode;
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
	return CCAPmode;
      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
	   || GET_CODE (op1) == NEG)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCLmode;

      if (GET_CODE (op0) == AND)
	{
	  /* Check whether we can potentially do it via TM.  */
	  machine_mode ccmode;
	  ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
	  if (ccmode != VOIDmode)
	    {
	      /* Relax CCTmode to CCZmode to allow fall-back to AND
		 if that turns out to be beneficial.  */
	      return ccmode == CCTmode ? CCZmode : ccmode;
	    }
	}

      if (register_operand (op0, HImode)
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
	return CCT3mode;
      if (register_operand (op0, QImode)
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
	return CCT3mode;

      return CCZmode;

    case LE:
    case LT:
    case GE:
    case GT:
      /* The only overflow condition of NEG and ABS happens when
	 -INT_MAX is used as parameter, which stays negative. So
	 we have an overflow from a positive value to a negative.
	 Using CCAP mode the resulting cc can be used for comparisons.  */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCAPmode;

      /* If constants are involved in an add instruction it is possible to use
	 the resulting cc for comparisons with zero. Knowing the sign of the
	 constant the overflow behavior gets predictable. e.g.:
	   int a, b; if ((b = a + c) > 0)
	 with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
	      || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
		  /* Avoid INT32_MIN on 32 bit.  */
		  && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
	{
	  if (INTVAL (XEXP (op0, 1)) < 0)
	    return CCANmode;
	  else
	    return CCAPmode;
	}

      /* Fall through.  */
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
    case LTGT:
      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)
	return CCSRmode;
      return CCSmode;

    case LTU:
    case GEU:
      if (GET_CODE (op0) == PLUS
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCL1mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)
	return CCURmode;
      return CCUmode;

    case LEU:
    case GTU:
      if (GET_CODE (op0) == MINUS
	  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
	return CCL2mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (op1) != CONST_INT)
	return CCURmode;
      return CCUmode;

    default:
      gcc_unreachable ();
    }
}
/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

static void
s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
			      bool op0_preserve_value)
{
  if (op0_preserve_value)
    return;

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
	  && pos >= 0 && pos + len <= modesize
	  && modesize <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT block;
	  block = (HOST_WIDE_INT_1U << len) - 1;
	  block <<= modesize - pos - len;

	  *op0 = gen_rtx_AND (GET_MODE (inner), inner,
			      gen_int_mode (block, GET_MODE (inner)));
	}
    }
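
  /* Worked example (illustrative): for a 32-bit INNER with LEN = 2 and
     POS = 8 the mask is block = 3 << (32 - 8 - 2) = 0xc00000, so
     (zero_extract:SI x 2 8) == 0 becomes (x & 0xc00000) == 0.  */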
  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
	  && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
	  && (GET_MODE_SIZE (GET_MODE (inner))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
	  && ((INTVAL (mask)
	       & GET_MODE_MASK (GET_MODE (inner))
	       & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
	      == 0))
	inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
	{
	  int part = s390_single_part (XEXP (*op0, 1),
				       GET_MODE (inner), QImode, 0);
	  if (part >= 0)
	    {
	      mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
	      inner = adjust_address_nv (inner, QImode, part);
	      *op0 = gen_rtx_AND (QImode, inner, mask);
	    }
	}
    }
  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
	  & ~HOST_WIDE_INT_UC (0xffff)) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }
  /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
	{
	case EQ: new_code = EQ; break;
	case NE: new_code = NE; break;
	case LT: new_code = GTU; break;
	case GT: new_code = LTU; break;
	case LE: new_code = GEU; break;
	case GE: new_code = LEU; break;
	default: break;
	}

      if (new_code != UNKNOWN)
	{
	  *op0 = XVECEXP (*op0, 0, 0);
	  *code = (int) new_code;
	}
    }
  /* Remove redundant UNSPEC_CC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && CONST_INT_P (*op1))
    {
      enum rtx_code new_code = UNKNOWN;
      switch (GET_MODE (XVECEXP (*op0, 0, 0)))
	{
	case E_CCZmode:
	case E_CCRAWmode:
	  switch (*code)
	    {
	    case EQ: new_code = EQ; break;
	    case NE: new_code = NE; break;
	    default: break;
	    }
	  break;
	default: break;
	}

      if (new_code != UNKNOWN)
	{
	  /* For CCRAWmode put the required cc mask into the second
	     operand.  */
	  if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
	      && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
	    *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
	  *op0 = XVECEXP (*op0, 0, 0);
	  *code = (int) new_code;
	}
    }
  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
	  || (*code == NE && GET_CODE (*op0) == EQ))
	*code = EQ;
      else
	*code = NE;

      *op0 = XEXP (*op0, 0);
    }
  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int) swap_condition ((enum rtx_code) *code);
    }
  /* A comparison result is compared against zero.  Replace it with
     the (perhaps inverted) original comparison.
     This probably should be done by simplify_relational_operation.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && COMPARISON_P (*op0)
      && CC_REG_P (XEXP (*op0, 0)))
    {
      enum rtx_code new_code;

      if (*code == EQ)
	new_code = reversed_comparison_code_parts (GET_CODE (*op0),
						   XEXP (*op0, 0),
						   XEXP (*op0, 1), NULL);
      else
	new_code = GET_CODE (*op0);

      if (new_code != UNKNOWN)
	{
	  *code = (int) new_code;
	  *op1 = XEXP (*op0, 1);
	  *op0 = XEXP (*op0, 0);
	}
    }
}
/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      /* Do not output a redundant compare instruction if a
	 compare_and_swap pattern already computed the result and the
	 machine modes are compatible.  */
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
		  == GET_MODE (op0));
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
			    rtx cmp, rtx new_rtx, machine_mode ccmode)
{
  rtx cc;

  cc = gen_rtx_REG (ccmode, CC_REGNUM);
  switch (GET_MODE (mem))
    {
    case E_SImode:
      emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp,
							 new_rtx, cc));
      break;
    case E_DImode:
      emit_insn (gen_atomic_compare_and_swapdi_internal (old, mem, cmp,
							 new_rtx, cc));
      break;
    case E_TImode:
      emit_insn (gen_atomic_compare_and_swapti_internal (old, mem, cmp,
							 new_rtx, cc));
      break;
    case E_QImode:
    case E_HImode:
    default:
      gcc_unreachable ();
    }
  return s390_emit_compare (code, cc, const0_rtx);
}
/* Emit a jump instruction to TARGET and return it.  If COND is
   NULL_RTX, emit an unconditional jump, else a conditional jump under
   condition COND.  */

rtx_insn *
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (pc_rtx, target);
  return emit_jump_insn (insn);
}
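
/* Illustrative combined use of the two emitters above: to branch to
   LABEL when A > B one would write
     rtx cond = s390_emit_compare (GT, a, b);
     s390_emit_jump (label, cond);
   which emits the compare setting CC and a conditional jump on it.  */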
/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx
	      || (GET_MODE (XEXP (code, 0)) == CCRAWmode
		  && CONST_INT_P (XEXP (code, 1))));

  switch (GET_MODE (XEXP (code, 0)))
    {
    case E_CCZmode:
    case E_CCZ1mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	default: return -1;
	}
      break;

    case E_CCT1mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC1;
	case NE: return CC0 | CC2 | CC3;
	default: return -1;
	}
      break;

    case E_CCT2mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC2;
	case NE: return CC0 | CC1 | CC3;
	default: return -1;
	}
      break;

    case E_CCT3mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC3;
	case NE: return CC0 | CC1 | CC2;
	default: return -1;
	}
      break;

    case E_CCLmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0 | CC2;
	case NE: return CC1 | CC3;
	default: return -1;
	}
      break;

    case E_CCL1mode:
      switch (GET_CODE (code))
	{
	case LTU: return CC2 | CC3;  /* carry */
	case GEU: return CC0 | CC1;  /* no carry */
	default: return -1;
	}
      break;

    case E_CCL2mode:
      switch (GET_CODE (code))
	{
	case GTU: return CC0 | CC1;  /* borrow */
	case LEU: return CC2 | CC3;  /* no borrow */
	default: return -1;
	}
      break;

    case E_CCL3mode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0 | CC2;
	case NE: return CC1 | CC3;
	case LTU: return CC1;
	case GTU: return CC3;
	case LEU: return CC1 | CC2;
	case GEU: return CC2 | CC3;
	default: return -1;
	}

    case E_CCUmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LTU: return CC1;
	case GTU: return CC2;
	case LEU: return CC0 | CC1;
	case GEU: return CC0 | CC2;
	default: return -1;
	}
      break;

    case E_CCURmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC2 | CC1 | CC3;
	case LTU: return CC2;
	case GTU: return CC1;
	case LEU: return CC0 | CC2;
	case GEU: return CC0 | CC1;
	default: return -1;
	}
      break;

    case E_CCAPmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1 | CC3;
	case GT: return CC2;
	case LE: return CC0 | CC1 | CC3;
	case GE: return CC0 | CC2;
	default: return -1;
	}
      break;

    case E_CCANmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1;
	case GT: return CC2 | CC3;
	case LE: return CC0 | CC1;
	case GE: return CC0 | CC2 | CC3;
	default: return -1;
	}
      break;

    case E_CCSmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC1 | CC2 | CC3;
	case LT: return CC1;
	case GT: return CC2;
	case LE: return CC0 | CC1;
	case GE: return CC0 | CC2;
	case UNORDERED: return CC3;
	case ORDERED: return CC0 | CC1 | CC2;
	case UNEQ: return CC0 | CC3;
	case UNLT: return CC1 | CC3;
	case UNGT: return CC2 | CC3;
	case UNLE: return CC0 | CC1 | CC3;
	case UNGE: return CC0 | CC2 | CC3;
	case LTGT: return CC1 | CC2;
	default: return -1;
	}
      break;

    case E_CCSRmode:
      switch (GET_CODE (code))
	{
	case EQ: return CC0;
	case NE: return CC2 | CC1 | CC3;
	case LT: return CC2;
	case GT: return CC1;
	case LE: return CC0 | CC2;
	case GE: return CC0 | CC1;
	case UNORDERED: return CC3;
	case ORDERED: return CC0 | CC2 | CC1;
	case UNEQ: return CC0 | CC3;
	case UNLT: return CC2 | CC3;
	case UNGT: return CC1 | CC3;
	case UNLE: return CC0 | CC2 | CC3;
	case UNGE: return CC0 | CC1 | CC3;
	case LTGT: return CC2 | CC1;
	default: return -1;
	}
      break;

      /* Vector comparison modes.  */
      /* CC2 will never be set.  It however is part of the negated
	 masks.  */
    case E_CCVIALLmode:
      switch (GET_CODE (code))
	{
	case EQ:
	case GTU:
	case GT:
	case GE: return CC0;
	  /* The inverted modes are in fact *any* modes.  */
	case NE:
	case LEU:
	case LE:
	case LT: return CC3 | CC1 | CC2;
	default: return -1;
	}

    case E_CCVIANYmode:
      switch (GET_CODE (code))
	{
	case EQ:
	case GTU:
	case GT:
	case GE: return CC0 | CC1;
	  /* The inverted modes are in fact *all* modes.  */
	case NE:
	case LEU:
	case LE:
	case LT: return CC3 | CC2;
	default: return -1;
	}

    case E_CCVFALLmode:
      switch (GET_CODE (code))
	{
	case EQ:
	case GT:
	case GE: return CC0;
	  /* The inverted modes are in fact *any* modes.  */
	case NE:
	case UNLE:
	case UNLT: return CC3 | CC1 | CC2;
	default: return -1;
	}

    case E_CCVFANYmode:
      switch (GET_CODE (code))
	{
	case EQ:
	case GT:
	case GE: return CC0 | CC1;
	  /* The inverted modes are in fact *all* modes.  */
	case NE:
	case UNLE:
	case UNLT: return CC3 | CC2;
	default: return -1;
	}

    case E_CCRAWmode:
      switch (GET_CODE (code))
	{
	case EQ:
	  return INTVAL (XEXP (code, 1));
	case NE:
	  return (INTVAL (XEXP (code, 1))) ^ 0xf;
	default:
	  return -1;
	}

    default:
      return -1;
    }
}
/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
    {
    case EQ:
      return CC0;
    case NE:
      return CC1 | CC2;
    case LT:
    case LTU:
      return CC1;
    case GT:
    case GTU:
      return CC2;
    case LE:
    case LEU:
      return CC0 | CC1;
    case GE:
    case GEU:
      return CC0 | CC2;
    default:
      gcc_unreachable ();
    }
  return -1;
}
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && (XEXP (code, 1) == const0_rtx
	  || (GET_MODE (XEXP (code, 0)) == CCRAWmode
	      && CONST_INT_P (XEXP (code, 1)))))
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
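
/* Worked example (illustrative): for EQ in CCZmode the mask is
   CC0 = 8 and mnemonic[8] is "e", i.e. "branch on equal"; inverting
   gives 8 ^ 15 = 7, which selects "ne".  */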
/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask = (HOST_WIDE_INT_1U << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
	value = UINTVAL (op);
      else
	value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
	return value & part_mask;
    }

  gcc_unreachable ();
}
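
/* Worked example (illustrative): for OP = 0xff00, MODE = QImode and
   DEF = 0 the first byte part (0x00) matches DEF, the second one
   (0xff) does not, so the function returns 0xff.  */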
/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

int
s390_single_part (rtx op,
		  machine_mode mode,
		  machine_mode part_mode,
		  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = (HOST_WIDE_INT_1U << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
	value = UINTVAL (op);
      else
	value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
	{
	  if (part != -1)
	    return -1;
	  else
	    part = i;
	}
    }
  return part == -1 ? -1 : n_parts - 1 - part;
}
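
/* Worked example (illustrative): OP = 0x00ff0000, MODE = SImode,
   PART_MODE = QImode, DEF = 0: exactly one byte (0xff) differs from
   DEF; it is found at i = 2, so the result is
   n_parts - 1 - part = 4 - 1 - 2 = 1, counting parts from the most
   significant end.  */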
/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in (the lower SIZE bits of) IN.

   PSTART and PEND can be used to obtain the start and end
   position (inclusive) of the bitfield relative to 64
   bits.  *PSTART / *PEND gives the position of the first/last bit
   of the bitfield counting from the highest order bit starting
   with zero.  */

bool
s390_contiguous_bitmask_nowrap_p (unsigned HOST_WIDE_INT in, int size,
				  int *pstart, int *pend)
{
  int start;
  int end = -1;
  int lowbit = HOST_BITS_PER_WIDE_INT - 1;
  int highbit = HOST_BITS_PER_WIDE_INT - size;
  unsigned HOST_WIDE_INT bitmask = HOST_WIDE_INT_1U;

  gcc_assert (!!pstart == !!pend);
  for (start = lowbit; start >= highbit; bitmask <<= 1, start--)
    if (end == -1)
      {
	/* Look for the rightmost bit of a contiguous range of ones.  */
	if (bitmask & in)
	  /* Found it.  */
	  end = start;
      }
    else
      {
	/* Look for the first zero bit after the range of ones.  */
	if (! (bitmask & in))
	  /* Found it.  */
	  break;
      }
  /* We're one past the last one-bit.  */
  start++;

  if (end == -1)
    /* No one bits found.  */
    return false;

  if (start > highbit)
    {
      unsigned HOST_WIDE_INT mask;

      /* Calculate a mask for all bits beyond the contiguous bits.  */
      mask = ((~HOST_WIDE_INT_0U >> highbit)
	      & (~HOST_WIDE_INT_0U << (lowbit - start + 1)));
      if (mask & in)
	/* There are more bits set beyond the first range of one bits.  */
	return false;
    }

  if (pstart)
    {
      *pstart = start;
      *pend = end;
    }

  return true;
}
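
/* Worked example (illustrative): IN = 0xf0, SIZE = 32: the ones form a
   single contiguous run, and counting from the highest of the 64 bits
   the function returns *PSTART = 56 and *PEND = 59.  */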
/* Same as s390_contiguous_bitmask_nowrap_p but also returns true
   if ~IN contains a contiguous bitfield.  In that case, *END is <
   *START.

   If WRAP_P is true, a bitmask that wraps around is also tested.
   When a wraparound occurs *START is greater than *END (in
   non-null pointers), and the uppermost (64 - SIZE) bits are thus
   part of the range.  If WRAP_P is false, no wraparound is
   tested.  */

bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, bool wrap_p,
			   int size, int *start, int *end)
{
  int bs = HOST_BITS_PER_WIDE_INT;
  bool b;

  gcc_assert (!!start == !!end);
  if ((in & ((~HOST_WIDE_INT_0U) >> (bs - size))) == 0)
    /* This cannot be expressed as a contiguous bitmask.  Exit early because
       the second call of s390_contiguous_bitmask_nowrap_p would accept this as
       a contiguous bitmask.  */
    return false;

  b = s390_contiguous_bitmask_nowrap_p (in, size, start, end);
  if (b)
    return true;

  if (! wrap_p)
    return false;
  b = s390_contiguous_bitmask_nowrap_p (~in, size, start, end);
  if (b && start)
    {
      int s = *start;
      int e = *end;

      gcc_assert (s >= 1);
      *start = ((e + 1) & (bs - 1));
      *end = ((s - 1 + bs) & (bs - 1));
    }

  return b;
}
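
/* Worked example (illustrative): IN = 0xc000000000000003, SIZE = 64
   with WRAP_P = true: ~IN is contiguous, and after the renumbering
   above *START = 62 > *END = 1, signalling the wraparound.  */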
/* Return true if OP contains the same contiguous bitfield in *all*
   its elements.  START and END can be used to obtain the start and
   end position of the bitfield.

   START/END give the position of the first/last bit of the bitfield
   counting from the lowest order bit starting with zero.  In order to
   use these values for S/390 instructions this has to be converted to
   "bits big endian" style.  */

bool
s390_contiguous_bitmask_vector_p (rtx op, int *start, int *end)
{
  unsigned HOST_WIDE_INT mask;
  int size;
  rtx elt;
  bool b;

  gcc_assert (!!start == !!end);
  if (!const_vec_duplicate_p (op, &elt)
      || !CONST_INT_P (elt))
    return false;

  size = GET_MODE_UNIT_BITSIZE (GET_MODE (op));

  /* We cannot deal with V1TI/V1TF.  This would require a vgmq.  */
  if (size > 64)
    return false;

  mask = UINTVAL (elt);

  b = s390_contiguous_bitmask_p (mask, true, size, start, end);
  if (b)
    {
      if (start)
	{
	  *start -= (HOST_BITS_PER_WIDE_INT - size);
	  *end -= (HOST_BITS_PER_WIDE_INT - size);
	}
      return true;
    }
  else
    return false;
}
2413 /* Return true if C consists only of byte chunks being either 0 or
2414 0xff. If MASK is non-NULL a byte mask is generated which is
2415 appropriate for the vector generate byte mask instruction. */
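/* Example: a V16QI vector with element 0 being 0xff and all other
   elements zero yields *MASK == 0x8000; the most significant mask bit
   corresponds to byte 0 of the vector.  */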
2418 s390_bytemask_vector_p (rtx op, unsigned *mask)
2421 unsigned tmp_mask = 0;
2422 int nunit, unit_size;
2424 if (!VECTOR_MODE_P (GET_MODE (op))
2425 || GET_CODE (op) != CONST_VECTOR
2426 || !CONST_INT_P (XVECEXP (op, 0, 0)))
2429 nunit = GET_MODE_NUNITS (GET_MODE (op));
2430 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (op));
2432 for (i = 0; i < nunit; i++)
2434 unsigned HOST_WIDE_INT c;
2437 if (!CONST_INT_P (XVECEXP (op, 0, i)))
2440 c = UINTVAL (XVECEXP (op, 0, i));
2441 for (j = 0; j < unit_size; j++)
2443 if ((c & 0xff) != 0 && (c & 0xff) != 0xff)
2445 tmp_mask |= (c & 1) << ((nunit - 1 - i) * unit_size + j);
2446 c = c >> BITS_PER_UNIT;
2456 /* Check whether a rotate of ROTL followed by an AND of CONTIG is
2457 equivalent to a shift followed by the AND. In particular, CONTIG
2458 should not overlap the (rotated) bit 0/bit 63 gap. Negative values
2459 for ROTL indicate a rotate to the right. */
2462 s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
2467 ok = s390_contiguous_bitmask_nowrap_p (contig, bitsize, &start, &end);
2471 return (64 - end >= rotl);
2474 /* Translate "- rotate right" in BITSIZE mode to "rotate left" in
2475 DImode. */
2476 rotl = -rotl + (64 - bitsize);
2477 return (start >= rotl);
2481 /* Check whether we can (and want to) split a double-word
2482 move in mode MODE from SRC to DST into two single-word
2483 moves, moving the subword FIRST_SUBWORD first. */
2486 s390_split_ok_p (rtx dst, rtx src, machine_mode mode, int first_subword)
2488 /* Floating point and vector registers cannot be split. */
2489 if (FP_REG_P (src) || FP_REG_P (dst) || VECTOR_REG_P (src) || VECTOR_REG_P (dst))
2492 /* Non-offsettable memory references cannot be split. */
2493 if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
2494 || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
2497 /* Moving the first subword must not clobber a register
2498 needed to move the second subword. */
2499 if (register_operand (dst, mode))
2501 rtx subreg = operand_subword (dst, first_subword, 0, mode);
2502 if (reg_overlap_mentioned_p (subreg, src))
2509 /* Return true if it can be proven that [MEM1, MEM1 + SIZE]
2510 and [MEM2, MEM2 + SIZE] do overlap and false
2511 otherwise. */
2514 s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
2516 rtx addr1, addr2, addr_delta;
2517 HOST_WIDE_INT delta;
2519 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2525 addr1 = XEXP (mem1, 0);
2526 addr2 = XEXP (mem2, 0);
2528 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2530 /* This overlapping check is used by peepholes merging memory block operations.
2531 Overlapping operations would otherwise be recognized by the S/390 hardware
2532 and would fall back to a slower implementation. Allowing overlapping
2533 operations would lead to slow code but not to wrong code. Therefore we are
2534 somewhat optimistic if we cannot prove that the memory blocks are
2535 guaranteed to be disjoint.
2536 That's why we return false here although this may accept operations on
2537 overlapping memory areas. */
2538 if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
2541 delta = INTVAL (addr_delta);
2544 || (delta > 0 && delta < size)
2545 || (delta < 0 && -delta < size))
2551 /* Check whether the address of memory reference MEM2 equals exactly
2552 the address of memory reference MEM1 plus DELTA. Return true if
2553 we can prove this to be the case, false otherwise. */
2556 s390_offset_p (rtx mem1, rtx mem2, rtx delta)
2558 rtx addr1, addr2, addr_delta;
2560 if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
2563 addr1 = XEXP (mem1, 0);
2564 addr2 = XEXP (mem2, 0);
2566 addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
2567 if (!addr_delta || !rtx_equal_p (addr_delta, delta))
2573 /* Expand logical operator CODE in mode MODE with operands OPERANDS. */
2576 s390_expand_logical_operator (enum rtx_code code, machine_mode mode,
2579 machine_mode wmode = mode;
2580 rtx dst = operands[0];
2581 rtx src1 = operands[1];
2582 rtx src2 = operands[2];
2585 /* If we cannot handle the operation directly, use a temp register. */
2586 if (!s390_logical_operator_ok_p (operands))
2587 dst = gen_reg_rtx (mode);
2589 /* QImode and HImode patterns make sense only if we have a destination
2590 in memory. Otherwise perform the operation in SImode. */
2591 if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
2594 /* Widen operands if required. */
2597 if (GET_CODE (dst) == SUBREG
2598 && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
2600 else if (REG_P (dst))
2601 dst = gen_rtx_SUBREG (wmode, dst, 0);
2603 dst = gen_reg_rtx (wmode);
2605 if (GET_CODE (src1) == SUBREG
2606 && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
2608 else if (GET_MODE (src1) != VOIDmode)
2609 src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);
2611 if (GET_CODE (src2) == SUBREG
2612 && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
2614 else if (GET_MODE (src2) != VOIDmode)
2615 src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
2618 /* Emit the instruction. */
2619 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
2620 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
2621 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
2623 /* Fix up the destination if needed. */
2624 if (dst != operands[0])
2625 emit_move_insn (operands[0], gen_lowpart (mode, dst));
2628 /* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR). */
2631 s390_logical_operator_ok_p (rtx *operands)
2633 /* If the destination operand is in memory, it needs to coincide
2634 with one of the source operands. After reload, it has to be
2635 the first source operand. */
2636 if (GET_CODE (operands[0]) == MEM)
2637 return rtx_equal_p (operands[0], operands[1])
2638 || (!reload_completed && rtx_equal_p (operands[0], operands[2]));
2643 /* Narrow logical operation CODE of memory operand MEMOP with immediate
2644 operand IMMOP to switch from SS to SI type instructions. */
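/* Example: ANDing a HImode memory operand with 0x00ff can only change
   its most significant byte, so the access is narrowed to a QImode
   access at byte offset 0 with immediate 0x00.  */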
2647 s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
2649 int def = code == AND ? -1 : 0;
2653 gcc_assert (GET_CODE (*memop) == MEM);
2654 gcc_assert (!MEM_VOLATILE_P (*memop));
2656 mask = s390_extract_part (*immop, QImode, def);
2657 part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
2658 gcc_assert (part >= 0);
2660 *memop = adjust_address (*memop, QImode, part);
2661 *immop = gen_int_mode (mask, QImode);
2665 /* How to allocate a 'struct machine_function'. */
2667 static struct machine_function *
2668 s390_init_machine_status (void)
2670 return ggc_cleared_alloc<machine_function> ();
2673 /* Map for smallest class containing reg regno. */
2675 const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
2676 { GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 0 */
2677 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 4 */
2678 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 8 */
2679 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS, /* 12 */
2680 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 16 */
2681 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 20 */
2682 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 24 */
2683 FP_REGS, FP_REGS, FP_REGS, FP_REGS, /* 28 */
2684 ADDR_REGS, CC_REGS, ADDR_REGS, ADDR_REGS, /* 32 */
2685 ACCESS_REGS, ACCESS_REGS, VEC_REGS, VEC_REGS, /* 36 */
2686 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 40 */
2687 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 44 */
2688 VEC_REGS, VEC_REGS, VEC_REGS, VEC_REGS, /* 48 */
2689 VEC_REGS, VEC_REGS /* 52 */
2692 /* Return attribute type of insn. */
2694 static enum attr_type
2695 s390_safe_attr_type (rtx_insn *insn)
2697 if (recog_memoized (insn) >= 0)
2698 return get_attr_type (insn);
2703 /* Return true if DISP is a valid short displacement. */
2706 s390_short_displacement (rtx disp)
2708 /* No displacement is OK. */
2712 /* Without the long displacement facility we don't need to
2713 distinguish between long and short displacement. */
2714 if (!TARGET_LONG_DISPLACEMENT)
2717 /* Integer displacement in range. */
2718 if (GET_CODE (disp) == CONST_INT)
2719 return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;
2721 /* GOT offset is not OK, the GOT can be large. */
2722 if (GET_CODE (disp) == CONST
2723 && GET_CODE (XEXP (disp, 0)) == UNSPEC
2724 && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
2725 || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
2728 /* All other symbolic constants are literal pool references,
2729 which are OK as the literal pool must be small. */
2730 if (GET_CODE (disp) == CONST)
2736 /* Decompose a RTL expression ADDR for a memory address into
2737 its components, returned in OUT.
2739 Returns false if ADDR is not a valid memory address, true
2740 otherwise. If OUT is NULL, don't return the components,
2741 but check for validity only.
2743 Note: Only addresses in canonical form are recognized.
2744 LEGITIMIZE_ADDRESS should convert non-canonical forms to the
2745 canonical form so that they will be recognized. */
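/* Example: the canonical address (plus (plus (reg A) (reg B))
   (const_int D)) decomposes into index A, base B, and displacement
   D.  */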
2748 s390_decompose_address (rtx addr, struct s390_address *out)
2750 HOST_WIDE_INT offset = 0;
2751 rtx base = NULL_RTX;
2752 rtx indx = NULL_RTX;
2753 rtx disp = NULL_RTX;
2755 bool pointer = false;
2756 bool base_ptr = false;
2757 bool indx_ptr = false;
2758 bool literal_pool = false;
2760 /* We may need to substitute the literal pool base register into the address
2761 below. However, at this point we do not know which register is going to
2762 be used as base, so we substitute the arg pointer register. This is going
2763 to be treated as holding a pointer below -- it shouldn't be used for any
2764 other purpose. */
2765 rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);
2767 /* Decompose address into base + index + displacement. */
2769 if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
2772 else if (GET_CODE (addr) == PLUS)
2774 rtx op0 = XEXP (addr, 0);
2775 rtx op1 = XEXP (addr, 1);
2776 enum rtx_code code0 = GET_CODE (op0);
2777 enum rtx_code code1 = GET_CODE (op1);
2779 if (code0 == REG || code0 == UNSPEC)
2781 if (code1 == REG || code1 == UNSPEC)
2783 indx = op0; /* index + base */
2789 base = op0; /* base + displacement */
2794 else if (code0 == PLUS)
2796 indx = XEXP (op0, 0); /* index + base + disp */
2797 base = XEXP (op0, 1);
2808 disp = addr; /* displacement */
2810 /* Extract integer part of displacement. */
2814 if (GET_CODE (disp) == CONST_INT)
2816 offset = INTVAL (disp);
2819 else if (GET_CODE (disp) == CONST
2820 && GET_CODE (XEXP (disp, 0)) == PLUS
2821 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
2823 offset = INTVAL (XEXP (XEXP (disp, 0), 1));
2824 disp = XEXP (XEXP (disp, 0), 0);
2828 /* Strip off CONST here to avoid special case tests later. */
2829 if (disp && GET_CODE (disp) == CONST)
2830 disp = XEXP (disp, 0);
2832 /* We can convert literal pool addresses to
2833 displacements by basing them off the base register. */
2834 if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
2839 base = fake_pool_base, literal_pool = true;
2841 /* Mark up the displacement. */
2842 disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
2843 UNSPEC_LTREL_OFFSET);
2846 /* Validate base register. */
2849 if (GET_CODE (base) == UNSPEC)
2850 switch (XINT (base, 1))
2854 disp = gen_rtx_UNSPEC (Pmode,
2855 gen_rtvec (1, XVECEXP (base, 0, 0)),
2856 UNSPEC_LTREL_OFFSET);
2860 base = XVECEXP (base, 0, 1);
2863 case UNSPEC_LTREL_BASE:
2864 if (XVECLEN (base, 0) == 1)
2865 base = fake_pool_base, literal_pool = true;
2867 base = XVECEXP (base, 0, 1);
2874 if (!REG_P (base) || GET_MODE (base) != Pmode)
2877 if (REGNO (base) == STACK_POINTER_REGNUM
2878 || REGNO (base) == FRAME_POINTER_REGNUM
2879 || ((reload_completed || reload_in_progress)
2880 && frame_pointer_needed
2881 && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
2882 || REGNO (base) == ARG_POINTER_REGNUM
2884 && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
2885 pointer = base_ptr = true;
2887 if ((reload_completed || reload_in_progress)
2888 && base == cfun->machine->base_reg)
2889 pointer = base_ptr = literal_pool = true;
2892 /* Validate index register. */
2895 if (GET_CODE (indx) == UNSPEC)
2896 switch (XINT (indx, 1))
2900 disp = gen_rtx_UNSPEC (Pmode,
2901 gen_rtvec (1, XVECEXP (indx, 0, 0)),
2902 UNSPEC_LTREL_OFFSET);
2906 indx = XVECEXP (indx, 0, 1);
2909 case UNSPEC_LTREL_BASE:
2910 if (XVECLEN (indx, 0) == 1)
2911 indx = fake_pool_base, literal_pool = true;
2913 indx = XVECEXP (indx, 0, 1);
2920 if (!REG_P (indx) || GET_MODE (indx) != Pmode)
2923 if (REGNO (indx) == STACK_POINTER_REGNUM
2924 || REGNO (indx) == FRAME_POINTER_REGNUM
2925 || ((reload_completed || reload_in_progress)
2926 && frame_pointer_needed
2927 && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
2928 || REGNO (indx) == ARG_POINTER_REGNUM
2930 && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
2931 pointer = indx_ptr = true;
2933 if ((reload_completed || reload_in_progress)
2934 && indx == cfun->machine->base_reg)
2935 pointer = indx_ptr = literal_pool = true;
2938 /* Prefer to use pointer as base, not index. */
2939 if (base && indx && !base_ptr
2940 && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
2947 /* Validate displacement. */
2950 /* If virtual registers are involved, the displacement will change later
2951 anyway as the virtual registers get eliminated. This could make a
2952 valid displacement invalid, but it is more likely to make an invalid
2953 displacement valid, because we sometimes access the register save area
2954 via negative offsets to one of those registers.
2955 Thus we don't check the displacement for validity here. If after
2956 elimination the displacement turns out to be invalid after all,
2957 this is fixed up by reload in any case. */
2958 /* LRA always keeps displacements up to date, and we need to know
2959 that the displacement is right throughout LRA, not only at the
2960 final elimination. */
2961 if (lra_in_progress
2962 || (base != arg_pointer_rtx
2963 && indx != arg_pointer_rtx
2964 && base != return_address_pointer_rtx
2965 && indx != return_address_pointer_rtx
2966 && base != frame_pointer_rtx
2967 && indx != frame_pointer_rtx
2968 && base != virtual_stack_vars_rtx
2969 && indx != virtual_stack_vars_rtx))
2970 if (!DISP_IN_RANGE (offset))
2975 /* All the special cases are pointers. */
2978 /* In the small-PIC case, the linker converts @GOT
2979 and @GOTNTPOFF offsets to possible displacements. */
2980 if (GET_CODE (disp) == UNSPEC
2981 && (XINT (disp, 1) == UNSPEC_GOT
2982 || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
2988 /* Accept pool label offsets. */
2989 else if (GET_CODE (disp) == UNSPEC
2990 && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
2993 /* Accept literal pool references. */
2994 else if (GET_CODE (disp) == UNSPEC
2995 && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
2997 /* In case CSE pulled a non-literal-pool reference out of
2998 the pool we have to reject the address. This is
2999 especially important when loading the GOT pointer on non
3000 zarch CPUs. In this case the literal pool contains an lt
3001 relative offset to the _GLOBAL_OFFSET_TABLE_ label which
3002 will most likely exceed the displacement. */
3003 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
3004 || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
3007 orig_disp = gen_rtx_CONST (Pmode, disp);
3010 /* If we have an offset, make sure it does not
3011 exceed the size of the constant pool entry. */
3012 rtx sym = XVECEXP (disp, 0, 0);
3013 if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
3016 orig_disp = plus_constant (Pmode, orig_disp, offset);
3031 out->disp = orig_disp;
3032 out->pointer = pointer;
3033 out->literal_pool = literal_pool;
3039 /* Decompose a RTL expression OP for an address style operand into its
3040 components, and return the base register in BASE and the offset in
3041 OFFSET. While OP looks like an address it is never supposed to be
3042 used as such.
3044 Return true if OP is a valid address operand, false if not. */
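/* Example: (plus (reg R) (const_int 7)) yields *BASE == R and
   *OFFSET == 7.  */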
3047 s390_decompose_addrstyle_without_index (rtx op, rtx *base,
3048 HOST_WIDE_INT *offset)
3052 /* We can have an integer constant, an address register,
3053 or a sum of the two. */
3054 if (CONST_SCALAR_INT_P (op))
3059 if (op && GET_CODE (op) == PLUS && CONST_SCALAR_INT_P (XEXP (op, 1)))
3064 while (op && GET_CODE (op) == SUBREG)
3065 op = SUBREG_REG (op);
3067 if (op && GET_CODE (op) != REG)
3072 if (off == NULL_RTX)
3074 else if (CONST_INT_P (off))
3075 *offset = INTVAL (off);
3076 else if (CONST_WIDE_INT_P (off))
3077 /* The offset will be cut down to 12 bits anyway, so just take
3078 the lowest order chunk of the wide int. */
3079 *offset = CONST_WIDE_INT_ELT (off, 0);
3090 /* Return true if OP is a valid address without index. */
3093 s390_legitimate_address_without_index_p (rtx op)
3095 struct s390_address addr;
3097 if (!s390_decompose_address (XEXP (op, 0), &addr))
3106 /* Return TRUE if ADDR is an operand valid for a load/store relative
3107 instruction. Be aware that the alignment of the operand needs to
3108 be checked separately.
3109 Valid addresses are single references or a sum of a reference and a
3110 constant integer. Return these parts in SYMREF and ADDEND. You can
3111 pass NULL in SYMREF and/or ADDEND if you are not interested in these
3112 values. Literal pool references are *not* considered symbol
3113 references. */
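/* Example: (const (plus (symbol_ref "sym") (const_int 8))) yields
   *SYMREF == (symbol_ref "sym") and *ADDEND == 8.  */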
3116 s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
3118 HOST_WIDE_INT tmpaddend = 0;
3120 if (GET_CODE (addr) == CONST)
3121 addr = XEXP (addr, 0);
3123 if (GET_CODE (addr) == PLUS)
3125 if (!CONST_INT_P (XEXP (addr, 1)))
3128 tmpaddend = INTVAL (XEXP (addr, 1));
3129 addr = XEXP (addr, 0);
3132 if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
3133 || (GET_CODE (addr) == UNSPEC
3134 && (XINT (addr, 1) == UNSPEC_GOTENT
3135 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3140 *addend = tmpaddend;
3147 /* Return true if the address in OP is valid for constraint letter C
3148 if wrapped in a MEM rtx. Set LIT_POOL_OK to true if literal
3149 pool MEMs should be accepted. Only the Q, R, S, T constraint
3150 letters are allowed for C. */
3153 s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
3155 struct s390_address addr;
3156 bool decomposed = false;
3158 if (!address_operand (op, GET_MODE (op)))
3161 /* This check makes sure that no symbolic address (except literal
3162 pool references) are accepted by the R or T constraints. */
3163 if (s390_loadrelative_operand_p (op, NULL, NULL))
3166 /* Ensure literal pool references are only accepted if LIT_POOL_OK. */
3169 if (!s390_decompose_address (op, &addr))
3171 if (addr.literal_pool)
3176 /* With reload, we sometimes get intermediate address forms that are
3177 actually invalid as-is, but we need to accept them in the most
3178 generic cases below ('R' or 'T'), since reload will in fact fix
3179 them up. LRA behaves differently here; we never see such forms,
3180 but on the other hand, we need to strictly reject every invalid
3181 address form. Perform this check right up front. */
3182 if (lra_in_progress)
3184 if (!decomposed && !s390_decompose_address (op, &addr))
3191 case 'Q': /* no index short displacement */
3192 if (!decomposed && !s390_decompose_address (op, &addr))
3196 if (!s390_short_displacement (addr.disp))
3200 case 'R': /* with index short displacement */
3201 if (TARGET_LONG_DISPLACEMENT)
3203 if (!decomposed && !s390_decompose_address (op, &addr))
3205 if (!s390_short_displacement (addr.disp))
3208 /* Any invalid address here will be fixed up by reload,
3209 so accept it for the most generic constraint. */
3212 case 'S': /* no index long displacement */
3213 if (!decomposed && !s390_decompose_address (op, &addr))
3219 case 'T': /* with index long displacement */
3220 /* Any invalid address here will be fixed up by reload,
3221 so accept it for the most generic constraint. */
3231 /* Evaluates constraint strings described by the regular expression
3232 ([A|B|Z](Q|R|S|T))|Y and returns 1 if OP is a valid operand for
3233 the constraint given in STR, and 0 otherwise. */
3236 s390_mem_constraint (const char *str, rtx op)
3243 /* Check for offsettable variants of memory constraints. */
3244 if (!MEM_P (op) || MEM_VOLATILE_P (op))
3246 if ((reload_completed || reload_in_progress)
3247 ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
3249 return s390_check_qrst_address (str[1], XEXP (op, 0), true);
3251 /* Check for non-literal-pool variants of memory constraints. */
3254 return s390_check_qrst_address (str[1], XEXP (op, 0), false);
3259 if (GET_CODE (op) != MEM)
3261 return s390_check_qrst_address (c, XEXP (op, 0), true);
3263 /* Simply check for the basic form of a shift count. Reload will
3264 take care of making sure we have a proper base register. */
3265 if (!s390_decompose_addrstyle_without_index (op, NULL, NULL))
3269 return s390_check_qrst_address (str[1], op, true);
3277 /* Evaluates constraint strings starting with letter O. Input
3278 parameter C is the second letter following the "O" in the constraint
3279 string. Returns 1 if VALUE meets the respective constraint and 0
3280 otherwise. */
3283 s390_O_constraint_str (const char c, HOST_WIDE_INT value)
3291 return trunc_int_for_mode (value, SImode) == value;
3295 || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;
3298 return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;
3306 /* Evaluates constraint strings starting with letter N. Parameter STR
3307 contains the letters following letter "N" in the constraint string.
3308 Returns true if VALUE matches the constraint. */
3311 s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
3313 machine_mode mode, part_mode;
3315 int part, part_goal;
3321 part_goal = str[0] - '0';
3365 if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
3368 part = s390_single_part (GEN_INT (value), mode, part_mode, def);
3371 if (part_goal != -1 && part_goal != part)
3378 /* Returns true if the input parameter VALUE is a float zero. */
3381 s390_float_const_zero_p (rtx value)
3383 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
3384 && value == CONST0_RTX (GET_MODE (value)));
3387 /* Implement TARGET_REGISTER_MOVE_COST. */
3390 s390_register_move_cost (machine_mode mode,
3391 reg_class_t from, reg_class_t to)
3393 /* On s390, copy between fprs and gprs is expensive. */
3395 /* It becomes somewhat faster having ldgr/lgdr. */
3396 if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
3398 /* ldgr is single cycle. */
3399 if (reg_classes_intersect_p (from, GENERAL_REGS)
3400 && reg_classes_intersect_p (to, FP_REGS))
3402 /* lgdr needs 3 cycles. */
3403 if (reg_classes_intersect_p (to, GENERAL_REGS)
3404 && reg_classes_intersect_p (from, FP_REGS))
3408 /* Otherwise copying is done via memory. */
3409 if ((reg_classes_intersect_p (from, GENERAL_REGS)
3410 && reg_classes_intersect_p (to, FP_REGS))
3411 || (reg_classes_intersect_p (from, FP_REGS)
3412 && reg_classes_intersect_p (to, GENERAL_REGS)))
3418 /* Implement TARGET_MEMORY_MOVE_COST. */
3421 s390_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3422 reg_class_t rclass ATTRIBUTE_UNUSED,
3423 bool in ATTRIBUTE_UNUSED)
3428 /* Compute a (partial) cost for rtx X. Return true if the complete
3429 cost has been computed, and false if subexpressions should be
3430 scanned. In either case, *TOTAL contains the cost result. The
3431 initial value of *TOTAL is the default value computed by
3432 rtx_cost. It may be left unmodified. OUTER_CODE contains the
3433 code of the superexpression of x. */
3436 s390_rtx_costs (rtx x, machine_mode mode, int outer_code,
3437 int opno ATTRIBUTE_UNUSED,
3438 int *total, bool speed ATTRIBUTE_UNUSED)
3440 int code = GET_CODE (x);
3448 case CONST_WIDE_INT:
3455 /* Without this a conditional move instruction would be
3456 accounted as 3 * COSTS_N_INSNS (set, if_then_else,
3457 comparison operator). That's a bit pessimistic. */
3459 if (!TARGET_Z196 || GET_CODE (SET_SRC (x)) != IF_THEN_ELSE)
3462 rtx cond = XEXP (SET_SRC (x), 0);
3464 if (!CC_REG_P (XEXP (cond, 0)) || !CONST_INT_P (XEXP (cond, 1)))
3467 /* It is going to be a load/store on condition. Make it
3468 slightly more expensive than a normal load. */
3469 *total = COSTS_N_INSNS (1) + 1;
3471 rtx dst = SET_DEST (x);
3472 rtx then = XEXP (SET_SRC (x), 1);
3473 rtx els = XEXP (SET_SRC (x), 2);
3475 /* It is a real IF-THEN-ELSE. An additional move will be
3476 needed to implement that. */
3477 if (reload_completed
3478 && !rtx_equal_p (dst, then)
3479 && !rtx_equal_p (dst, els))
3480 *total += COSTS_N_INSNS (1) / 2;
3482 /* A minor penalty for constants we cannot directly handle. */
3483 if ((CONST_INT_P (then) || CONST_INT_P (els))
3484 && (!TARGET_Z13 || MEM_P (dst)
3485 || (CONST_INT_P (then) && !satisfies_constraint_K (then))
3486 || (CONST_INT_P (els) && !satisfies_constraint_K (els))))
3487 *total += COSTS_N_INSNS (1) / 2;
3489 /* A store on condition can only handle register src operands. */
3490 if (MEM_P (dst) && (!REG_P (then) || !REG_P (els)))
3491 *total += COSTS_N_INSNS (1) / 2;
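/* An IOR of an AND with an ASHIFT matching up like
   (a & ((1 << n) - 1)) | (b << n) is a single rotate-and-insert
   (risbg-style) instruction.  */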
3497 if (GET_CODE (XEXP (x, 0)) == AND
3498 && GET_CODE (XEXP (x, 1)) == ASHIFT
3499 && REG_P (XEXP (XEXP (x, 0), 0))
3500 && REG_P (XEXP (XEXP (x, 1), 0))
3501 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
3502 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
3503 && (UINTVAL (XEXP (XEXP (x, 0), 1)) ==
3504 (HOST_WIDE_INT_1U << UINTVAL (XEXP (XEXP (x, 1), 1))) - 1))
3506 *total = COSTS_N_INSNS (2);
3510 /* ~AND on a 128 bit mode. This can be done using a vector
3511 instruction. */
3513 && GET_CODE (XEXP (x, 0)) == NOT
3514 && GET_CODE (XEXP (x, 1)) == NOT
3515 && REG_P (XEXP (XEXP (x, 0), 0))
3516 && REG_P (XEXP (XEXP (x, 1), 0))
3517 && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) == 16
3518 && s390_hard_regno_mode_ok (VR0_REGNUM,
3519 GET_MODE (XEXP (XEXP (x, 0), 0))))
3521 *total = COSTS_N_INSNS (1);
3534 *total = COSTS_N_INSNS (1);
3539 *total = COSTS_N_INSNS (1);
3547 rtx left = XEXP (x, 0);
3548 rtx right = XEXP (x, 1);
3549 if (GET_CODE (right) == CONST_INT
3550 && CONST_OK_FOR_K (INTVAL (right)))
3551 *total = s390_cost->mhi;
3552 else if (GET_CODE (left) == SIGN_EXTEND)
3553 *total = s390_cost->mh;
3555 *total = s390_cost->ms; /* msr, ms, msy */
3560 rtx left = XEXP (x, 0);
3561 rtx right = XEXP (x, 1);
3564 if (GET_CODE (right) == CONST_INT
3565 && CONST_OK_FOR_K (INTVAL (right)))
3566 *total = s390_cost->mghi;
3567 else if (GET_CODE (left) == SIGN_EXTEND)
3568 *total = s390_cost->msgf;
3570 *total = s390_cost->msg; /* msgr, msg */
3572 else /* TARGET_31BIT */
3574 if (GET_CODE (left) == SIGN_EXTEND
3575 && GET_CODE (right) == SIGN_EXTEND)
3576 /* mulsidi case: mr, m */
3577 *total = s390_cost->m;
3578 else if (GET_CODE (left) == ZERO_EXTEND
3579 && GET_CODE (right) == ZERO_EXTEND
3580 && TARGET_CPU_ZARCH)
3581 /* umulsidi case: ml, mlr */
3582 *total = s390_cost->ml;
3584 /* Complex calculation is required. */
3585 *total = COSTS_N_INSNS (40);
3591 *total = s390_cost->mult_df;
3594 *total = s390_cost->mxbr;
3605 *total = s390_cost->madbr;
3608 *total = s390_cost->maebr;
3613 /* Negate in the third argument is free: FMSUB. */
3614 if (GET_CODE (XEXP (x, 2)) == NEG)
3616 *total += (rtx_cost (XEXP (x, 0), mode, FMA, 0, speed)
3617 + rtx_cost (XEXP (x, 1), mode, FMA, 1, speed)
3618 + rtx_cost (XEXP (XEXP (x, 2), 0), mode, FMA, 2, speed));
3625 if (mode == TImode) /* 128 bit division */
3626 *total = s390_cost->dlgr;
3627 else if (mode == DImode)
3629 rtx right = XEXP (x, 1);
3630 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3631 *total = s390_cost->dlr;
3632 else /* 64 by 64 bit division */
3633 *total = s390_cost->dlgr;
3635 else if (mode == SImode) /* 32 bit division */
3636 *total = s390_cost->dlr;
3643 rtx right = XEXP (x, 1);
3644 if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
3646 *total = s390_cost->dsgfr;
3648 *total = s390_cost->dr;
3649 else /* 64 by 64 bit division */
3650 *total = s390_cost->dsgr;
3652 else if (mode == SImode) /* 32 bit division */
3653 *total = s390_cost->dlr;
3654 else if (mode == SFmode)
3656 *total = s390_cost->debr;
3658 else if (mode == DFmode)
3660 *total = s390_cost->ddbr;
3662 else if (mode == TFmode)
3664 *total = s390_cost->dxbr;
3670 *total = s390_cost->sqebr;
3671 else if (mode == DFmode)
3672 *total = s390_cost->sqdbr;
3674 *total = s390_cost->sqxbr;
3679 if (outer_code == MULT || outer_code == DIV || outer_code == MOD
3680 || outer_code == PLUS || outer_code == MINUS
3681 || outer_code == COMPARE)
3686 *total = COSTS_N_INSNS (1);
3687 if (GET_CODE (XEXP (x, 0)) == AND
3688 && GET_CODE (XEXP (x, 1)) == CONST_INT
3689 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
3691 rtx op0 = XEXP (XEXP (x, 0), 0);
3692 rtx op1 = XEXP (XEXP (x, 0), 1);
3693 rtx op2 = XEXP (x, 1);
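/* A comparison of an AND against a constant mask can be carried out
   as a single TEST UNDER MASK instruction, so no cost beyond the
   initial COSTS_N_INSNS (1) is added.  */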
3695 if (memory_operand (op0, GET_MODE (op0))
3696 && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
3698 if (register_operand (op0, GET_MODE (op0))
3699 && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
3709 /* Return the cost of an address rtx ADDR. */
3712 s390_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
3713 addr_space_t as ATTRIBUTE_UNUSED,
3714 bool speed ATTRIBUTE_UNUSED)
3716 struct s390_address ad;
3717 if (!s390_decompose_address (addr, &ad))
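/* An address making use of an index register is slightly more
   expensive than a plain base + displacement address.  */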
3720 return ad.indx? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
3723 /* Implement targetm.vectorize.builtin_vectorization_cost. */
3725 s390_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
3727 int misalign ATTRIBUTE_UNUSED)
3729 switch (type_of_cost)
3737 case vector_gather_load:
3738 case vector_scatter_store:
3741 case cond_branch_not_taken:
3743 case vec_promote_demote:
3744 case unaligned_load:
3745 case unaligned_store:
3748 case cond_branch_taken:
3752 return TYPE_VECTOR_SUBPARTS (vectype) - 1;
3759 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
3760 otherwise return 0. */
3763 tls_symbolic_operand (rtx op)
3765 if (GET_CODE (op) != SYMBOL_REF)
3767 return SYMBOL_REF_TLS_MODEL (op);
3770 /* Split DImode access register reference REG (on 64-bit) into its constituent
3771 low and high parts, and store them into LO and HI. Note that gen_lowpart/
3772 gen_highpart cannot be used as they assume all registers are word-sized,
3773 while our access registers have only half that size. */
3776 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
3778 gcc_assert (TARGET_64BIT);
3779 gcc_assert (ACCESS_REG_P (reg));
3780 gcc_assert (GET_MODE (reg) == DImode);
3781 gcc_assert (!(REGNO (reg) & 1));
3783 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
3784 *hi = gen_rtx_REG (SImode, REGNO (reg));
3787 /* Return true if OP contains a symbol reference. */
3790 symbolic_reference_mentioned_p (rtx op)
3795 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3798 fmt = GET_RTX_FORMAT (GET_CODE (op));
3799 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3805 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3806 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3810 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3817 /* Return true if OP contains a reference to a thread-local symbol. */
3820 tls_symbolic_reference_mentioned_p (rtx op)
3825 if (GET_CODE (op) == SYMBOL_REF)
3826 return tls_symbolic_operand (op);
3828 fmt = GET_RTX_FORMAT (GET_CODE (op));
3829 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3835 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3836 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3840 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
3848 /* Return true if OP is a legitimate general operand when
3849 generating PIC code. It is given that flag_pic is on
3850 and that OP satisfies CONSTANT_P. */
3853 legitimate_pic_operand_p (rtx op)
3855 /* Accept all non-symbolic constants. */
3856 if (!SYMBOLIC_CONST (op))
3859 /* Reject everything else; must be handled
3860 via emit_symbolic_move. */
3864 /* Returns true if the constant value OP is a legitimate general operand.
3865 It is given that OP satisfies CONSTANT_P. */
3868 s390_legitimate_constant_p (machine_mode mode, rtx op)
3870 if (TARGET_VX && VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
3872 if (GET_MODE_SIZE (mode) != 16)
3875 if (!satisfies_constraint_j00 (op)
3876 && !satisfies_constraint_jm1 (op)
3877 && !satisfies_constraint_jKK (op)
3878 && !satisfies_constraint_jxx (op)
3879 && !satisfies_constraint_jyy (op))
3883 /* Accept all non-symbolic constants. */
3884 if (!SYMBOLIC_CONST (op))
3887 /* Accept immediate LARL operands. */
3888 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
3891 /* Thread-local symbols are never legal constants. This is
3892 so that emit_call knows that computing such addresses
3893 might require a function call. */
3894 if (TLS_SYMBOLIC_CONST (op))
3897 /* In the PIC case, symbolic constants must *not* be
3898 forced into the literal pool. We accept them here,
3899 so that they will be handled by emit_symbolic_move. */
3903 /* All remaining non-PIC symbolic constants are
3904 forced into the literal pool. */
3908 /* Determine if it's legal to put X into the constant pool. This
3909 is not possible if X contains the address of a symbol that is
3910 not constant (TLS) or not known at final link time (PIC). */
3913 s390_cannot_force_const_mem (machine_mode mode, rtx x)
3915 switch (GET_CODE (x))
3919 case CONST_WIDE_INT:
3921 /* Accept all non-symbolic constants. */
3925 /* Labels are OK iff we are non-PIC. */
3926 return flag_pic != 0;
3929 /* 'Naked' TLS symbol references are never OK,
3930 non-TLS symbols are OK iff we are non-PIC. */
3931 if (tls_symbolic_operand (x))
3934 return flag_pic != 0;
3937 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
3940 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
3941 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
3944 switch (XINT (x, 1))
3946 /* Only lt-relative or GOT-relative UNSPECs are OK. */
3947 case UNSPEC_LTREL_OFFSET:
3955 case UNSPEC_GOTNTPOFF:
3956 case UNSPEC_INDNTPOFF:
3959 /* If the literal pool shares the code section, we put
3960 execute template placeholders into the pool as well. */
3962 return TARGET_CPU_ZARCH;
3974 /* Returns true if the constant value OP is a legitimate general
3975 operand during and after reload. The difference to
3976 legitimate_constant_p is that this function will not accept
3977 a constant that would need to be forced to the literal pool
3978 before it can be used as operand.
3979 This function accepts all constants which can be loaded directly
3980 into a GPR. */
3983 legitimate_reload_constant_p (rtx op)
3985 /* Accept la(y) operands. */
3986 if (GET_CODE (op) == CONST_INT
3987 && DISP_IN_RANGE (INTVAL (op)))
3990 /* Accept l(g)hi/l(g)fi operands. */
3991 if (GET_CODE (op) == CONST_INT
3992 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
3995 /* Accept lliXX operands. */
3997 && GET_CODE (op) == CONST_INT
3998 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
3999 && s390_single_part (op, word_mode, HImode, 0) >= 0)
4003 && GET_CODE (op) == CONST_INT
4004 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
4005 && s390_single_part (op, word_mode, SImode, 0) >= 0)
4008 /* Accept larl operands. */
4009 if (TARGET_CPU_ZARCH
4010 && larl_operand (op, VOIDmode))
4013 /* Accept floating-point zero operands that fit into a single GPR. */
4014 if (GET_CODE (op) == CONST_DOUBLE
4015 && s390_float_const_zero_p (op)
4016 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
4019 /* Accept double-word operands that can be split. */
4020 if (GET_CODE (op) == CONST_WIDE_INT
4021 || (GET_CODE (op) == CONST_INT
4022 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op)))
4024 machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
4025 rtx hi = operand_subword (op, 0, 0, dword_mode);
4026 rtx lo = operand_subword (op, 1, 0, dword_mode);
4027 return legitimate_reload_constant_p (hi)
4028 && legitimate_reload_constant_p (lo);
4031 /* Everything else cannot be handled without reload. */
4035 /* Returns true if the constant value OP is a legitimate fp operand
4036 during and after reload.
4037 This function accepts all constants which can be loaded directly
4038 into an FPR. */
4041 legitimate_reload_fp_constant_p (rtx op)
4043 /* Accept floating-point zero operands if the load zero instruction
4044 can be used. Prior to z196 the load fp zero instruction caused a
4045 performance penalty if the result is used as BFP number. */
4047 && GET_CODE (op) == CONST_DOUBLE
4048 && s390_float_const_zero_p (op))
4054 /* Returns true if the constant value OP is a legitimate vector operand
4055 during and after reload.
4056 This function accepts all constants which can be loaded directly
4057 into a VR. */
4060 legitimate_reload_vector_constant_p (rtx op)
4062 if (TARGET_VX && GET_MODE_SIZE (GET_MODE (op)) == 16
4063 && (satisfies_constraint_j00 (op)
4064 || satisfies_constraint_jm1 (op)
4065 || satisfies_constraint_jKK (op)
4066 || satisfies_constraint_jxx (op)
4067 || satisfies_constraint_jyy (op)))
4073 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
4074 return the class of reg to actually use. */
4077 s390_preferred_reload_class (rtx op, reg_class_t rclass)
4079 switch (GET_CODE (op))
4081 /* Constants we cannot reload into general registers
4082 must be forced into the literal pool. */
4086 case CONST_WIDE_INT:
4087 if (reg_class_subset_p (GENERAL_REGS, rclass)
4088 && legitimate_reload_constant_p (op))
4089 return GENERAL_REGS;
4090 else if (reg_class_subset_p (ADDR_REGS, rclass)
4091 && legitimate_reload_constant_p (op))
4093 else if (reg_class_subset_p (FP_REGS, rclass)
4094 && legitimate_reload_fp_constant_p (op))
4096 else if (reg_class_subset_p (VEC_REGS, rclass)
4097 && legitimate_reload_vector_constant_p (op))
4102 /* If a symbolic constant or a PLUS is reloaded,
4103 it is most likely being used as an address, so
4104 prefer ADDR_REGS. If RCLASS is not a superset
4105 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
4107 /* Symrefs cannot be pushed into the literal pool with -fPIC
4108 so we *MUST NOT* return NO_REGS for these cases
4109 (s390_cannot_force_const_mem will return true).
4111 On the other hand we MUST return NO_REGS for symrefs with
4112 invalid addend which might have been pushed to the literal
4113 pool (no -fPIC). Usually we would expect them to be
4114 handled via secondary reload but this does not happen if
4115 they are used as literal pool slot replacement in reload
4116 inheritance (see emit_input_reload_insns). */
4117 if (TARGET_CPU_ZARCH
4118 && GET_CODE (XEXP (op, 0)) == PLUS
4119 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
4120 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
4122 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
4130 if (!legitimate_reload_constant_p (op))
4134 /* load address will be used. */
4135 if (reg_class_subset_p (ADDR_REGS, rclass))
4147 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
4148 multiple of ALIGNMENT and the SYMBOL_REF being naturally
4149 aligned. */
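/* Example: (const (plus (symbol_ref "x") (const_int 6))) fails for
   ALIGNMENT == 4 since the addend 6 is not a multiple of 4.  */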
4152 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
4154 HOST_WIDE_INT addend;
4157 /* The "required alignment" might be 0 (e.g. for certain structs
4158 accessed via BLKmode). Early abort in this case, as well as when
4159 an alignment > 8 is required. */
4160 if (alignment < 2 || alignment > 8)
4163 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4166 if (addend & (alignment - 1))
4169 if (GET_CODE (symref) == SYMBOL_REF)
4171 /* We have load-relative instructions for 2-byte, 4-byte, and
4172 8-byte alignment so allow only these. */
4175 case 8: return !SYMBOL_FLAG_NOTALIGN8_P (symref);
4176 case 4: return !SYMBOL_FLAG_NOTALIGN4_P (symref);
4177 case 2: return !SYMBOL_FLAG_NOTALIGN2_P (symref);
4178 default: return false;
4182 if (GET_CODE (symref) == UNSPEC
4183 && alignment <= UNITS_PER_LONG)
4189 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
4190 operand SCRATCH is used to reload the even part of the address and
4191 the odd part is added afterwards. */
4194 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
4196 HOST_WIDE_INT addend;
4199 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
4203 /* Easy case. The addend is even so larl will do fine. */
4204 emit_move_insn (reg, addr);
4207 /* We can leave the scratch register untouched if the target
4208 register is a valid base register. */
4209 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
4210 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
4213 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
4214 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
4217 emit_move_insn (scratch,
4218 gen_rtx_CONST (Pmode,
4219 gen_rtx_PLUS (Pmode, symref,
4220 GEN_INT (addend - 1))));
4222 emit_move_insn (scratch, symref);
4224 /* Increment the address using la in order to avoid clobbering cc. */
4225 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
4229 /* Generate what is necessary to move between REG and MEM using
4230 SCRATCH. The direction is given by TOMEM. */
4233 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
4235 /* Reload might have pulled a constant out of the literal pool.
4236 Force it back in. */
4237 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
4238 || GET_CODE (mem) == CONST_WIDE_INT
4239 || GET_CODE (mem) == CONST_VECTOR
4240 || GET_CODE (mem) == CONST)
4241 mem = force_const_mem (GET_MODE (reg), mem);
4243 gcc_assert (MEM_P (mem));
4245 /* For a load from memory we can leave the scratch register
4246 untouched if the target register is a valid base register. */
4248 && REGNO (reg) < FIRST_PSEUDO_REGISTER
4249 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
4250 && GET_MODE (reg) == GET_MODE (scratch))
4253 /* Load address into scratch register. Since we can't have a
4254 secondary reload for a secondary reload we have to cover the case
4255 where larl would need a secondary reload here as well. */
4256 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
4258 /* Now we can use a standard load/store to do the move. */
4260 emit_move_insn (replace_equiv_address (mem, scratch), reg);
4262 emit_move_insn (reg, replace_equiv_address (mem, scratch));
4265 /* Inform reload about cases where moving X with a mode MODE to a register in
4266 RCLASS requires an extra scratch or immediate register. Return the class
4267 needed for the immediate register. */
4270 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
4271 machine_mode mode, secondary_reload_info *sri)
4273 enum reg_class rclass = (enum reg_class) rclass_i;
4275 /* Intermediate register needed. */
4276 if (reg_classes_intersect_p (CC_REGS, rclass))
4277 return GENERAL_REGS;
4281 /* The vst/vl vector move instructions allow only for short
4282 displacements. */
4283 if (MEM_P (x)
4284 && GET_CODE (XEXP (x, 0)) == PLUS
4285 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4286 && !SHORT_DISP_IN_RANGE(INTVAL (XEXP (XEXP (x, 0), 1)))
4287 && reg_class_subset_p (rclass, VEC_REGS)
4288 && (!reg_class_subset_p (rclass, FP_REGS)
4289 || (GET_MODE_SIZE (mode) > 8
4290 && s390_class_max_nregs (FP_REGS, mode) == 1)))
4293 sri->icode = (TARGET_64BIT ?
4294 CODE_FOR_reloaddi_la_in :
4295 CODE_FOR_reloadsi_la_in);
4297 sri->icode = (TARGET_64BIT ?
4298 CODE_FOR_reloaddi_la_out :
4299 CODE_FOR_reloadsi_la_out);
4305 HOST_WIDE_INT offset;
4308 /* On z10 several optimizer steps may generate larl operands with
4309 an odd addend. */
4310 if (TARGET_Z10
4311 && s390_loadrelative_operand_p (x, &symref, &offset)
4313 && !SYMBOL_FLAG_NOTALIGN2_P (symref)
4314 && (offset & 1) == 1)
4315 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
4316 : CODE_FOR_reloadsi_larl_odd_addend_z10);
4318 /* Handle all the (mem (symref)) accesses we cannot use the z10
4319 instructions for. */
4320 if (MEM_P (x)
4321 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
4323 || !reg_class_subset_p (rclass, GENERAL_REGS)
4324 || GET_MODE_SIZE (mode) > UNITS_PER_WORD
4325 || !s390_check_symref_alignment (XEXP (x, 0),
4326 GET_MODE_SIZE (mode))))
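/* The macro below picks the mode-specific z10 reload expander,
   choosing between the DImode and SImode variants according to the
   target word size.  */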
4328 #define __SECONDARY_RELOAD_CASE(M,m) \
4331 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
4332 CODE_FOR_reload##m##di_tomem_z10; \
4334 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
4335 CODE_FOR_reload##m##si_tomem_z10; \
4338 switch (GET_MODE (x))
4340 __SECONDARY_RELOAD_CASE (QI, qi);
4341 __SECONDARY_RELOAD_CASE (HI, hi);
4342 __SECONDARY_RELOAD_CASE (SI, si);
4343 __SECONDARY_RELOAD_CASE (DI, di);
4344 __SECONDARY_RELOAD_CASE (TI, ti);
4345 __SECONDARY_RELOAD_CASE (SF, sf);
4346 __SECONDARY_RELOAD_CASE (DF, df);
4347 __SECONDARY_RELOAD_CASE (TF, tf);
4348 __SECONDARY_RELOAD_CASE (SD, sd);
4349 __SECONDARY_RELOAD_CASE (DD, dd);
4350 __SECONDARY_RELOAD_CASE (TD, td);
4351 __SECONDARY_RELOAD_CASE (V1QI, v1qi);
4352 __SECONDARY_RELOAD_CASE (V2QI, v2qi);
4353 __SECONDARY_RELOAD_CASE (V4QI, v4qi);
4354 __SECONDARY_RELOAD_CASE (V8QI, v8qi);
4355 __SECONDARY_RELOAD_CASE (V16QI, v16qi);
4356 __SECONDARY_RELOAD_CASE (V1HI, v1hi);
4357 __SECONDARY_RELOAD_CASE (V2HI, v2hi);
4358 __SECONDARY_RELOAD_CASE (V4HI, v4hi);
4359 __SECONDARY_RELOAD_CASE (V8HI, v8hi);
4360 __SECONDARY_RELOAD_CASE (V1SI, v1si);
4361 __SECONDARY_RELOAD_CASE (V2SI, v2si);
4362 __SECONDARY_RELOAD_CASE (V4SI, v4si);
4363 __SECONDARY_RELOAD_CASE (V1DI, v1di);
4364 __SECONDARY_RELOAD_CASE (V2DI, v2di);
4365 __SECONDARY_RELOAD_CASE (V1TI, v1ti);
4366 __SECONDARY_RELOAD_CASE (V1SF, v1sf);
4367 __SECONDARY_RELOAD_CASE (V2SF, v2sf);
4368 __SECONDARY_RELOAD_CASE (V4SF, v4sf);
4369 __SECONDARY_RELOAD_CASE (V1DF, v1df);
4370 __SECONDARY_RELOAD_CASE (V2DF, v2df);
4371 __SECONDARY_RELOAD_CASE (V1TF, v1tf);
4375 #undef __SECONDARY_RELOAD_CASE
4379 /* We need a scratch register when loading a PLUS expression which
4380 is not a legitimate operand of the LOAD ADDRESS instruction. */
4381 /* LRA can deal with transformation of plus op very well -- so we
4382 don't need to prompt LRA in this case. */
4383 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
4384 sri->icode = (TARGET_64BIT ?
4385 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
4387 /* Performing a multiword move from or to memory we have to make sure the
4388 second chunk in memory is addressable without causing a displacement
4389 overflow. If that would be the case we calculate the address in
4390 a scratch register. */
4391 if (MEM_P (x)
4392 && GET_CODE (XEXP (x, 0)) == PLUS
4393 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4394 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
4395 + GET_MODE_SIZE (mode) - 1))
4397 /* For GENERAL_REGS a displacement overflow is no problem if occurring
4398 in an s_operand address since we may fall back to lm/stm. So we only
4399 have to care about overflows in the b+i+d case. */
4400 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
4401 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
4402 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4403 /* For FP_REGS no lm/stm is available so this check is triggered
4404 for displacement overflows in b+i+d and b+d like addresses. */
4405 || (reg_classes_intersect_p (FP_REGS, rclass)
4406 && s390_class_max_nregs (FP_REGS, mode) > 1))
4409 sri->icode = (TARGET_64BIT ?
4410 CODE_FOR_reloaddi_la_in :
4411 CODE_FOR_reloadsi_la_in);
4413 sri->icode = (TARGET_64BIT ?
4414 CODE_FOR_reloaddi_la_out :
4415 CODE_FOR_reloadsi_la_out);
4419 /* A scratch address register is needed when a symbolic constant is
4420 copied to r0 compiling with -fPIC. In other cases the target
4421 register might be used as temporary (see legitimize_pic_address). */
4422 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
4423 sri->icode = (TARGET_64BIT ?
4424 CODE_FOR_reloaddi_PIC_addr :
4425 CODE_FOR_reloadsi_PIC_addr);
4427 /* Either scratch or no register needed. */
4431 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
4433 We need secondary memory to move data between GPRs and FPRs.
4435 - With DFP the ldgr/lgdr instructions are available. Due to the
4436 different alignment we cannot use them for SFmode. For 31 bit a
4437 64 bit value in GPR would be a register pair so here we still
4438 need to go via memory.
4440 - With z13 we can do the SF/SImode moves with vlgvf. Due to the
4441 overlapping of FPRs and VRs we still disallow TF/TD modes to be
4442 in full VRs so, as before, on z13 we do these moves via
4443 memory.
4445 FIXME: Should we try splitting it into two vlgvg's/vlvg's instead? */
4448 s390_secondary_memory_needed (machine_mode mode,
4449 reg_class_t class1, reg_class_t class2)
4451 return (((reg_classes_intersect_p (class1, VEC_REGS)
4452 && reg_classes_intersect_p (class2, GENERAL_REGS))
4453 || (reg_classes_intersect_p (class1, GENERAL_REGS)
4454 && reg_classes_intersect_p (class2, VEC_REGS)))
4455 && (!TARGET_DFP || !TARGET_64BIT || GET_MODE_SIZE (mode) != 8)
4456 && (!TARGET_VX || (SCALAR_FLOAT_MODE_P (mode)
4457 && GET_MODE_SIZE (mode) > 8)));
4460 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.
4462 get_secondary_mem widens its argument to BITS_PER_WORD which loses on 64bit
4463 because the movsi and movsf patterns don't handle r/f moves. */
4466 s390_secondary_memory_needed_mode (machine_mode mode)
4468 if (GET_MODE_BITSIZE (mode) < 32)
4469 return mode_for_size (32, GET_MODE_CLASS (mode), 0).require ();
4473 /* Generate code to load SRC, which is PLUS that is not a
4474 legitimate operand for the LA instruction, into TARGET.
4475 SCRATCH may be used as scratch register. */
4478 s390_expand_plus_operand (rtx target, rtx src,
4482 struct s390_address ad;
4484 /* src must be a PLUS; get its two operands. */
4485 gcc_assert (GET_CODE (src) == PLUS);
4486 gcc_assert (GET_MODE (src) == Pmode);
4488 /* Check if any of the two operands is already scheduled
4489 for replacement by reload. This can happen e.g. when
4490 float registers occur in an address. */
4491 sum1 = find_replacement (&XEXP (src, 0));
4492 sum2 = find_replacement (&XEXP (src, 1));
4493 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4495 /* If the address is already strictly valid, there's nothing to do. */
4496 if (!s390_decompose_address (src, &ad)
4497 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4498 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
4500 /* Otherwise, one of the operands cannot be an address register;
4501 we reload its value into the scratch register. */
4502 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
4504 emit_move_insn (scratch, sum1);
4507 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
4509 emit_move_insn (scratch, sum2);
4513 /* According to the way these invalid addresses are generated
4514 in reload.c, it should never happen (at least on s390) that
4515 *neither* of the PLUS components, after find_replacements
4516 was applied, is an address register. */
4517 if (sum1 == scratch && sum2 == scratch)
4523 src = gen_rtx_PLUS (Pmode, sum1, sum2);
4526 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
4527 is only ever performed on addresses, so we can mark the
4528 sum as legitimate for LA in any case. */
4529 s390_load_address (target, src);
4533 /* Return true if ADDR is a valid memory address.
4534 STRICT specifies whether strict register checking applies. */
4537 s390_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
4539 struct s390_address ad;
4542 && larl_operand (addr, VOIDmode)
4543 && (mode == VOIDmode
4544 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
4547 if (!s390_decompose_address (addr, &ad))
4552 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
4555 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
4561 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
4562 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
4566 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
4567 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
4573 /* Return true if OP is a valid operand for the LA instruction.
4574 In 31-bit, we need to prove that the result is used as an
4575 address, as LA performs only a 31-bit addition. */
4578 legitimate_la_operand_p (rtx op)
4580 struct s390_address addr;
4581 if (!s390_decompose_address (op, &addr))
4584 return (TARGET_64BIT || addr.pointer);
4587 /* Return true if it is valid *and* preferable to use LA to
4588 compute the sum of OP1 and OP2. */
4591 preferred_la_operand_p (rtx op1, rtx op2)
4593 struct s390_address addr;
4595 if (op2 != const0_rtx)
4596 op1 = gen_rtx_PLUS (Pmode, op1, op2);
4598 if (!s390_decompose_address (op1, &addr))
4600 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
4602 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
4605 /* Avoid LA instructions with index register on z196; it is
4606 preferable to use regular add instructions when possible.
4607 Starting with zEC12 the la with index register is "uncracked"
4608 again. */
4609 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
4612 if (!TARGET_64BIT && !addr.pointer)
4618 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
4619 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
4625 /* Emit a forced load-address operation to load SRC into DST.
4626 This will use the LOAD ADDRESS instruction even in situations
4627 where legitimate_la_operand_p (SRC) returns false. */
4630 s390_load_address (rtx dst, rtx src)
4633 emit_move_insn (dst, src);
4635 emit_insn (gen_force_la_31 (dst, src));
4638 /* Return true if it is OK to use SYMBOL_REF in a relative address. */
4641 s390_rel_address_ok_p (rtx symbol_ref)
4645 if (symbol_ref == s390_got_symbol () || CONSTANT_POOL_ADDRESS_P (symbol_ref))
4648 decl = SYMBOL_REF_DECL (symbol_ref);
4650 if (!flag_pic || SYMBOL_REF_LOCAL_P (symbol_ref))
4651 return (s390_pic_data_is_text_relative
4653 && TREE_CODE (decl) == FUNCTION_DECL));
4658 /* Return a legitimate reference for ORIG (an address) using the
4659 register REG. If REG is 0, a new pseudo is generated.
4661 There are two types of references that must be handled:
4663 1. Global data references must load the address from the GOT, via
4664 the PIC reg. An insn is emitted to do this load, and the reg is
4667 2. Static data references, constant pool addresses, and code labels
4668 compute the address as an offset from the GOT, whose base is in
4669 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
4670 differentiate them from global data objects. The returned
4671 address is the PIC reg + an unspec constant.
4673 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
4674 reg also appears in the address. */
4677 legitimize_pic_address (rtx orig, rtx reg)
4680 rtx addend = const0_rtx;
4683 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
4685 if (GET_CODE (addr) == CONST)
4686 addr = XEXP (addr, 0);
4688 if (GET_CODE (addr) == PLUS)
4690 addend = XEXP (addr, 1);
4691 addr = XEXP (addr, 0);
4694 if ((GET_CODE (addr) == LABEL_REF
4695 || (SYMBOL_REF_P (addr) && s390_rel_address_ok_p (addr))
4696 || (GET_CODE (addr) == UNSPEC &&
4697 (XINT (addr, 1) == UNSPEC_GOTENT
4698 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
4699 && GET_CODE (addend) == CONST_INT)
4701 /* This can be locally addressed. */
4703 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
4704 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
4705 gen_rtx_CONST (Pmode, addr) : addr);
4707 if (TARGET_CPU_ZARCH
4708 && larl_operand (const_addr, VOIDmode)
4709 && INTVAL (addend) < HOST_WIDE_INT_1 << 31
4710 && INTVAL (addend) >= -(HOST_WIDE_INT_1 << 31))
4712 if (INTVAL (addend) & 1)
4714 /* LARL can't handle odd offsets, so emit a pair of LARL
4715 and LA. */
4716 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4718 if (!DISP_IN_RANGE (INTVAL (addend)))
4720 HOST_WIDE_INT even = INTVAL (addend) - 1;
4721 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
4722 addr = gen_rtx_CONST (Pmode, addr);
4723 addend = const1_rtx;
4726 emit_move_insn (temp, addr);
4727 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
4731 s390_load_address (reg, new_rtx);
4737 /* If the offset is even, we can just use LARL. This
4738 will happen automatically. */
4743 /* No larl - Access local symbols relative to the GOT. */
4745 rtx temp = reg? reg : gen_reg_rtx (Pmode);
4747 if (reload_in_progress || reload_completed)
4748 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4750 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
4751 if (addend != const0_rtx)
4752 addr = gen_rtx_PLUS (Pmode, addr, addend);
4753 addr = gen_rtx_CONST (Pmode, addr);
4754 addr = force_const_mem (Pmode, addr);
4755 emit_move_insn (temp, addr);
4757 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4760 s390_load_address (reg, new_rtx);
4765 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
4767 /* A non-local symbol reference without addend.
4769 The symbol ref is wrapped into an UNSPEC to make sure the
4770 proper operand modifier (@GOT or @GOTENT) will be emitted.
4771 This will tell the linker to put the symbol into the GOT.
4773 Additionally the code dereferencing the GOT slot is emitted here.
4775 An addend to the symref needs to be added afterwards.
4776 legitimize_pic_address calls itself recursively to handle
4777 that case. So no need to do it here. */
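      /* Depending on the CPU level and the expected GOT size one of
	 three strategies is used below: a PC-relative load from the
	 GOT slot (lgrl), a displacement off the GOT pointer
	 (lg ...(r12)), or a LARL of the GOT slot address followed by
	 a load.  */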
      if (reg == 0)
	reg = gen_reg_rtx (Pmode);
4784 /* Use load relative if possible.
4785 lgrl <target>, sym@GOTENT */
4786 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4787 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4788 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
4790 emit_move_insn (reg, new_rtx);
4793 else if (flag_pic == 1)
4795 /* Assume GOT offset is a valid displacement operand (< 4k
4796 or < 512k with z990). This is handled the same way in
4797 both 31- and 64-bit code (@GOT).
4798 lg <target>, sym@GOT(r12) */
4800 if (reload_in_progress || reload_completed)
4801 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4803 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4804 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4805 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
4806 new_rtx = gen_const_mem (Pmode, new_rtx);
4807 emit_move_insn (reg, new_rtx);
4810 else if (TARGET_CPU_ZARCH)
4812 /* If the GOT offset might be >= 4k, we determine the position
4813 of the GOT entry via a PC-relative LARL (@GOTENT).
4814 larl temp, sym@GOTENT
4815 lg <target>, 0(temp) */
4817 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4819 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4820 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4822 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
4823 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
4824 emit_move_insn (temp, new_rtx);
4826 new_rtx = gen_const_mem (Pmode, temp);
4827 emit_move_insn (reg, new_rtx);
4833 /* If the GOT offset might be >= 4k, we have to load it
4834 from the literal pool (@GOT).
4836 lg temp, lit-litbase(r13)
4837 lg <target>, 0(temp)
4838 lit: .long sym@GOT */
4840 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4842 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
4843 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
4845 if (reload_in_progress || reload_completed)
4846 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4848 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
4849 addr = gen_rtx_CONST (Pmode, addr);
4850 addr = force_const_mem (Pmode, addr);
4851 emit_move_insn (temp, addr);
4853 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4854 new_rtx = gen_const_mem (Pmode, new_rtx);
4855 emit_move_insn (reg, new_rtx);
4859 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
4861 gcc_assert (XVECLEN (addr, 0) == 1);
4862 switch (XINT (addr, 1))
	    /* These address symbols (or PLT slots) relative to the GOT
	       (not GOT slots!).  In general this will exceed the
	       displacement range so these values belong in the literal
	       pool.  */
4870 new_rtx = force_const_mem (Pmode, orig);
4873 /* For -fPIC the GOT size might exceed the displacement
4874 range so make sure the value is in the literal pool. */
4877 new_rtx = force_const_mem (Pmode, orig);
	    /* For @GOTENT larl is used.  This is handled like local
	       symbol refs.  */
4886 /* @PLT is OK as is on 64-bit, must be converted to
4887 GOT-relative @PLTOFF on 31-bit. */
4889 if (!TARGET_CPU_ZARCH)
	      rtx temp = reg ? reg : gen_reg_rtx (Pmode);
4893 if (reload_in_progress || reload_completed)
4894 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
4896 addr = XVECEXP (addr, 0, 0);
	      addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
				     UNSPEC_PLTOFF);
4899 if (addend != const0_rtx)
4900 addr = gen_rtx_PLUS (Pmode, addr, addend);
4901 addr = gen_rtx_CONST (Pmode, addr);
4902 addr = force_const_mem (Pmode, addr);
4903 emit_move_insn (temp, addr);
4905 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
4908 s390_load_address (reg, new_rtx);
4913 /* On 64 bit larl can be used. This case is handled like
4914 local symbol refs. */
4918 /* Everything else cannot happen. */
4923 else if (addend != const0_rtx)
4925 /* Otherwise, compute the sum. */
4927 rtx base = legitimize_pic_address (addr, reg);
4928 new_rtx = legitimize_pic_address (addend,
4929 base == reg ? NULL_RTX : reg);
4930 if (GET_CODE (new_rtx) == CONST_INT)
4931 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
4934 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
4936 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
4937 new_rtx = XEXP (new_rtx, 1);
4939 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
4942 if (GET_CODE (new_rtx) == CONST)
4943 new_rtx = XEXP (new_rtx, 0);
4944 new_rtx = force_operand (new_rtx, 0);
4950 /* Load the thread pointer into a register. */
4953 s390_get_thread_pointer (void)
4955 rtx tp = gen_reg_rtx (Pmode);
4957 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
4958 mark_reg_pointer (tp, BITS_PER_WORD);
/* Emit a TLS call insn.  The call target is the SYMBOL_REF stored
   in s390_tls_symbol which always refers to __tls_get_offset.
   The returned offset is written to RESULT_REG and a USE rtx is
   generated for TLS_CALL.  */
4968 static GTY(()) rtx s390_tls_symbol;
4971 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
  if (!flag_pic)
    emit_insn (s390_load_got ());
4978 if (!s390_tls_symbol)
4979 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
4981 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
4982 gen_rtx_REG (Pmode, RETURN_REGNUM));
4984 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
4985 RTL_CONST_CALL_P (insn) = 1;
4988 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
4989 this (thread-local) address. REG may be used as temporary. */
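/* Four TLS access models are handled below: global-dynamic and
   local-dynamic both call __tls_get_offset, initial-exec loads the
   thread-pointer offset from the GOT, and local-exec takes it from
   the literal pool; in all cases the offset is finally added to the
   thread pointer.  */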
4992 legitimize_tls_address (rtx addr, rtx reg)
4994 rtx new_rtx, tls_call, temp, base, r2;
4997 if (GET_CODE (addr) == SYMBOL_REF)
4998 switch (tls_symbolic_operand (addr))
5000 case TLS_MODEL_GLOBAL_DYNAMIC:
5002 r2 = gen_rtx_REG (Pmode, 2);
5003 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
5004 new_rtx = gen_rtx_CONST (Pmode, tls_call);
5005 new_rtx = force_const_mem (Pmode, new_rtx);
5006 emit_move_insn (r2, new_rtx);
5007 s390_emit_tls_call_insn (r2, tls_call);
5008 insn = get_insns ();
5011 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5012 temp = gen_reg_rtx (Pmode);
5013 emit_libcall_block (insn, temp, r2, new_rtx);
5015 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5018 s390_load_address (reg, new_rtx);
5023 case TLS_MODEL_LOCAL_DYNAMIC:
5025 r2 = gen_rtx_REG (Pmode, 2);
5026 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
5027 new_rtx = gen_rtx_CONST (Pmode, tls_call);
5028 new_rtx = force_const_mem (Pmode, new_rtx);
5029 emit_move_insn (r2, new_rtx);
5030 s390_emit_tls_call_insn (r2, tls_call);
5031 insn = get_insns ();
5034 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
5035 temp = gen_reg_rtx (Pmode);
5036 emit_libcall_block (insn, temp, r2, new_rtx);
5038 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5039 base = gen_reg_rtx (Pmode);
5040 s390_load_address (base, new_rtx);
5042 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
5043 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5044 new_rtx = force_const_mem (Pmode, new_rtx);
5045 temp = gen_reg_rtx (Pmode);
5046 emit_move_insn (temp, new_rtx);
5048 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
5051 s390_load_address (reg, new_rtx);
5056 case TLS_MODEL_INITIAL_EXEC:
5059 /* Assume GOT offset < 4k. This is handled the same way
5060 in both 31- and 64-bit code. */
5062 if (reload_in_progress || reload_completed)
5063 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5065 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5066 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5067 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
5068 new_rtx = gen_const_mem (Pmode, new_rtx);
5069 temp = gen_reg_rtx (Pmode);
5070 emit_move_insn (temp, new_rtx);
5072 else if (TARGET_CPU_ZARCH)
5074 /* If the GOT offset might be >= 4k, we determine the position
5075 of the GOT entry via a PC-relative LARL. */
5077 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5078 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5079 temp = gen_reg_rtx (Pmode);
5080 emit_move_insn (temp, new_rtx);
5082 new_rtx = gen_const_mem (Pmode, temp);
5083 temp = gen_reg_rtx (Pmode);
5084 emit_move_insn (temp, new_rtx);
5088 /* If the GOT offset might be >= 4k, we have to load it
5089 from the literal pool. */
5091 if (reload_in_progress || reload_completed)
5092 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
5094 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
5095 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5096 new_rtx = force_const_mem (Pmode, new_rtx);
5097 temp = gen_reg_rtx (Pmode);
5098 emit_move_insn (temp, new_rtx);
5100 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
5101 new_rtx = gen_const_mem (Pmode, new_rtx);
5103 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5104 temp = gen_reg_rtx (Pmode);
5105 emit_insn (gen_rtx_SET (temp, new_rtx));
5109 /* In position-dependent code, load the absolute address of
5110 the GOT entry from the literal pool. */
5112 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
5113 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5114 new_rtx = force_const_mem (Pmode, new_rtx);
5115 temp = gen_reg_rtx (Pmode);
5116 emit_move_insn (temp, new_rtx);
5119 new_rtx = gen_const_mem (Pmode, new_rtx);
5120 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
5121 temp = gen_reg_rtx (Pmode);
5122 emit_insn (gen_rtx_SET (temp, new_rtx));
5125 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5128 s390_load_address (reg, new_rtx);
5133 case TLS_MODEL_LOCAL_EXEC:
5134 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
5135 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5136 new_rtx = force_const_mem (Pmode, new_rtx);
5137 temp = gen_reg_rtx (Pmode);
5138 emit_move_insn (temp, new_rtx);
5140 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
5143 s390_load_address (reg, new_rtx);
5152 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
5154 switch (XINT (XEXP (addr, 0), 1))
5156 case UNSPEC_INDNTPOFF:
5157 gcc_assert (TARGET_CPU_ZARCH);
5166 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5167 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5169 new_rtx = XEXP (XEXP (addr, 0), 0);
5170 if (GET_CODE (new_rtx) != SYMBOL_REF)
5171 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
5173 new_rtx = legitimize_tls_address (new_rtx, reg);
5174 new_rtx = plus_constant (Pmode, new_rtx,
5175 INTVAL (XEXP (XEXP (addr, 0), 1)));
5176 new_rtx = force_operand (new_rtx, 0);
5180 gcc_unreachable (); /* for now ... */
/* Emit insns making the address in operands[1] valid for a standard
   move to operands[0].  operands[1] is replaced by an address which
   should be used instead of the former RTX to emit the move
   pattern.  */
5191 emit_symbolic_move (rtx *operands)
5193 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
5195 if (GET_CODE (operands[0]) == MEM)
5196 operands[1] = force_reg (Pmode, operands[1]);
5197 else if (TLS_SYMBOLIC_CONST (operands[1]))
5198 operands[1] = legitimize_tls_address (operands[1], temp);
  else if (flag_pic)
    operands[1] = legitimize_pic_address (operands[1], temp);
5203 /* Try machine-dependent ways of modifying an illegitimate address X
5204 to be legitimate. If we find one, return the new, valid address.
5206 OLDX is the address as it was before break_out_memory_refs was called.
5207 In some cases it is useful to look at this to decide what needs to be done.
5209 MODE is the mode of the operand pointed to by X.
5211 When -fpic is used, special handling is needed for symbolic references.
5212 See comments by legitimize_pic_address for details. */
5215 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
5216 machine_mode mode ATTRIBUTE_UNUSED)
5218 rtx constant_term = const0_rtx;
5220 if (TLS_SYMBOLIC_CONST (x))
5222 x = legitimize_tls_address (x, 0);
      if (s390_legitimate_address_p (mode, x, FALSE))
	return x;
5227 else if (GET_CODE (x) == PLUS
5228 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
5229 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
5235 if (SYMBOLIC_CONST (x)
5236 || (GET_CODE (x) == PLUS
5237 && (SYMBOLIC_CONST (XEXP (x, 0))
5238 || SYMBOLIC_CONST (XEXP (x, 1)))))
5239 x = legitimize_pic_address (x, 0);
  if (s390_legitimate_address_p (mode, x, FALSE))
    return x;
5245 x = eliminate_constant_term (x, &constant_term);
  /* Optimize loading of large displacements by splitting them
     into a multiple of 4K and the remainder; this allows the
     former to be CSE'd if possible.
5251 Don't do this if the displacement is added to a register
5252 pointing into the stack frame, as the offsets will
5253 change later anyway. */
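  /* A hypothetical example: offset 0x12345 splits into
     upper = 0x12000, which is loaded into a register and can be
     CSE'd, and lower = 0x345, which still fits the 12-bit
     displacement field.  */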
5255 if (GET_CODE (constant_term) == CONST_INT
5256 && !TARGET_LONG_DISPLACEMENT
5257 && !DISP_IN_RANGE (INTVAL (constant_term))
5258 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
5260 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
5261 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
5263 rtx temp = gen_reg_rtx (Pmode);
5264 rtx val = force_operand (GEN_INT (upper), temp);
5266 emit_move_insn (temp, val);
5268 x = gen_rtx_PLUS (Pmode, x, temp);
5269 constant_term = GEN_INT (lower);
5272 if (GET_CODE (x) == PLUS)
5274 if (GET_CODE (XEXP (x, 0)) == REG)
5276 rtx temp = gen_reg_rtx (Pmode);
5277 rtx val = force_operand (XEXP (x, 1), temp);
5279 emit_move_insn (temp, val);
5281 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
5284 else if (GET_CODE (XEXP (x, 1)) == REG)
5286 rtx temp = gen_reg_rtx (Pmode);
5287 rtx val = force_operand (XEXP (x, 0), temp);
5289 emit_move_insn (temp, val);
5291 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
5295 if (constant_term != const0_rtx)
5296 x = gen_rtx_PLUS (Pmode, x, constant_term);
5301 /* Try a machine-dependent way of reloading an illegitimate address AD
5302 operand. If we find one, push the reload and return the new address.
5304 MODE is the mode of the enclosing MEM. OPNUM is the operand number
5305 and TYPE is the reload type of the current reload. */
5308 legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
5309 int opnum, int type)
  if (!optimize || TARGET_LONG_DISPLACEMENT)
    return NULL_RTX;
5314 if (GET_CODE (ad) == PLUS)
5316 rtx tem = simplify_binary_operation (PLUS, Pmode,
5317 XEXP (ad, 0), XEXP (ad, 1));
5322 if (GET_CODE (ad) == PLUS
5323 && GET_CODE (XEXP (ad, 0)) == REG
5324 && GET_CODE (XEXP (ad, 1)) == CONST_INT
5325 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
5327 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
5328 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
5329 rtx cst, tem, new_rtx;
5331 cst = GEN_INT (upper);
5332 if (!legitimate_reload_constant_p (cst))
5333 cst = force_const_mem (Pmode, cst);
5335 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
5336 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
5338 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
5339 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      return new_rtx;
/* Emit code to move LEN bytes from SRC to DST.  */
5350 s390_expand_movmem (rtx dst, rtx src, rtx len)
  /* When tuning for z10 or higher we rely on the Glibc functions to
     do the right thing.  Only for constant lengths below 64k do we
     generate inline code.  */
5355 if (s390_tune >= PROCESSOR_2097_Z10
      && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1 << 16)))
    return false;
5359 /* Expand memcpy for constant length operands without a loop if it
5360 is shorter that way.
5362 With a constant length argument a
5363 memcpy loop (without pfd) is 36 bytes -> 6 * mvc */
5364 if (GET_CODE (len) == CONST_INT
5365 && INTVAL (len) >= 0
5366 && INTVAL (len) <= 256 * 6
5367 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5371 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5373 rtx newdst = adjust_address (dst, BLKmode, o);
5374 rtx newsrc = adjust_address (src, BLKmode, o);
5375 emit_insn (gen_movmem_short (newdst, newsrc,
5376 GEN_INT (l > 256 ? 255 : l - 1)));
5380 else if (TARGET_MVCLE)
5382 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
5387 rtx dst_addr, src_addr, count, blocks, temp;
5388 rtx_code_label *loop_start_label = gen_label_rtx ();
5389 rtx_code_label *loop_end_label = gen_label_rtx ();
5390 rtx_code_label *end_label = gen_label_rtx ();
5393 mode = GET_MODE (len);
      if (mode == VOIDmode)
	mode = Pmode;
5397 dst_addr = gen_reg_rtx (Pmode);
5398 src_addr = gen_reg_rtx (Pmode);
5399 count = gen_reg_rtx (mode);
5400 blocks = gen_reg_rtx (mode);
5402 convert_move (count, len, 1);
5403 emit_cmp_and_jump_insns (count, const0_rtx,
5404 EQ, NULL_RTX, mode, 1, end_label);
5406 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5407 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
5408 dst = change_address (dst, VOIDmode, dst_addr);
5409 src = change_address (src, VOIDmode, src_addr);
5411 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5414 emit_move_insn (count, temp);
5416 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5419 emit_move_insn (blocks, temp);
5421 emit_cmp_and_jump_insns (blocks, const0_rtx,
5422 EQ, NULL_RTX, mode, 1, loop_end_label);
5424 emit_label (loop_start_label);
      if (TARGET_Z10
	  && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
5431 /* Issue a read prefetch for the +3 cache line. */
5432 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
5433 const0_rtx, const0_rtx);
5434 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5435 emit_insn (prefetch);
5437 /* Issue a write prefetch for the +3 cache line. */
5438 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
5439 const1_rtx, const0_rtx);
5440 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5441 emit_insn (prefetch);
5444 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
5445 s390_load_address (dst_addr,
5446 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5447 s390_load_address (src_addr,
5448 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
5450 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5453 emit_move_insn (blocks, temp);
5455 emit_cmp_and_jump_insns (blocks, const0_rtx,
5456 EQ, NULL_RTX, mode, 1, loop_end_label);
5458 emit_jump (loop_start_label);
5459 emit_label (loop_end_label);
5461 emit_insn (gen_movmem_short (dst, src,
5462 convert_to_mode (Pmode, count, 1)));
5463 emit_label (end_label);
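  /* A rough sketch of the code emitted by the loop variant above:

       count  = len - 1;
       blocks = count >> 8;
       while (blocks--)
	 { mvc 0(256,dst),0(src); dst += 256; src += 256; }
       mvc for the remaining (count & 0xff) + 1 bytes via EXECUTE;

     i.e. full 256-byte MVC blocks first, then one EXECUTE'd MVC for
     the remainder.  */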
5468 /* Emit code to set LEN bytes at DST to VAL.
5469 Make use of clrmem if VAL is zero. */
5472 s390_expand_setmem (rtx dst, rtx len, rtx val)
  if (GET_CODE (len) == CONST_INT && INTVAL (len) <= 0)
    return;
5477 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
5479 /* Expand setmem/clrmem for a constant length operand without a
5480 loop if it will be shorter that way.
5481 With a constant length and without pfd argument a
5482 clrmem loop is 32 bytes -> 5.3 * xc
5483 setmem loop is 36 bytes -> 3.6 * (mvi/stc + mvc) */
5484 if (GET_CODE (len) == CONST_INT
5485 && ((INTVAL (len) <= 256 * 5 && val == const0_rtx)
5486 || INTVAL (len) <= 257 * 3)
5487 && (!TARGET_MVCLE || INTVAL (len) <= 256))
5491 if (val == const0_rtx)
5492 /* clrmem: emit 256 byte blockwise XCs. */
5493 for (l = INTVAL (len), o = 0; l > 0; l -= 256, o += 256)
5495 rtx newdst = adjust_address (dst, BLKmode, o);
5496 emit_insn (gen_clrmem_short (newdst,
5497 GEN_INT (l > 256 ? 255 : l - 1)));
5500 /* setmem: emit 1(mvi) + 256(mvc) byte blockwise memsets by
5501 setting first byte to val and using a 256 byte mvc with one
5502 byte overlap to propagate the byte. */
5503 for (l = INTVAL (len), o = 0; l > 0; l -= 257, o += 257)
5505 rtx newdst = adjust_address (dst, BLKmode, o);
5506 emit_move_insn (adjust_address (dst, QImode, o), val);
5509 rtx newdstp1 = adjust_address (dst, BLKmode, o + 1);
5510 emit_insn (gen_movmem_short (newdstp1, newdst,
5511 GEN_INT (l > 257 ? 255 : l - 2)));
5516 else if (TARGET_MVCLE)
5518 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
      if (TARGET_64BIT)
	emit_insn (gen_setmem_long_di (dst, convert_to_mode (Pmode, len, 1),
				       val));
      else
	emit_insn (gen_setmem_long_si (dst, convert_to_mode (Pmode, len, 1),
				       val));
5529 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
5530 rtx_code_label *loop_start_label = gen_label_rtx ();
5531 rtx_code_label *onebyte_end_label = gen_label_rtx ();
5532 rtx_code_label *zerobyte_end_label = gen_label_rtx ();
5533 rtx_code_label *restbyte_end_label = gen_label_rtx ();
5536 mode = GET_MODE (len);
      if (mode == VOIDmode)
	mode = Pmode;
5540 dst_addr = gen_reg_rtx (Pmode);
5541 count = gen_reg_rtx (mode);
5542 blocks = gen_reg_rtx (mode);
5544 convert_move (count, len, 1);
5545 emit_cmp_and_jump_insns (count, const0_rtx,
5546 EQ, NULL_RTX, mode, 1, zerobyte_end_label,
5547 profile_probability::very_unlikely ());
5549 /* We need to make a copy of the target address since memset is
5550 supposed to return it unmodified. We have to make it here
5551 already since the new reg is used at onebyte_end_label. */
5552 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
5553 dst = change_address (dst, VOIDmode, dst_addr);
5555 if (val != const0_rtx)
	  /* When using the overlapping mvc the original target
	     address is only accessed as a single byte entity (even by
	     the mvc reading this value).  */
5560 set_mem_size (dst, 1);
5561 dstp1 = adjust_address (dst, VOIDmode, 1);
5562 emit_cmp_and_jump_insns (count,
5563 const1_rtx, EQ, NULL_RTX, mode, 1,
				   onebyte_end_label,
				   profile_probability::very_unlikely ());
      /* There is one unconditional (mvi+mvc)/xc after the loop
	 dealing with the rest of the bytes, subtracting two (mvi+mvc)
	 or one (xc) here leaves this number of bytes to be handled by
	 it.  */
5572 temp = expand_binop (mode, add_optab, count,
5573 val == const0_rtx ? constm1_rtx : GEN_INT (-2),
5574 count, 1, OPTAB_DIRECT);
5576 emit_move_insn (count, temp);
5578 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5581 emit_move_insn (blocks, temp);
5583 emit_cmp_and_jump_insns (blocks, const0_rtx,
5584 EQ, NULL_RTX, mode, 1, restbyte_end_label);
5586 emit_jump (loop_start_label);
5588 if (val != const0_rtx)
5590 /* The 1 byte != 0 special case. Not handled efficiently
5591 since we require two jumps for that. However, this
5592 should be very rare. */
5593 emit_label (onebyte_end_label);
5594 emit_move_insn (adjust_address (dst, QImode, 0), val);
5595 emit_jump (zerobyte_end_label);
5598 emit_label (loop_start_label);
      if (TARGET_Z10
	  && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
5603 /* Issue a write prefetch for the +4 cache line. */
	  rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
						     GEN_INT (1024)),
				       const1_rtx, const0_rtx);
5607 emit_insn (prefetch);
5608 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5611 if (val == const0_rtx)
5612 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
5615 /* Set the first byte in the block to the value and use an
5616 overlapping mvc for the block. */
5617 emit_move_insn (adjust_address (dst, QImode, 0), val);
5618 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (254)));
5620 s390_load_address (dst_addr,
5621 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
5623 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5626 emit_move_insn (blocks, temp);
5628 emit_cmp_and_jump_insns (blocks, const0_rtx,
5629 NE, NULL_RTX, mode, 1, loop_start_label);
5631 emit_label (restbyte_end_label);
5633 if (val == const0_rtx)
5634 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
5637 /* Set the first byte in the block to the value and use an
5638 overlapping mvc for the block. */
5639 emit_move_insn (adjust_address (dst, QImode, 0), val);
	  /* EXECUTE only uses the lowest 8 bits of count, which is
	     exactly what we need here.  */
5642 emit_insn (gen_movmem_short (dstp1, dst,
5643 convert_to_mode (Pmode, count, 1)));
5646 emit_label (zerobyte_end_label);
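  /* Note on the overlapping MVC used for the non-zero VAL case: after
     storing VAL to dst[0], an instruction like

	 mvc 1(255,%r1),0(%r1)

     copies strictly byte by byte from left to right, so every
     destination byte is re-read as a source one position later, which
     replicates VAL across the whole block.  */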
5650 /* Emit code to compare LEN bytes at OP0 with those at OP1,
5651 and return the result in TARGET. */
5654 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
5656 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
5659 /* When tuning for z10 or higher we rely on the Glibc functions to
5660 do the right thing. Only for constant lengths below 64k we will
5661 generate inline code. */
5662 if (s390_tune >= PROCESSOR_2097_Z10
      && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1 << 16)))
    return false;
5666 /* As the result of CMPINT is inverted compared to what we need,
5667 we have to swap the operands. */
5668 tmp = op0; op0 = op1; op1 = tmp;
5670 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
5672 if (INTVAL (len) > 0)
5674 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
5675 emit_insn (gen_cmpint (target, ccreg));
5678 emit_move_insn (target, const0_rtx);
5680 else if (TARGET_MVCLE)
5682 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
5683 emit_insn (gen_cmpint (target, ccreg));
5687 rtx addr0, addr1, count, blocks, temp;
5688 rtx_code_label *loop_start_label = gen_label_rtx ();
5689 rtx_code_label *loop_end_label = gen_label_rtx ();
5690 rtx_code_label *end_label = gen_label_rtx ();
5693 mode = GET_MODE (len);
      if (mode == VOIDmode)
	mode = Pmode;
5697 addr0 = gen_reg_rtx (Pmode);
5698 addr1 = gen_reg_rtx (Pmode);
5699 count = gen_reg_rtx (mode);
5700 blocks = gen_reg_rtx (mode);
5702 convert_move (count, len, 1);
5703 emit_cmp_and_jump_insns (count, const0_rtx,
5704 EQ, NULL_RTX, mode, 1, end_label);
5706 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
5707 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
5708 op0 = change_address (op0, VOIDmode, addr0);
5709 op1 = change_address (op1, VOIDmode, addr1);
5711 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
5714 emit_move_insn (count, temp);
5716 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
5719 emit_move_insn (blocks, temp);
5721 emit_cmp_and_jump_insns (blocks, const0_rtx,
5722 EQ, NULL_RTX, mode, 1, loop_end_label);
5724 emit_label (loop_start_label);
5727 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
5731 /* Issue a read prefetch for the +2 cache line of operand 1. */
5732 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
5733 const0_rtx, const0_rtx);
5734 emit_insn (prefetch);
5735 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5737 /* Issue a read prefetch for the +2 cache line of operand 2. */
5738 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
5739 const0_rtx, const0_rtx);
5740 emit_insn (prefetch);
5741 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
5744 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
5745 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
5746 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5747 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
5748 temp = gen_rtx_SET (pc_rtx, temp);
5749 emit_jump_insn (temp);
5751 s390_load_address (addr0,
5752 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
5753 s390_load_address (addr1,
5754 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
5756 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
5759 emit_move_insn (blocks, temp);
5761 emit_cmp_and_jump_insns (blocks, const0_rtx,
5762 EQ, NULL_RTX, mode, 1, loop_end_label);
5764 emit_jump (loop_start_label);
5765 emit_label (loop_end_label);
5767 emit_insn (gen_cmpmem_short (op0, op1,
5768 convert_to_mode (Pmode, count, 1)));
5769 emit_label (end_label);
5771 emit_insn (gen_cmpint (target, ccreg));
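  /* The cmpint pattern materializes the CCU condition code as the
     negative/zero/positive integer result expected by memcmp; the
     operands were swapped at the top of this function to compensate
     for its inverted sense.  */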
/* Emit a conditional jump to LABEL for condition code mask MASK using
   comparison operator COMPARISON.  Return the emitted jump insn.  */
5780 s390_emit_ccraw_jump (HOST_WIDE_INT mask, enum rtx_code comparison, rtx label)
5784 gcc_assert (comparison == EQ || comparison == NE);
5785 gcc_assert (mask > 0 && mask < 15);
5787 temp = gen_rtx_fmt_ee (comparison, VOIDmode,
5788 gen_rtx_REG (CCRAWmode, CC_REGNUM), GEN_INT (mask));
5789 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
5790 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
5791 temp = gen_rtx_SET (pc_rtx, temp);
5792 return emit_jump_insn (temp);
5795 /* Emit the instructions to implement strlen of STRING and store the
5796 result in TARGET. The string has the known ALIGNMENT. This
5797 version uses vector instructions and is therefore not appropriate
5798 for targets prior to z13. */
5801 s390_expand_vec_strlen (rtx target, rtx string, rtx alignment)
5803 rtx highest_index_to_load_reg = gen_reg_rtx (Pmode);
5804 rtx str_reg = gen_reg_rtx (V16QImode);
5805 rtx str_addr_base_reg = gen_reg_rtx (Pmode);
5806 rtx str_idx_reg = gen_reg_rtx (Pmode);
5807 rtx result_reg = gen_reg_rtx (V16QImode);
5808 rtx is_aligned_label = gen_label_rtx ();
5809 rtx into_loop_label = NULL_RTX;
5810 rtx loop_start_label = gen_label_rtx ();
5812 rtx len = gen_reg_rtx (QImode);
5815 s390_load_address (str_addr_base_reg, XEXP (string, 0));
5816 emit_move_insn (str_idx_reg, const0_rtx);
5818 if (INTVAL (alignment) < 16)
      /* Check whether the address happens to be aligned properly so
	 we can jump directly to the aligned loop.  */
5822 emit_cmp_and_jump_insns (gen_rtx_AND (Pmode,
5823 str_addr_base_reg, GEN_INT (15)),
5824 const0_rtx, EQ, NULL_RTX,
5825 Pmode, 1, is_aligned_label);
5827 temp = gen_reg_rtx (Pmode);
5828 temp = expand_binop (Pmode, and_optab, str_addr_base_reg,
5829 GEN_INT (15), temp, 1, OPTAB_DIRECT);
5830 gcc_assert (REG_P (temp));
5831 highest_index_to_load_reg =
5832 expand_binop (Pmode, sub_optab, GEN_INT (15), temp,
5833 highest_index_to_load_reg, 1, OPTAB_DIRECT);
5834 gcc_assert (REG_P (highest_index_to_load_reg));
5835 emit_insn (gen_vllv16qi (str_reg,
5836 convert_to_mode (SImode, highest_index_to_load_reg, 1),
5837 gen_rtx_MEM (BLKmode, str_addr_base_reg)));
5839 into_loop_label = gen_label_rtx ();
5840 s390_emit_jump (into_loop_label, NULL_RTX);
5844 emit_label (is_aligned_label);
5845 LABEL_NUSES (is_aligned_label) = INTVAL (alignment) < 16 ? 2 : 1;
  /* Reaching this point we are only performing 16 byte aligned
     loads.  */
5849 emit_move_insn (highest_index_to_load_reg, GEN_INT (15));
5851 emit_label (loop_start_label);
5852 LABEL_NUSES (loop_start_label) = 1;
5854 /* Load 16 bytes of the string into VR. */
5855 emit_move_insn (str_reg,
5856 gen_rtx_MEM (V16QImode,
5857 gen_rtx_PLUS (Pmode, str_idx_reg,
5858 str_addr_base_reg)));
5859 if (into_loop_label != NULL_RTX)
5861 emit_label (into_loop_label);
5862 LABEL_NUSES (into_loop_label) = 1;
5865 /* Increment string index by 16 bytes. */
5866 expand_binop (Pmode, add_optab, str_idx_reg, GEN_INT (16),
5867 str_idx_reg, 1, OPTAB_DIRECT);
5869 emit_insn (gen_vec_vfenesv16qi (result_reg, str_reg, str_reg,
5870 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
  add_int_reg_note (s390_emit_ccraw_jump (8, NE, loop_start_label),
		    REG_BR_PROB,
		    profile_probability::very_likely ().to_reg_br_prob_note ());
5875 emit_insn (gen_vec_extractv16qiqi (len, result_reg, GEN_INT (7)));
  /* If the string pointer wasn't aligned we have loaded less than 16
     bytes and the remaining bytes got filled with zeros (by vll).
     Now we have to check whether the resulting index lies within the
     bytes actually part of the string.  */
5882 cond = s390_emit_compare (GT, convert_to_mode (Pmode, len, 1),
5883 highest_index_to_load_reg);
5884 s390_load_address (highest_index_to_load_reg,
		     gen_rtx_PLUS (Pmode, highest_index_to_load_reg,
				   const1_rtx));
  if (TARGET_64BIT)
    emit_insn (gen_movdicc (str_idx_reg, cond,
5889 highest_index_to_load_reg, str_idx_reg));
5891 emit_insn (gen_movsicc (str_idx_reg, cond,
5892 highest_index_to_load_reg, str_idx_reg));
5894 add_reg_br_prob_note (s390_emit_jump (is_aligned_label, cond),
5895 profile_probability::very_unlikely ());
5897 expand_binop (Pmode, add_optab, str_idx_reg,
5898 GEN_INT (-16), str_idx_reg, 1, OPTAB_DIRECT);
  /* FIXME: len is already zero extended - so avoid the llgcr emitted
     by the convert_to_mode call below.  */
5901 temp = expand_binop (Pmode, add_optab, str_idx_reg,
5902 convert_to_mode (Pmode, len, 1),
5903 target, 1, OPTAB_DIRECT);
5905 emit_move_insn (target, temp);
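/* Roughly, the sequence emitted above behaves like:

     idx = 0;
     if (addr & 15)
       load the bytes up to the next 16-byte boundary with vll;
     loop:
       vl 16 bytes from addr + idx; idx += 16;
       vfenez; branch back while no zero byte was found;
     strlen = idx - 16 + byte index reported by vfenez;  */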
5909 s390_expand_vec_movstr (rtx result, rtx dst, rtx src)
5911 rtx temp = gen_reg_rtx (Pmode);
5912 rtx src_addr = XEXP (src, 0);
5913 rtx dst_addr = XEXP (dst, 0);
5914 rtx src_addr_reg = gen_reg_rtx (Pmode);
5915 rtx dst_addr_reg = gen_reg_rtx (Pmode);
5916 rtx offset = gen_reg_rtx (Pmode);
5917 rtx vsrc = gen_reg_rtx (V16QImode);
5918 rtx vpos = gen_reg_rtx (V16QImode);
5919 rtx loadlen = gen_reg_rtx (SImode);
  rtx gpos_qi = gen_reg_rtx (QImode);
5921 rtx gpos = gen_reg_rtx (SImode);
5922 rtx done_label = gen_label_rtx ();
5923 rtx loop_label = gen_label_rtx ();
5924 rtx exit_label = gen_label_rtx ();
5925 rtx full_label = gen_label_rtx ();
  /* Perform a quick check for a string ending within the first up to
     16 bytes and exit early if successful.  */
5930 emit_insn (gen_vlbb (vsrc, src, GEN_INT (6)));
5931 emit_insn (gen_lcbb (loadlen, src_addr, GEN_INT (6)));
5932 emit_insn (gen_vfenezv16qi (vpos, vsrc, vsrc));
5933 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
5934 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
5935 /* gpos is the byte index if a zero was found and 16 otherwise.
5936 So if it is lower than the loaded bytes we have a hit. */
  emit_cmp_and_jump_insns (gpos, loadlen, GE, NULL_RTX, SImode, 1,
			   full_label);
5939 emit_insn (gen_vstlv16qi (vsrc, gpos, dst));
5941 force_expand_binop (Pmode, add_optab, dst_addr, gpos, result,
5943 emit_jump (exit_label);
5946 emit_label (full_label);
5947 LABEL_NUSES (full_label) = 1;
5949 /* Calculate `offset' so that src + offset points to the last byte
5950 before 16 byte alignment. */
5952 /* temp = src_addr & 0xf */
5953 force_expand_binop (Pmode, and_optab, src_addr, GEN_INT (15), temp,
5956 /* offset = 0xf - temp */
5957 emit_move_insn (offset, GEN_INT (15));
5958 force_expand_binop (Pmode, sub_optab, offset, temp, offset,
  /* Store `offset' bytes in the destination string.  The quick check
     has loaded at least `offset' bytes into vsrc.  */
5964 emit_insn (gen_vstlv16qi (vsrc, gen_lowpart (SImode, offset), dst));
5966 /* Advance to the next byte to be loaded. */
5967 force_expand_binop (Pmode, add_optab, offset, const1_rtx, offset,
  /* Make sure the addresses are single regs which can be used as a
     base address.  */
5972 emit_move_insn (src_addr_reg, src_addr);
5973 emit_move_insn (dst_addr_reg, dst_addr);
5977 emit_label (loop_label);
5978 LABEL_NUSES (loop_label) = 1;
5980 emit_move_insn (vsrc,
5981 gen_rtx_MEM (V16QImode,
5982 gen_rtx_PLUS (Pmode, src_addr_reg, offset)));
5984 emit_insn (gen_vec_vfenesv16qi (vpos, vsrc, vsrc,
5985 GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
5986 add_int_reg_note (s390_emit_ccraw_jump (8, EQ, done_label),
5987 REG_BR_PROB, profile_probability::very_unlikely ()
5988 .to_reg_br_prob_note ());
5990 emit_move_insn (gen_rtx_MEM (V16QImode,
5991 gen_rtx_PLUS (Pmode, dst_addr_reg, offset)),
5994 force_expand_binop (Pmode, add_optab, offset, GEN_INT (16),
5995 offset, 1, OPTAB_DIRECT);
5997 emit_jump (loop_label);
6002 /* We are done. Add the offset of the zero character to the dst_addr
6003 pointer to get the result. */
6005 emit_label (done_label);
6006 LABEL_NUSES (done_label) = 1;
6008 force_expand_binop (Pmode, add_optab, dst_addr_reg, offset, dst_addr_reg,
6011 emit_insn (gen_vec_extractv16qiqi (gpos_qi, vpos, GEN_INT (7)));
6012 emit_move_insn (gpos, gen_rtx_SUBREG (SImode, gpos_qi, 0));
6014 emit_insn (gen_vstlv16qi (vsrc, gpos, gen_rtx_MEM (BLKmode, dst_addr_reg)));
6016 force_expand_binop (Pmode, add_optab, dst_addr_reg, gpos, result,
6021 emit_label (exit_label);
6022 LABEL_NUSES (exit_label) = 1;
6026 /* Expand conditional increment or decrement using alc/slb instructions.
6027 Should generate code setting DST to either SRC or SRC + INCREMENT,
6028 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
6029 Returns true if successful, false otherwise.
6031 That makes it possible to implement some if-constructs without jumps e.g.:
6032 (borrow = CC0 | CC1 and carry = CC2 | CC3)
6033 unsigned int a, b, c;
6034 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
6035 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
6036 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
6037 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
6039 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
6040 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
6041 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
6042 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
6043 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
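/* E.g. "if (a < b) c++;" with unsigned operands becomes a compare
   whose carry/borrow ends up in the CC, followed by an add logical
   with carry of c and zero, so no branch is needed.  (A sketch only;
   the exact pattern depends on the CC mode chosen below.)  */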
6046 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
6047 rtx dst, rtx src, rtx increment)
6049 machine_mode cmp_mode;
6050 machine_mode cc_mode;
6056 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
6057 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
6059 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
6060 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
6065 /* Try ADD LOGICAL WITH CARRY. */
6066 if (increment == const1_rtx)
6068 /* Determine CC mode to use. */
6069 if (cmp_code == EQ || cmp_code == NE)
6071 if (cmp_op1 != const0_rtx)
6073 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6074 NULL_RTX, 0, OPTAB_WIDEN);
6075 cmp_op1 = const0_rtx;
6078 cmp_code = cmp_code == EQ ? LEU : GTU;
6081 if (cmp_code == LTU || cmp_code == LEU)
6086 cmp_code = swap_condition (cmp_code);
6103 /* Emit comparison instruction pattern. */
6104 if (!register_operand (cmp_op0, cmp_mode))
6105 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6107 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6108 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6109 /* We use insn_invalid_p here to add clobbers if required. */
6110 ret = insn_invalid_p (emit_insn (insn), false);
6113 /* Emit ALC instruction pattern. */
6114 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
			   gen_rtx_REG (cc_mode, CC_REGNUM),
			   const0_rtx);
6118 if (src != const0_rtx)
6120 if (!register_operand (src, GET_MODE (dst)))
6121 src = force_reg (GET_MODE (dst), src);
6123 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
6124 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
6127 p = rtvec_alloc (2);
6129 gen_rtx_SET (dst, op_res);
6131 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6132 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6137 /* Try SUBTRACT LOGICAL WITH BORROW. */
6138 if (increment == constm1_rtx)
6140 /* Determine CC mode to use. */
6141 if (cmp_code == EQ || cmp_code == NE)
6143 if (cmp_op1 != const0_rtx)
6145 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
6146 NULL_RTX, 0, OPTAB_WIDEN);
6147 cmp_op1 = const0_rtx;
6150 cmp_code = cmp_code == EQ ? LEU : GTU;
6153 if (cmp_code == GTU || cmp_code == GEU)
6158 cmp_code = swap_condition (cmp_code);
6175 /* Emit comparison instruction pattern. */
6176 if (!register_operand (cmp_op0, cmp_mode))
6177 cmp_op0 = force_reg (cmp_mode, cmp_op0);
6179 insn = gen_rtx_SET (gen_rtx_REG (cc_mode, CC_REGNUM),
6180 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
6181 /* We use insn_invalid_p here to add clobbers if required. */
6182 ret = insn_invalid_p (emit_insn (insn), false);
6185 /* Emit SLB instruction pattern. */
6186 if (!register_operand (src, GET_MODE (dst)))
6187 src = force_reg (GET_MODE (dst), src);
6189 op_res = gen_rtx_MINUS (GET_MODE (dst),
6190 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
6191 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
					  gen_rtx_REG (cc_mode, CC_REGNUM),
					  const0_rtx));
6194 p = rtvec_alloc (2);
6196 gen_rtx_SET (dst, op_res);
6198 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6199 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
6207 /* Expand code for the insv template. Return true if successful. */
6210 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
6212 int bitsize = INTVAL (op1);
6213 int bitpos = INTVAL (op2);
6214 machine_mode mode = GET_MODE (dest);
6216 int smode_bsize, mode_bsize;
  if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
    return false;
6222 /* Generate INSERT IMMEDIATE (IILL et al). */
6223 /* (set (ze (reg)) (const_int)). */
  if (TARGET_ZARCH
      && register_operand (dest, word_mode)
6226 && (bitpos % 16) == 0
6227 && (bitsize % 16) == 0
6228 && const_int_operand (src, VOIDmode))
6230 HOST_WIDE_INT val = INTVAL (src);
6231 int regpos = bitpos + bitsize;
6233 while (regpos > bitpos)
6235 machine_mode putmode;
6238 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
6243 putsize = GET_MODE_BITSIZE (putmode);
	  emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
						GEN_INT (putsize),
						GEN_INT (regpos - putsize)),
			  gen_int_mode (val, putmode));
6251 gcc_assert (regpos == bitpos);
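	  /* E.g. with EXTIMM, inserting a 32-bit constant into the low
	     word of a 64-bit register takes a single iilf; without
	     EXTIMM the loop above falls back to two 16-bit inserts
	     (iill and iilh).  */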
6255 smode = smallest_int_mode_for_size (bitsize);
6256 smode_bsize = GET_MODE_BITSIZE (smode);
6257 mode_bsize = GET_MODE_BITSIZE (mode);
6259 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
  if (bitpos == 0
      && (bitsize % BITS_PER_UNIT) == 0
      && MEM_P (dest)
6263 && (register_operand (src, word_mode)
6264 || const_int_operand (src, VOIDmode)))
6266 /* Emit standard pattern if possible. */
6267 if (smode_bsize == bitsize)
6269 emit_move_insn (adjust_address (dest, smode, 0),
6270 gen_lowpart (smode, src));
6274 /* (set (ze (mem)) (const_int)). */
6275 else if (const_int_operand (src, VOIDmode))
6277 int size = bitsize / BITS_PER_UNIT;
      rtx src_mem = adjust_address (force_const_mem (word_mode, src),
				    BLKmode,
				    UNITS_PER_WORD - size);
6282 dest = adjust_address (dest, BLKmode, 0);
6283 set_mem_size (dest, size);
6284 s390_expand_movmem (dest, src_mem, GEN_INT (size));
6288 /* (set (ze (mem)) (reg)). */
6289 else if (register_operand (src, word_mode))
6292 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
6296 /* Emit st,stcmh sequence. */
6297 int stcmh_width = bitsize - 32;
6298 int size = stcmh_width / BITS_PER_UNIT;
6300 emit_move_insn (adjust_address (dest, SImode, size),
6301 gen_lowpart (SImode, src));
6302 set_mem_size (dest, size);
6303 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
6304 GEN_INT (stcmh_width),
6306 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
6312 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
6313 if ((bitpos % BITS_PER_UNIT) == 0
6314 && (bitsize % BITS_PER_UNIT) == 0
6315 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
      && MEM_P (src)
      && (mode == DImode || mode == SImode)
6318 && register_operand (dest, mode))
6320 /* Emit a strict_low_part pattern if possible. */
6321 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
6323 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
6324 op = gen_rtx_SET (op, gen_lowpart (smode, src));
6325 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6326 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
6330 /* ??? There are more powerful versions of ICM that are not
6331 completely represented in the md file. */
6334 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
6335 if (TARGET_Z10 && (mode == DImode || mode == SImode))
6337 machine_mode mode_s = GET_MODE (src);
6339 if (CONSTANT_P (src))
6341 /* For constant zero values the representation with AND
6342 appears to be folded in more situations than the (set
6343 (zero_extract) ...).
6344 We only do this when the start and end of the bitfield
	     remain in the same SImode chunk.  That way nihf or nilf
	     can be used.
6347 The AND patterns might still generate a risbg for this. */
6348 if (src == const0_rtx && bitpos / 32 == (bitpos + bitsize - 1) / 32)
6351 src = force_reg (mode, src);
6353 else if (mode_s != mode)
6355 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
6356 src = force_reg (mode_s, src);
6357 src = gen_lowpart (mode, src);
      op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
6361 op = gen_rtx_SET (op, src);
6365 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
6366 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
6376 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
6377 register that holds VAL of mode MODE shifted by COUNT bits. */
6380 s390_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
6382 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
6383 NULL_RTX, 1, OPTAB_DIRECT);
6384 return expand_simple_binop (SImode, ASHIFT, val, count,
6385 NULL_RTX, 1, OPTAB_DIRECT);
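/* E.g. for QImode VAL 0x5a and COUNT 8 the register returned above
   holds 0x00005a00.  */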
6388 /* Generate a vector comparison COND of CMP_OP1 and CMP_OP2 and store
6389 the result in TARGET. */
6392 s390_expand_vec_compare (rtx target, enum rtx_code cond,
6393 rtx cmp_op1, rtx cmp_op2)
6395 machine_mode mode = GET_MODE (target);
6396 bool neg_p = false, swap_p = false;
6399 if (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_VECTOR_FLOAT)
6403 /* NE a != b -> !(a == b) */
6404 case NE: cond = EQ; neg_p = true; break;
6405 /* UNGT a u> b -> !(b >= a) */
6406 case UNGT: cond = GE; neg_p = true; swap_p = true; break;
6407 /* UNGE a u>= b -> !(b > a) */
6408 case UNGE: cond = GT; neg_p = true; swap_p = true; break;
6409 /* LE: a <= b -> b >= a */
6410 case LE: cond = GE; swap_p = true; break;
6411 /* UNLE: a u<= b -> !(a > b) */
6412 case UNLE: cond = GT; neg_p = true; break;
6413 /* LT: a < b -> b > a */
6414 case LT: cond = GT; swap_p = true; break;
6415 /* UNLT: a u< b -> !(a >= b) */
6416 case UNLT: cond = GE; neg_p = true; break;
6418 emit_insn (gen_vec_cmpuneq (target, cmp_op1, cmp_op2));
6421 emit_insn (gen_vec_cmpltgt (target, cmp_op1, cmp_op2));
6424 emit_insn (gen_vec_ordered (target, cmp_op1, cmp_op2));
6427 emit_insn (gen_vec_unordered (target, cmp_op1, cmp_op2));
6436 /* NE: a != b -> !(a == b) */
6437 case NE: cond = EQ; neg_p = true; break;
6438 /* GE: a >= b -> !(b > a) */
6439 case GE: cond = GT; neg_p = true; swap_p = true; break;
6440 /* GEU: a >= b -> !(b > a) */
6441 case GEU: cond = GTU; neg_p = true; swap_p = true; break;
6442 /* LE: a <= b -> !(a > b) */
6443 case LE: cond = GT; neg_p = true; break;
6444 /* LEU: a <= b -> !(a > b) */
6445 case LEU: cond = GTU; neg_p = true; break;
6446 /* LT: a < b -> b > a */
6447 case LT: cond = GT; swap_p = true; break;
6448 /* LTU: a < b -> b > a */
6449 case LTU: cond = GTU; swap_p = true; break;
6456 tmp = cmp_op1; cmp_op1 = cmp_op2; cmp_op2 = tmp;
  emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (cond, mode,
						  cmp_op1, cmp_op2)));
  if (neg_p)
    emit_insn (gen_rtx_SET (target, gen_rtx_NOT (mode, target)));
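/* Note: only EQ, GT and GTU exist as direct integer vector compare
   instructions, and only EQ, GT and GE for floating point; all other
   codes above are synthesized by swapping the operands and/or
   negating the resulting mask.  */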
6466 /* Expand the comparison CODE of CMP1 and CMP2 and copy 1 or 0 into
6467 TARGET if either all (ALL_P is true) or any (ALL_P is false) of the
6468 elements in CMP1 and CMP2 fulfill the comparison.
   This function is only used to emit patterns for the vx builtins and
   therefore only handles comparison codes required by those
   builtins.  */
6473 s390_expand_vec_compare_cc (rtx target, enum rtx_code code,
6474 rtx cmp1, rtx cmp2, bool all_p)
6476 machine_mode cc_producer_mode, cc_consumer_mode, scratch_mode;
6477 rtx tmp_reg = gen_reg_rtx (SImode);
6478 bool swap_p = false;
6480 if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_INT)
6486 cc_producer_mode = CCVEQmode;
6490 code = swap_condition (code);
6495 cc_producer_mode = CCVIHmode;
6499 code = swap_condition (code);
6504 cc_producer_mode = CCVIHUmode;
6510 scratch_mode = GET_MODE (cmp1);
  /* These codes represent inverted CC interpretations.  Inverting
     an ALL CC mode results in an ANY CC mode and the other way
     around.  Invert the all_p flag here to compensate for
     that.  */
6515 if (code == NE || code == LE || code == LEU)
6518 cc_consumer_mode = all_p ? CCVIALLmode : CCVIANYmode;
6520 else if (GET_MODE_CLASS (GET_MODE (cmp1)) == MODE_VECTOR_FLOAT)
6526 case EQ: cc_producer_mode = CCVEQmode; break;
6527 case NE: cc_producer_mode = CCVEQmode; inv_p = true; break;
6528 case GT: cc_producer_mode = CCVFHmode; break;
6529 case GE: cc_producer_mode = CCVFHEmode; break;
6530 case UNLE: cc_producer_mode = CCVFHmode; inv_p = true; break;
6531 case UNLT: cc_producer_mode = CCVFHEmode; inv_p = true; break;
6532 case LT: cc_producer_mode = CCVFHmode; code = GT; swap_p = true; break;
6533 case LE: cc_producer_mode = CCVFHEmode; code = GE; swap_p = true; break;
6534 default: gcc_unreachable ();
6536 scratch_mode = mode_for_int_vector (GET_MODE (cmp1)).require ();
6541 cc_consumer_mode = all_p ? CCVFALLmode : CCVFANYmode;
6553 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6554 gen_rtvec (2, gen_rtx_SET (
6555 gen_rtx_REG (cc_producer_mode, CC_REGNUM),
6556 gen_rtx_COMPARE (cc_producer_mode, cmp1, cmp2)),
6557 gen_rtx_CLOBBER (VOIDmode,
6558 gen_rtx_SCRATCH (scratch_mode)))));
6559 emit_move_insn (target, const0_rtx);
6560 emit_move_insn (tmp_reg, const1_rtx);
6562 emit_move_insn (target,
6563 gen_rtx_IF_THEN_ELSE (SImode,
6564 gen_rtx_fmt_ee (code, VOIDmode,
						      gen_rtx_REG (cc_consumer_mode, CC_REGNUM),
						      const0_rtx),
				      tmp_reg, target));
/* Invert the comparison CODE applied to a CC mode.  This is only safe
   if we know whether the result was created by a floating point
   compare or not.  For the CCV modes this is encoded as part of the
   mode.  */
6575 s390_reverse_condition (machine_mode mode, enum rtx_code code)
6577 /* Reversal of FP compares takes care -- an ordered compare
6578 becomes an unordered compare and vice versa. */
6579 if (mode == CCVFALLmode || mode == CCVFANYmode)
6580 return reverse_condition_maybe_unordered (code);
6581 else if (mode == CCVIALLmode || mode == CCVIANYmode)
6582 return reverse_condition (code);
/* Generate a vector comparison expression loading either elements of
   THEN or ELS into TARGET depending on the comparison COND of CMP_OP1
   and CMP_OP2.  */
6592 s390_expand_vcond (rtx target, rtx then, rtx els,
6593 enum rtx_code cond, rtx cmp_op1, rtx cmp_op2)
6596 machine_mode result_mode;
6599 machine_mode target_mode = GET_MODE (target);
6600 machine_mode cmp_mode = GET_MODE (cmp_op1);
6601 rtx op = (cond == LT) ? els : then;
6603 /* Try to optimize x < 0 ? -1 : 0 into (signed) x >> 31
6604 and x < 0 ? 1 : 0 into (unsigned) x >> 31. Likewise
6605 for short and byte (x >> 15 and x >> 7 respectively). */
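  /* E.g. for V4SI this turns "x < 0 ? -1 : 0" into an element-wise
     arithmetic shift right by 31, replicating each sign bit across
     its element; the "x < 0 ? 1 : 0" variant uses a logical shift
     and yields 0 or 1.  */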
6606 if ((cond == LT || cond == GE)
6607 && target_mode == cmp_mode
6608 && cmp_op2 == CONST0_RTX (cmp_mode)
6609 && op == CONST0_RTX (target_mode)
6610 && s390_vector_mode_supported_p (target_mode)
6611 && GET_MODE_CLASS (target_mode) == MODE_VECTOR_INT)
6613 rtx negop = (cond == LT) ? then : els;
6615 int shift = GET_MODE_BITSIZE (GET_MODE_INNER (target_mode)) - 1;
6617 /* if x < 0 ? 1 : 0 or if x >= 0 ? 0 : 1 */
6618 if (negop == CONST1_RTX (target_mode))
6620 rtx res = expand_simple_binop (cmp_mode, LSHIFTRT, cmp_op1,
6621 GEN_INT (shift), target,
6624 emit_move_insn (target, res);
6628 /* if x < 0 ? -1 : 0 or if x >= 0 ? 0 : -1 */
6629 else if (all_ones_operand (negop, target_mode))
6631 rtx res = expand_simple_binop (cmp_mode, ASHIFTRT, cmp_op1,
6632 GEN_INT (shift), target,
6635 emit_move_insn (target, res);
  /* We always use an integral type vector to hold the comparison
     result.  */
6642 result_mode = mode_for_int_vector (cmp_mode).require ();
6643 result_target = gen_reg_rtx (result_mode);
6645 /* We allow vector immediates as comparison operands that
6646 can be handled by the optimization above but not by the
6647 following code. Hence, force them into registers here. */
6648 if (!REG_P (cmp_op1))
6649 cmp_op1 = force_reg (GET_MODE (cmp_op1), cmp_op1);
6651 if (!REG_P (cmp_op2))
6652 cmp_op2 = force_reg (GET_MODE (cmp_op2), cmp_op2);
6654 s390_expand_vec_compare (result_target, cond,
6657 /* If the results are supposed to be either -1 or 0 we are done
6658 since this is what our compare instructions generate anyway. */
6659 if (all_ones_operand (then, GET_MODE (then))
6660 && const0_operand (els, GET_MODE (els)))
6662 emit_move_insn (target, gen_rtx_SUBREG (target_mode,
6667 /* Otherwise we will do a vsel afterwards. */
6668 /* This gets triggered e.g.
6669 with gcc.c-torture/compile/pr53410-1.c */
6671 then = force_reg (target_mode, then);
6674 els = force_reg (target_mode, els);
6676 tmp = gen_rtx_fmt_ee (EQ, VOIDmode,
6678 CONST0_RTX (result_mode));
  /* We compared the result against zero above so we have to swap then
     and els here.  */
6682 tmp = gen_rtx_IF_THEN_ELSE (target_mode, tmp, els, then);
6684 gcc_assert (target_mode == GET_MODE (then));
6685 emit_insn (gen_rtx_SET (target, tmp));
/* Emit the RTX necessary to initialize the vector TARGET with values
   in VALS.  */
6691 s390_expand_vec_init (rtx target, rtx vals)
6693 machine_mode mode = GET_MODE (target);
6694 machine_mode inner_mode = GET_MODE_INNER (mode);
6695 int n_elts = GET_MODE_NUNITS (mode);
6696 bool all_same = true, all_regs = true, all_const_int = true;
6700 for (i = 0; i < n_elts; ++i)
6702 x = XVECEXP (vals, 0, i);
6704 if (!CONST_INT_P (x))
6705 all_const_int = false;
6707 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6714 /* Use vector gen mask or vector gen byte mask if possible. */
6715 if (all_same && all_const_int
6716 && (XVECEXP (vals, 0, 0) == const0_rtx
6717 || s390_contiguous_bitmask_vector_p (XVECEXP (vals, 0, 0),
6719 || s390_bytemask_vector_p (XVECEXP (vals, 0, 0), NULL)))
6721 emit_insn (gen_rtx_SET (target,
6722 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))));
6728 emit_insn (gen_rtx_SET (target,
6729 gen_rtx_VEC_DUPLICATE (mode,
6730 XVECEXP (vals, 0, 0))));
  if (all_regs
      && REG_P (target)
      && n_elts == 2
      && GET_MODE_SIZE (inner_mode) == 8)
6739 /* Use vector load pair. */
6740 emit_insn (gen_rtx_SET (target,
6741 gen_rtx_VEC_CONCAT (mode,
6742 XVECEXP (vals, 0, 0),
6743 XVECEXP (vals, 0, 1))));
6747 /* Use vector load logical element and zero. */
6748 if (TARGET_VXE && (mode == V4SImode || mode == V4SFmode))
6752 x = XVECEXP (vals, 0, 0);
6753 if (memory_operand (x, inner_mode))
6755 for (i = 1; i < n_elts; ++i)
6756 found = found && XVECEXP (vals, 0, i) == const0_rtx;
6760 machine_mode half_mode = (inner_mode == SFmode
6761 ? V2SFmode : V2SImode);
6762 emit_insn (gen_rtx_SET (target,
6763 gen_rtx_VEC_CONCAT (mode,
6764 gen_rtx_VEC_CONCAT (half_mode,
6767 gen_rtx_VEC_CONCAT (half_mode,
  /* We are about to set the vector elements one by one.  Zero out the
     full register first in order to help the data flow framework to
     detect it as a full VR set.  */
6778 emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
6780 /* Unfortunately the vec_init expander is not allowed to fail. So
6781 we have to implement the fallback ourselves. */
6782 for (i = 0; i < n_elts; i++)
6784 rtx elem = XVECEXP (vals, 0, i);
6785 if (!general_operand (elem, GET_MODE (elem)))
6786 elem = force_reg (inner_mode, elem);
      emit_insn (gen_rtx_SET (target,
			      gen_rtx_UNSPEC (mode,
					      gen_rtvec (3, elem,
							 GEN_INT (i), target),
					      UNSPEC_VEC_SET)));
6796 /* Structure to hold the initial parameters for a compare_and_swap operation
6797 in HImode and QImode. */
6799 struct alignment_context
6801 rtx memsi; /* SI aligned memory location. */
6802 rtx shift; /* Bit offset with regard to lsb. */
6803 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
6804 rtx modemaski; /* ~modemask */
  bool aligned;	   /* True if memory is aligned, false otherwise.  */
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
   structure AC for transparent simplification, if the memory alignment is
   known to be at least 32 bit.  MEM is the memory location for the actual
   operation and MODE its mode.  */
6814 init_alignment_context (struct alignment_context *ac, rtx mem,
6817 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
6818 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
6821 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
6824 /* Alignment is unknown. */
6825 rtx byteoffset, addr, align;
6827 /* Force the address into a register. */
6828 addr = force_reg (Pmode, XEXP (mem, 0));
6830 /* Align it to SImode. */
6831 align = expand_simple_binop (Pmode, AND, addr,
6832 GEN_INT (-GET_MODE_SIZE (SImode)),
6833 NULL_RTX, 1, OPTAB_DIRECT);
6835 ac->memsi = gen_rtx_MEM (SImode, align);
6836 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
6837 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
6838 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
6840 /* Calculate shiftcount. */
6841 byteoffset = expand_simple_binop (Pmode, AND, addr,
6842 GEN_INT (GET_MODE_SIZE (SImode) - 1),
6843 NULL_RTX, 1, OPTAB_DIRECT);
6844 /* As we already have some offset, evaluate the remaining distance. */
6845 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
6846 NULL_RTX, 1, OPTAB_DIRECT);
6849 /* Shift is the byte count, but we need the bitcount. */
6850 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
6851 NULL_RTX, 1, OPTAB_DIRECT);
6853 /* Calculate masks. */
6854 ac->modemask = expand_simple_binop (SImode, ASHIFT,
6855 GEN_INT (GET_MODE_MASK (mode)),
6856 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
  ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
				      NULL_RTX, 1, OPTAB_DIRECT);
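  /* Worked example (big-endian): for a QImode access at byte offset 2
     within the aligned SImode word, shift = (3 - 2) * 8 = 8, so
     modemask = 0xff << 8 = 0x0000ff00 and modemaski = 0xffff00ff.  */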
6861 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
6862 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
6863 perform the merge in SEQ2. */
6866 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
6867 machine_mode mode, rtx val, rtx ins)
6874 tmp = copy_to_mode_reg (SImode, val);
6875 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
6879 *seq2 = get_insns ();
6886 /* Failed to use insv. Generate a two-part shift and mask. */
6888 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
6889 *seq1 = get_insns ();
6893 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
6894 *seq2 = get_insns ();
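/* A minimal plain-C sketch of the fallback above (a model only; the real
   code emits RTL via expand_simple_binop).  VAL is assumed to already have
   zeroes in the field, as arranged by the callers:

     static unsigned int
     merge_field (unsigned int val, unsigned int ins,
                  int shift, unsigned int mode_mask)
     {
       unsigned int tmp = (ins & mode_mask) << shift;  // SEQ1
       return val | tmp;                               // SEQ2
     }
*/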
6900 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
6901 the memory location, CMP the old value to compare MEM with and NEW_RTX the
6902 value to set if CMP == MEM. */
6905 s390_expand_cs_hqi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6906 rtx cmp, rtx new_rtx, bool is_weak)
6908 struct alignment_context ac;
6909 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
6910 rtx res = gen_reg_rtx (SImode);
6911 rtx_code_label *csloop = NULL, *csend = NULL;
6913 gcc_assert (MEM_P (mem));
6915 init_alignment_context (&ac, mem, mode);
6917 /* Load full word. Subsequent loads are performed by CS. */
6918 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
6919 NULL_RTX, 1, OPTAB_DIRECT);
6921 /* Prepare insertions of cmp and new_rtx into the loaded value. When
6922 possible, we try to use insv to make this happen efficiently. If
6923 that fails we'll generate code both inside and outside the loop. */
6924 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
6925 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
6932 /* Start CS loop. */
6935 /* Begin assuming success. */
6936 emit_move_insn (btarget, const1_rtx);
6938 csloop = gen_label_rtx ();
6939 csend = gen_label_rtx ();
6940 emit_label (csloop);
6943 /* val = "<mem>00..0<mem>"
6944 * cmp = "00..0<cmp>00..0"
6945 * new = "00..0<new>00..0"
6951 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv, CCZ1mode);
6953 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
6958 /* Jump to end if we're done (likely?). */
6959 s390_emit_jump (csend, cc);
6961 /* Check for changes outside mode, and loop internally if so.
6962 Arrange the moves so that the compare is adjacent to the
6963 branch so that we can generate CRJ. */
6964 tmp = copy_to_reg (val);
6965 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
6967 cc = s390_emit_compare (NE, val, tmp);
6968 s390_emit_jump (csloop, cc);
6971 emit_move_insn (btarget, const0_rtx);
6975 /* Return the correct part of the bitfield. */
6976 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
6977 NULL_RTX, 1, OPTAB_DIRECT), 1);
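/* Illustrative trace (a sketch, not authoritative): for a QImode CS on the
   byte in bits 16..23 (modemask = 0x00ff0000):
     val  = <mem> & 0xff00ffff            -- loaded word, field cleared
     cmpv = val | ((<cmp> & 0xff) << 16)
     newv = val | ((<new> & 0xff) << 16)
   CS succeeds only if the full word equals CMPV.  If merely bytes outside
   the field changed, RES & 0xff00ffff differs from the previous VAL, so
   CMPV/NEWV are rebuilt from RES and the loop retries.  */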
6980 /* Variant of s390_expand_cs for SI, DI and TI modes. */
6982 s390_expand_cs_tdsi (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
6983 rtx cmp, rtx new_rtx, bool is_weak)
6985 rtx output = vtarget;
6986 rtx_code_label *skip_cs_label = NULL;
6987 bool do_const_opt = false;
6989 if (!register_operand (output, mode))
6990 output = gen_reg_rtx (mode);
6992 /* If IS_WEAK is true and the INPUT value is a constant, compare the memory
6993 with the constant first and skip the compare_and_swap because it's very
6994 expensive and likely to fail anyway.
6995 Note 1: This is done only for IS_WEAK. C11 allows optimizations that may
6996 cause spurious failures in that case.
6997 Note 2: It may be useful to do this also for non-constant INPUT.
6998 Note 3: Currently only targets with "load on condition" are supported
6999 (z196 and newer). */
7002 && (mode == SImode || mode == DImode))
7003 do_const_opt = (is_weak && CONST_INT_P (cmp));
7007 rtx cc = gen_rtx_REG (CCZmode, CC_REGNUM);
7009 skip_cs_label = gen_label_rtx ();
7010 emit_move_insn (btarget, const0_rtx);
7011 if (CONST_INT_P (cmp) && INTVAL (cmp) == 0)
7013 rtvec lt = rtvec_alloc (2);
7015 /* Load-and-test + conditional jump. */
7017 = gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, mem, cmp));
7018 RTVEC_ELT (lt, 1) = gen_rtx_SET (output, mem);
7019 emit_insn (gen_rtx_PARALLEL (VOIDmode, lt));
7023 emit_move_insn (output, mem);
7024 emit_insn (gen_rtx_SET (cc, gen_rtx_COMPARE (CCZmode, output, cmp)));
7026 s390_emit_jump (skip_cs_label, gen_rtx_NE (VOIDmode, cc, const0_rtx));
7027 add_reg_br_prob_note (get_last_insn (),
7028 profile_probability::very_unlikely ());
7029 /* If the jump is not taken, OUTPUT is the expected value. */
7031 /* Reload newval to a register manually, *after* the compare and jump
7032 above. Otherwise Reload might place it before the jump. */
7035 cmp = force_reg (mode, cmp);
7036 new_rtx = force_reg (mode, new_rtx);
7037 s390_emit_compare_and_swap (EQ, output, mem, cmp, new_rtx,
7038 (do_const_opt) ? CCZmode : CCZ1mode);
7039 if (skip_cs_label != NULL)
7040 emit_label (skip_cs_label);
7042 /* We deliberately accept non-register operands in the predicate
7043 to ensure the write back to the output operand happens *before*
7044 the store-flags code below. This makes it easier for combine
7045 to merge the store-flags code with a potential test-and-branch
7046 pattern following (immediately!) afterwards. */
7047 if (output != vtarget)
7048 emit_move_insn (vtarget, output);
7054 /* Do not use gen_cstorecc4 here because it writes either 1 or 0, but
7055 btarget has already been initialized with 0 above. */
7056 cc = gen_rtx_REG (CCZmode, CC_REGNUM);
7057 cond = gen_rtx_EQ (VOIDmode, cc, const0_rtx);
7058 ite = gen_rtx_IF_THEN_ELSE (SImode, cond, const1_rtx, btarget);
7059 emit_insn (gen_rtx_SET (btarget, ite));
7065 cc = gen_rtx_REG (CCZ1mode, CC_REGNUM);
7066 cond = gen_rtx_EQ (SImode, cc, const0_rtx);
7067 emit_insn (gen_cstorecc4 (btarget, cond, cc, const0_rtx));
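/* Rough C model of the constant-compare shortcut above (a sketch under the
   stated assumptions: weak compare-exchange, constant expected value; the
   names cs/new_val are hypothetical):

     if (*mem != cmp)        // plain load, or load-and-test for cmp == 0
       {
         *vtarget = *mem;    // report the observed value
         return false;       // spurious failure is allowed by C11
       }
     return cs (mem, cmp, new_val);  // the real compare-and-swap
*/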
7071 /* Expand an atomic compare and swap operation. MEM is the memory location,
7072 CMP the old value to compare MEM with and NEW_RTX the value to set if CMP == MEM.
7076 s390_expand_cs (machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
7077 rtx cmp, rtx new_rtx, bool is_weak)
7084 s390_expand_cs_tdsi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7088 s390_expand_cs_hqi (mode, btarget, vtarget, mem, cmp, new_rtx, is_weak);
7095 /* Expand an atomic_exchange operation simulated with a compare-and-swap loop.
7096 The memory location MEM is set to INPUT. OUTPUT is set to the previous value of MEM.
7100 s390_expand_atomic_exchange_tdsi (rtx output, rtx mem, rtx input)
7102 machine_mode mode = GET_MODE (mem);
7103 rtx_code_label *csloop;
7106 && (mode == DImode || mode == SImode)
7107 && CONST_INT_P (input) && INTVAL (input) == 0)
7109 emit_move_insn (output, const0_rtx);
7111 emit_insn (gen_atomic_fetch_anddi (output, mem, const0_rtx, input));
7113 emit_insn (gen_atomic_fetch_andsi (output, mem, const0_rtx, input));
7117 input = force_reg (mode, input);
7118 emit_move_insn (output, mem);
7119 csloop = gen_label_rtx ();
7120 emit_label (csloop);
7121 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, output, mem, output,
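/* Note (illustrative): the zero-input special case above turns "exchange in
   the constant 0" into an atomic fetch-and-AND with zero -- the moral
   equivalent of __atomic_fetch_and (mem, 0, model) -- which clears the
   location and returns the old value without a CS retry loop; the
   interlocked-access facility (z196 and newer) provides this directly.  */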
7125 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
7126 and VAL the value to play with. If AFTER is true then store the value
7127 MEM holds after the operation, if AFTER is false then store the value MEM
7128 holds before the operation. If TARGET is zero then discard that value, else
7129 store it to TARGET. */
7132 s390_expand_atomic (machine_mode mode, enum rtx_code code,
7133 rtx target, rtx mem, rtx val, bool after)
7135 struct alignment_context ac;
7137 rtx new_rtx = gen_reg_rtx (SImode);
7138 rtx orig = gen_reg_rtx (SImode);
7139 rtx_code_label *csloop = gen_label_rtx ();
7141 gcc_assert (!target || register_operand (target, VOIDmode));
7142 gcc_assert (MEM_P (mem));
7144 init_alignment_context (&ac, mem, mode);
7146 /* Shift val to the correct bit positions.
7147 Preserve "icm", but prevent "ex icm". */
7148 if (!(ac.aligned && code == SET && MEM_P (val)))
7149 val = s390_expand_mask_and_shift (val, mode, ac.shift);
7151 /* Further preparation insns. */
7152 if (code == PLUS || code == MINUS)
7153 emit_move_insn (orig, val);
7154 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
7155 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
7156 NULL_RTX, 1, OPTAB_DIRECT);
7158 /* Load full word. Subsequent loads are performed by CS. */
7159 cmp = force_reg (SImode, ac.memsi);
7161 /* Start CS loop. */
7162 emit_label (csloop);
7163 emit_move_insn (new_rtx, cmp);
7165 /* Patch new with val at correct position. */
7170 val = expand_simple_binop (SImode, code, new_rtx, orig,
7171 NULL_RTX, 1, OPTAB_DIRECT);
7172 val = expand_simple_binop (SImode, AND, val, ac.modemask,
7173 NULL_RTX, 1, OPTAB_DIRECT);
7176 if (ac.aligned && MEM_P (val))
7177 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
7178 0, 0, SImode, val, false);
7181 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
7182 NULL_RTX, 1, OPTAB_DIRECT);
7183 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
7184 NULL_RTX, 1, OPTAB_DIRECT);
7190 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
7191 NULL_RTX, 1, OPTAB_DIRECT);
7193 case MULT: /* NAND */
7194 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
7195 NULL_RTX, 1, OPTAB_DIRECT);
7196 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
7197 NULL_RTX, 1, OPTAB_DIRECT);
7203 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
7204 ac.memsi, cmp, new_rtx,
7207 /* Return the correct part of the bitfield. */
7209 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
7210 after ? new_rtx : cmp, ac.shift,
7211 NULL_RTX, 1, OPTAB_DIRECT), 1);
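/* Worked example (illustrative): for CODE == MULT (the NAND emulation) on a
   field with modemask = 0x00ff0000, VAL was first widened above to
   "11..1<val>11..1" via XOR with modemaski, so the loop body computes
     new = cmp & val         -- field becomes old & val, rest unchanged
     new = new ^ 0x00ff0000  -- complement the field bits only
   leaving bytes outside the field intact while the field itself becomes
   ~(old & val).  */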
7214 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7215 We need to emit DTP-relative relocations. */
7217 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
7220 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
7225 fputs ("\t.long\t", file);
7228 fputs ("\t.quad\t", file);
7233 output_addr_const (file, x);
7234 fputs ("@DTPOFF", file);
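/* Example (illustrative): for SIZE == 8 and X a SYMBOL_REF for "foo", the
   emitted directive is

	.quad	foo@DTPOFF  */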
7237 /* Return the proper mode for REGNO being represented in the dwarf unwind table. */
7240 s390_dwarf_frame_reg_mode (int regno)
7242 machine_mode save_mode = default_dwarf_frame_reg_mode (regno);
7244 /* Make sure not to return DImode for any GPR with -m31 -mzarch. */
7245 if (GENERAL_REGNO_P (regno))
7248 /* The rightmost 64 bits of vector registers are call-clobbered. */
7249 if (GET_MODE_SIZE (save_mode) > 8)
7255 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
7256 /* Implement TARGET_MANGLE_TYPE. */
7259 s390_mangle_type (const_tree type)
7261 type = TYPE_MAIN_VARIANT (type);
7263 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
7264 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
7267 if (type == s390_builtin_types[BT_BV16QI]) return "U6__boolc";
7268 if (type == s390_builtin_types[BT_BV8HI]) return "U6__bools";
7269 if (type == s390_builtin_types[BT_BV4SI]) return "U6__booli";
7270 if (type == s390_builtin_types[BT_BV2DI]) return "U6__booll";
7272 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
7273 && TARGET_LONG_DOUBLE_128)
7276 /* For all other types, use normal C++ mangling. */
7281 /* In the name of slightly smaller debug output, and to cater to
7282 general assembler lossage, recognize various UNSPEC sequences
7283 and turn them back into a direct symbol reference. */
7286 s390_delegitimize_address (rtx orig_x)
7290 orig_x = delegitimize_mem_from_attrs (orig_x);
7293 /* Extract the symbol ref from:
7294 (plus:SI (reg:SI 12 %r12)
7295 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
7296 UNSPEC_GOTOFF/PLTOFF)))
7298 (plus:SI (reg:SI 12 %r12)
7299 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
7300 UNSPEC_GOTOFF/PLTOFF)
7301 (const_int 4 [0x4])))) */
7302 if (GET_CODE (x) == PLUS
7303 && REG_P (XEXP (x, 0))
7304 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
7305 && GET_CODE (XEXP (x, 1)) == CONST)
7307 HOST_WIDE_INT offset = 0;
7309 /* The const operand. */
7310 y = XEXP (XEXP (x, 1), 0);
7312 if (GET_CODE (y) == PLUS
7313 && GET_CODE (XEXP (y, 1)) == CONST_INT)
7315 offset = INTVAL (XEXP (y, 1));
7319 if (GET_CODE (y) == UNSPEC
7320 && (XINT (y, 1) == UNSPEC_GOTOFF
7321 || XINT (y, 1) == UNSPEC_PLTOFF))
7322 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
7325 if (GET_CODE (x) != MEM)
7329 if (GET_CODE (x) == PLUS
7330 && GET_CODE (XEXP (x, 1)) == CONST
7331 && GET_CODE (XEXP (x, 0)) == REG
7332 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7334 y = XEXP (XEXP (x, 1), 0);
7335 if (GET_CODE (y) == UNSPEC
7336 && XINT (y, 1) == UNSPEC_GOT)
7337 y = XVECEXP (y, 0, 0);
7341 else if (GET_CODE (x) == CONST)
7343 /* Extract the symbol ref from:
7344 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
7345 UNSPEC_PLT/GOTENT))) */
7348 if (GET_CODE (y) == UNSPEC
7349 && (XINT (y, 1) == UNSPEC_GOTENT
7350 || XINT (y, 1) == UNSPEC_PLT))
7351 y = XVECEXP (y, 0, 0);
7358 if (GET_MODE (orig_x) != Pmode)
7360 if (GET_MODE (orig_x) == BLKmode)
7362 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
7369 /* Output operand OP to stdio stream FILE.
7370 OP is an address (register + offset) which is not used to address data;
7371 instead the rightmost bits are interpreted as the value. */
7374 print_addrstyle_operand (FILE *file, rtx op)
7376 HOST_WIDE_INT offset;
7379 /* Extract base register and offset. */
7380 if (!s390_decompose_addrstyle_without_index (op, &base, &offset))
7386 gcc_assert (GET_CODE (base) == REG);
7387 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
7388 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
7391 /* Offsets are restricted to twelve bits. */
7392 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
7394 fprintf (file, "(%s)", reg_names[REGNO (base)]);
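/* Example (illustrative): for OP == (plus (reg %r1) (const_int 4100)) only
   the low twelve bits of the offset survive, so this prints "4(%r1)",
   since 4100 & 0xfff == 4.  */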
7397 /* Assigns the number of NOP halfwords to be emitted before and after the
7398 function label to *HW_BEFORE and *HW_AFTER. Neither pointer may be NULL.
7399 If hotpatching is disabled for the function, the values are set to zero.
7403 s390_function_num_hotpatch_hw (tree decl,
7409 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
7411 /* Handle the arguments of the hotpatch attribute. The values
7412 specified via attribute might override the cmdline argument
7416 tree args = TREE_VALUE (attr);
7418 *hw_before = TREE_INT_CST_LOW (TREE_VALUE (args));
7419 *hw_after = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args)));
7423 /* Use the values specified by the cmdline arguments. */
7424 *hw_before = s390_hotpatch_hw_before_label;
7425 *hw_after = s390_hotpatch_hw_after_label;
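/* Usage sketch (illustrative): a declaration such as

     void f (void) __attribute__ ((hotpatch (1, 2)));

   makes the attribute arguments win over any -mhotpatch= command-line
   setting, so *HW_BEFORE == 1 and *HW_AFTER == 2 here.  */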
7429 /* Write the current .machine and .machinemode specification to the assembler
7432 #ifdef HAVE_AS_MACHINE_MACHINEMODE
7434 s390_asm_output_machine_for_arch (FILE *asm_out_file)
7436 fprintf (asm_out_file, "\t.machinemode %s\n",
7437 (TARGET_ZARCH) ? "zarch" : "esa");
7438 fprintf (asm_out_file, "\t.machine \"%s",
7439 processor_table[s390_arch].binutils_name);
7440 if (S390_USE_ARCHITECTURE_MODIFIERS)
7444 cpu_flags = processor_flags_table[(int) s390_arch];
7445 if (TARGET_HTM && !(cpu_flags & PF_TX))
7446 fprintf (asm_out_file, "+htm");
7447 else if (!TARGET_HTM && (cpu_flags & PF_TX))
7448 fprintf (asm_out_file, "+nohtm");
7449 if (TARGET_VX && !(cpu_flags & PF_VX))
7450 fprintf (asm_out_file, "+vx");
7451 else if (!TARGET_VX && (cpu_flags & PF_VX))
7452 fprintf (asm_out_file, "+novx");
7454 fprintf (asm_out_file, "\"\n");
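/* Example output (illustrative), e.g. for -march=z13 with HTM disabled but
   the vector facility left enabled:

	.machinemode zarch
	.machine "z13+nohtm"  */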
7457 /* Write an extra function header before the very start of the function. */
7460 s390_asm_output_function_prefix (FILE *asm_out_file,
7461 const char *fnname ATTRIBUTE_UNUSED)
7463 if (DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl) == NULL)
7465 /* Since only the function-specific options are saved, with no indication of
7466 which options were explicitly set, it is too much work here to figure out which options
7467 have actually changed. Thus, generate .machine and .machinemode whenever a
7468 function has the target attribute or pragma. */
7469 fprintf (asm_out_file, "\t.machinemode push\n");
7470 fprintf (asm_out_file, "\t.machine push\n");
7471 s390_asm_output_machine_for_arch (asm_out_file);
7474 /* Write an extra function footer after the very end of the function. */
7477 s390_asm_declare_function_size (FILE *asm_out_file,
7478 const char *fnname, tree decl)
7480 if (!flag_inhibit_size_directive)
7481 ASM_OUTPUT_MEASURED_SIZE (asm_out_file, fnname);
7482 if (DECL_FUNCTION_SPECIFIC_TARGET (decl) == NULL)
7484 fprintf (asm_out_file, "\t.machine pop\n");
7485 fprintf (asm_out_file, "\t.machinemode pop\n");
7489 /* Write the extra assembler code needed to declare a function properly. */
7492 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
7495 int hw_before, hw_after;
7497 s390_function_num_hotpatch_hw (decl, &hw_before, &hw_after);
7500 unsigned int function_alignment;
7503 /* Add a trampoline code area before the function label and initialize it
7504 with two-byte nop instructions. This area can be overwritten with code
7505 that jumps to a patched version of the function. */
7506 asm_fprintf (asm_out_file, "\tnopr\t%%r0"
7507 "\t# pre-label NOPs for hotpatch (%d halfwords)\n",
7509 for (i = 1; i < hw_before; i++)
7510 fputs ("\tnopr\t%r0\n", asm_out_file);
7512 /* Note: The function label must be aligned so that (a) the bytes of the
7513 following nop do not cross a cacheline boundary, and (b) a jump address
7514 (eight bytes for 64-bit targets, four bytes for 32-bit targets) can be
7515 stored directly before the label without crossing a cacheline
7516 boundary. All this is necessary to make sure the trampoline code can
7517 be changed atomically.
7518 This alignment is done automatically using the FUNCTION_BOUNDARY, but
7519 if there are NOPs before the function label, the alignment is placed
7520 before them. So it is necessary to duplicate the alignment after the NOPs.
7522 function_alignment = MAX (8, DECL_ALIGN (decl) / BITS_PER_UNIT);
7523 if (! DECL_USER_ALIGN (decl))
7524 function_alignment = MAX (function_alignment,
7525 (unsigned int) align_functions);
7526 fputs ("\t# alignment for hotpatch\n", asm_out_file);
7527 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (function_alignment));
7530 if (S390_USE_TARGET_ATTRIBUTE && TARGET_DEBUG_ARG)
7532 asm_fprintf (asm_out_file, "\t# fn:%s ar%d\n", fname, s390_arch);
7533 asm_fprintf (asm_out_file, "\t# fn:%s tu%d\n", fname, s390_tune);
7534 asm_fprintf (asm_out_file, "\t# fn:%s sg%d\n", fname, s390_stack_guard);
7535 asm_fprintf (asm_out_file, "\t# fn:%s ss%d\n", fname, s390_stack_size);
7536 asm_fprintf (asm_out_file, "\t# fn:%s bc%d\n", fname, s390_branch_cost);
7537 asm_fprintf (asm_out_file, "\t# fn:%s wf%d\n", fname,
7538 s390_warn_framesize);
7539 asm_fprintf (asm_out_file, "\t# fn:%s ba%d\n", fname, TARGET_BACKCHAIN);
7540 asm_fprintf (asm_out_file, "\t# fn:%s hd%d\n", fname, TARGET_HARD_DFP);
7541 asm_fprintf (asm_out_file, "\t# fn:%s hf%d\n", fname, !TARGET_SOFT_FLOAT);
7542 asm_fprintf (asm_out_file, "\t# fn:%s ht%d\n", fname, TARGET_OPT_HTM);
7543 asm_fprintf (asm_out_file, "\t# fn:%s vx%d\n", fname, TARGET_OPT_VX);
7544 asm_fprintf (asm_out_file, "\t# fn:%s ps%d\n", fname,
7545 TARGET_PACKED_STACK);
7546 asm_fprintf (asm_out_file, "\t# fn:%s se%d\n", fname, TARGET_SMALL_EXEC);
7547 asm_fprintf (asm_out_file, "\t# fn:%s mv%d\n", fname, TARGET_MVCLE);
7548 asm_fprintf (asm_out_file, "\t# fn:%s zv%d\n", fname, TARGET_ZVECTOR);
7549 asm_fprintf (asm_out_file, "\t# fn:%s wd%d\n", fname,
7550 s390_warn_dynamicstack_p);
7552 ASM_OUTPUT_LABEL (asm_out_file, fname);
7554 asm_fprintf (asm_out_file,
7555 "\t# post-label NOPs for hotpatch (%d halfwords)\n",
7559 /* Output machine-dependent UNSPECs occurring in address constant X
7560 in assembler syntax to stdio stream FILE. Returns true if the
7561 constant X could be recognized, false otherwise. */
7564 s390_output_addr_const_extra (FILE *file, rtx x)
7566 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
7567 switch (XINT (x, 1))
7570 output_addr_const (file, XVECEXP (x, 0, 0));
7571 fprintf (file, "@GOTENT");
7574 output_addr_const (file, XVECEXP (x, 0, 0));
7575 fprintf (file, "@GOT");
7578 output_addr_const (file, XVECEXP (x, 0, 0));
7579 fprintf (file, "@GOTOFF");
7582 output_addr_const (file, XVECEXP (x, 0, 0));
7583 fprintf (file, "@PLT");
7586 output_addr_const (file, XVECEXP (x, 0, 0));
7587 fprintf (file, "@PLTOFF");
7590 output_addr_const (file, XVECEXP (x, 0, 0));
7591 fprintf (file, "@TLSGD");
7594 assemble_name (file, get_some_local_dynamic_name ());
7595 fprintf (file, "@TLSLDM");
7598 output_addr_const (file, XVECEXP (x, 0, 0));
7599 fprintf (file, "@DTPOFF");
7602 output_addr_const (file, XVECEXP (x, 0, 0));
7603 fprintf (file, "@NTPOFF");
7605 case UNSPEC_GOTNTPOFF:
7606 output_addr_const (file, XVECEXP (x, 0, 0));
7607 fprintf (file, "@GOTNTPOFF");
7609 case UNSPEC_INDNTPOFF:
7610 output_addr_const (file, XVECEXP (x, 0, 0));
7611 fprintf (file, "@INDNTPOFF");
7615 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
7616 switch (XINT (x, 1))
7618 case UNSPEC_POOL_OFFSET:
7619 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
7620 output_addr_const (file, x);
7626 /* Output address operand ADDR in assembler syntax to
7627 stdio stream FILE. */
7630 print_operand_address (FILE *file, rtx addr)
7632 struct s390_address ad;
7633 memset (&ad, 0, sizeof (s390_address));
7635 if (s390_loadrelative_operand_p (addr, NULL, NULL))
7639 output_operand_lossage ("symbolic memory references are "
7640 "only supported on z10 or later");
7643 output_addr_const (file, addr);
7647 if (!s390_decompose_address (addr, &ad)
7648 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7649 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
7650 output_operand_lossage ("cannot decompose address");
7653 output_addr_const (file, ad.disp);
7655 fprintf (file, "0");
7657 if (ad.base && ad.indx)
7658 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
7659 reg_names[REGNO (ad.base)]);
7661 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7664 /* Output operand X in assembler syntax to stdio stream FILE.
7665 CODE specifies the format flag. The following format flags
7668 'C': print opcode suffix for branch condition.
7669 'D': print opcode suffix for inverse branch condition.
7670 'E': print opcode suffix for branch on index instruction.
7671 'G': print the size of the operand in bytes.
7672 'J': print tls_load/tls_gdcall/tls_ldcall suffix
7673 'M': print the second word of a TImode operand.
7674 'N': print the second word of a DImode operand.
7675 'O': print only the displacement of a memory reference or address.
7676 'R': print only the base register of a memory reference or address.
7677 'S': print S-type memory reference (base+displacement).
7678 'Y': print address style operand without index (e.g. shift count or setmem operand).
7681 'b': print integer X as if it's an unsigned byte.
7682 'c': print integer X as if it's a signed byte.
7683 'e': "end" contiguous bitmask X in either DImode or vector inner mode.
7684 'f': "end" contiguous bitmask X in SImode.
7685 'h': print integer X as if it's a signed halfword.
7686 'i': print the first nonzero HImode part of X.
7687 'j': print the first HImode part unequal to -1 of X.
7688 'k': print the first nonzero SImode part of X.
7689 'm': print the first SImode part unequal to -1 of X.
7690 'o': print integer X as if it's an unsigned 32-bit word.
7691 's': "start" of contiguous bitmask X in either DImode or vector inner mode.
7692 't': CONST_INT: "start" of contiguous bitmask X in SImode.
7693 CONST_VECTOR: Generate a bitmask for vgbm instruction.
7694 'x': print integer X as if it's an unsigned halfword.
7695 'v': print register number as vector register (v1 instead of f1).
7699 print_operand (FILE *file, rtx x, int code)
7706 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
7710 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
7714 if (GET_CODE (x) == LE)
7715 fprintf (file, "l");
7716 else if (GET_CODE (x) == GT)
7717 fprintf (file, "h");
7719 output_operand_lossage ("invalid comparison operator "
7720 "for 'E' output modifier");
7724 if (GET_CODE (x) == SYMBOL_REF)
7726 fprintf (file, "%s", ":tls_load:");
7727 output_addr_const (file, x);
7729 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
7731 fprintf (file, "%s", ":tls_gdcall:");
7732 output_addr_const (file, XVECEXP (x, 0, 0));
7734 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
7736 fprintf (file, "%s", ":tls_ldcall:");
7737 const char *name = get_some_local_dynamic_name ();
7739 assemble_name (file, name);
7742 output_operand_lossage ("invalid reference for 'J' output modifier");
7746 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
7751 struct s390_address ad;
7754 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7757 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7760 output_operand_lossage ("invalid address for 'O' output modifier");
7765 output_addr_const (file, ad.disp);
7767 fprintf (file, "0");
7773 struct s390_address ad;
7776 ret = s390_decompose_address (MEM_P (x) ? XEXP (x, 0) : x, &ad);
7779 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7782 output_operand_lossage ("invalid address for 'R' output modifier");
7787 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
7789 fprintf (file, "0");
7795 struct s390_address ad;
7800 output_operand_lossage ("memory reference expected for "
7801 "'S' output modifier");
7804 ret = s390_decompose_address (XEXP (x, 0), &ad);
7807 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
7810 output_operand_lossage ("invalid address for 'S' output modifier");
7815 output_addr_const (file, ad.disp);
7817 fprintf (file, "0");
7820 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
7825 if (GET_CODE (x) == REG)
7826 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7827 else if (GET_CODE (x) == MEM)
7828 x = change_address (x, VOIDmode,
7829 plus_constant (Pmode, XEXP (x, 0), 4));
7831 output_operand_lossage ("register or memory expression expected "
7832 "for 'N' output modifier");
7836 if (GET_CODE (x) == REG)
7837 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
7838 else if (GET_CODE (x) == MEM)
7839 x = change_address (x, VOIDmode,
7840 plus_constant (Pmode, XEXP (x, 0), 8));
7842 output_operand_lossage ("register or memory expression expected "
7843 "for 'M' output modifier");
7847 print_addrstyle_operand (file, x);
7851 switch (GET_CODE (x))
7854 /* Print FP regs as fx instead of vx when they are accessed
7855 through non-vector mode. */
7857 || VECTOR_NOFP_REG_P (x)
7858 || (FP_REG_P (x) && VECTOR_MODE_P (GET_MODE (x)))
7859 || (VECTOR_REG_P (x)
7860 && (GET_MODE_SIZE (GET_MODE (x)) /
7861 s390_class_max_nregs (FP_REGS, GET_MODE (x))) > 8))
7862 fprintf (file, "%%v%s", reg_names[REGNO (x)] + 2);
7864 fprintf (file, "%s", reg_names[REGNO (x)]);
7868 output_address (GET_MODE (x), XEXP (x, 0));
7875 output_addr_const (file, x);
7888 ival = ((ival & 0xff) ^ 0x80) - 0x80;
7894 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
7897 ival = s390_extract_part (x, HImode, 0);
7900 ival = s390_extract_part (x, HImode, -1);
7903 ival = s390_extract_part (x, SImode, 0);
7906 ival = s390_extract_part (x, SImode, -1);
7918 len = (code == 's' || code == 'e' ? 64 : 32);
7919 ok = s390_contiguous_bitmask_p (ival, true, len, &start, &end);
7921 if (code == 's' || code == 't')
7928 output_operand_lossage ("invalid constant for output modifier '%c'", code);
7930 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7933 case CONST_WIDE_INT:
7935 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7936 CONST_WIDE_INT_ELT (x, 0) & 0xff);
7937 else if (code == 'x')
7938 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7939 CONST_WIDE_INT_ELT (x, 0) & 0xffff);
7940 else if (code == 'h')
7941 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7942 ((CONST_WIDE_INT_ELT (x, 0) & 0xffff) ^ 0x8000) - 0x8000);
7946 output_operand_lossage ("invalid constant - try using "
7947 "an output modifier");
7949 output_operand_lossage ("invalid constant for output modifier '%c'",
7957 gcc_assert (const_vec_duplicate_p (x));
7958 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
7959 ((INTVAL (XVECEXP (x, 0, 0)) & 0xffff) ^ 0x8000) - 0x8000);
7967 ok = s390_contiguous_bitmask_vector_p (x, &start, &end);
7969 ival = (code == 's') ? start : end;
7970 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
7976 bool ok = s390_bytemask_vector_p (x, &mask);
7978 fprintf (file, "%u", mask);
7983 output_operand_lossage ("invalid constant vector for output "
7984 "modifier '%c'", code);
7990 output_operand_lossage ("invalid expression - try using "
7991 "an output modifier");
7993 output_operand_lossage ("invalid expression for output "
7994 "modifier '%c'", code);
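/* Worked example (illustrative): for X == (const_int 0xfffe) the 'h'
   modifier computes ((0xfffe & 0xffff) ^ 0x8000) - 0x8000 == -2 and prints
   -2, the constant viewed as a signed halfword, whereas 'x' would print the
   unsigned halfword 65534.  */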
7999 /* Target hook for assembling integer objects. We need to define it
8000 here to work around a bug in some versions of GAS, which couldn't
8001 handle values smaller than INT_MIN when printed in decimal. */
8004 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
8006 if (size == 8 && aligned_p
8007 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
8009 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
8013 return default_assemble_integer (x, size, aligned_p);
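/* Example (illustrative): INTVAL (x) == -2147483649 (one below INT_MIN, on
   a 64-bit HOST_WIDE_INT) is emitted as

	.quad	0xffffffff7fffffff

   rather than in decimal form, which those GAS versions mis-parsed.  */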
8016 /* Returns true if register REGNO is used for forming
8017 a memory address in expression X. */
8020 reg_used_in_mem_p (int regno, rtx x)
8022 enum rtx_code code = GET_CODE (x);
8028 if (refers_to_regno_p (regno, XEXP (x, 0)))
8031 else if (code == SET
8032 && GET_CODE (SET_DEST (x)) == PC)
8034 if (refers_to_regno_p (regno, SET_SRC (x)))
8038 fmt = GET_RTX_FORMAT (code);
8039 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8042 && reg_used_in_mem_p (regno, XEXP (x, i)))
8045 else if (fmt[i] == 'E')
8046 for (j = 0; j < XVECLEN (x, i); j++)
8047 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
8053 /* Returns true if expression DEP_RTX sets an address register
8054 used by instruction INSN to address memory. */
8057 addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
8061 if (NONJUMP_INSN_P (dep_rtx))
8062 dep_rtx = PATTERN (dep_rtx);
8064 if (GET_CODE (dep_rtx) == SET)
8066 target = SET_DEST (dep_rtx);
8067 if (GET_CODE (target) == STRICT_LOW_PART)
8068 target = XEXP (target, 0);
8069 while (GET_CODE (target) == SUBREG)
8070 target = SUBREG_REG (target);
8072 if (GET_CODE (target) == REG)
8074 int regno = REGNO (target);
8076 if (s390_safe_attr_type (insn) == TYPE_LA)
8078 pat = PATTERN (insn);
8079 if (GET_CODE (pat) == PARALLEL)
8081 gcc_assert (XVECLEN (pat, 0) == 2);
8082 pat = XVECEXP (pat, 0, 0);
8084 gcc_assert (GET_CODE (pat) == SET);
8085 return refers_to_regno_p (regno, SET_SRC (pat));
8087 else if (get_attr_atype (insn) == ATYPE_AGEN)
8088 return reg_used_in_mem_p (regno, PATTERN (insn));
8094 /* Return 1 if DEP_INSN sets a register used by INSN in the agen unit. */
8097 s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
8099 rtx dep_rtx = PATTERN (dep_insn);
8102 if (GET_CODE (dep_rtx) == SET
8103 && addr_generation_dependency_p (dep_rtx, insn))
8105 else if (GET_CODE (dep_rtx) == PARALLEL)
8107 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
8109 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
8117 /* A C statement (sans semicolon) to update the integer scheduling priority
8118 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
8119 reduce the priority to execute INSN later. Do not define this macro if
8120 you do not need to adjust the scheduling priorities of insns.
8122 A STD instruction should be scheduled earlier,
8123 in order to use the bypass. */
8125 s390_adjust_priority (rtx_insn *insn, int priority)
8127 if (! INSN_P (insn))
8130 if (s390_tune <= PROCESSOR_2064_Z900)
8133 switch (s390_safe_attr_type (insn))
8137 priority = priority << 3;
8141 priority = priority << 1;
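/* Example (illustrative): with an incoming priority of 4, a floating-point
   store is boosted to 4 << 3 == 32 while an ordinary store becomes
   4 << 1 == 8, so the scheduler issues the FP store early enough to catch
   the bypass.  */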
8150 /* The number of instructions that can be issued per cycle. */
8153 s390_issue_rate (void)
8157 case PROCESSOR_2084_Z990:
8158 case PROCESSOR_2094_Z9_109:
8159 case PROCESSOR_2094_Z9_EC:
8160 case PROCESSOR_2817_Z196:
8162 case PROCESSOR_2097_Z10:
8164 case PROCESSOR_9672_G5:
8165 case PROCESSOR_9672_G6:
8166 case PROCESSOR_2064_Z900:
8167 /* Starting with EC12 we use the sched_reorder hook to take care
8168 of instruction dispatch constraints. The algorithm only
8169 picks the best instruction and assumes only a single
8170 instruction gets issued per cycle. */
8171 case PROCESSOR_2827_ZEC12:
8172 case PROCESSOR_2964_Z13:
8173 case PROCESSOR_3906_Z14:
8180 s390_first_cycle_multipass_dfa_lookahead (void)
8185 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
8186 Fix up MEMs as required. */
8189 annotate_constant_pool_refs (rtx *x)
8194 gcc_assert (GET_CODE (*x) != SYMBOL_REF
8195 || !CONSTANT_POOL_ADDRESS_P (*x));
8197 /* Literal pool references can only occur inside a MEM ... */
8198 if (GET_CODE (*x) == MEM)
8200 rtx memref = XEXP (*x, 0);
8202 if (GET_CODE (memref) == SYMBOL_REF
8203 && CONSTANT_POOL_ADDRESS_P (memref))
8205 rtx base = cfun->machine->base_reg;
8206 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
8209 *x = replace_equiv_address (*x, addr);
8213 if (GET_CODE (memref) == CONST
8214 && GET_CODE (XEXP (memref, 0)) == PLUS
8215 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
8216 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
8217 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
8219 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
8220 rtx sym = XEXP (XEXP (memref, 0), 0);
8221 rtx base = cfun->machine->base_reg;
8222 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8225 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
8230 /* ... or a load-address type pattern. */
8231 if (GET_CODE (*x) == SET)
8233 rtx addrref = SET_SRC (*x);
8235 if (GET_CODE (addrref) == SYMBOL_REF
8236 && CONSTANT_POOL_ADDRESS_P (addrref))
8238 rtx base = cfun->machine->base_reg;
8239 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
8242 SET_SRC (*x) = addr;
8246 if (GET_CODE (addrref) == CONST
8247 && GET_CODE (XEXP (addrref, 0)) == PLUS
8248 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
8249 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
8250 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
8252 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
8253 rtx sym = XEXP (XEXP (addrref, 0), 0);
8254 rtx base = cfun->machine->base_reg;
8255 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
8258 SET_SRC (*x) = plus_constant (Pmode, addr, off);
8263 /* Annotate LTREL_BASE as well. */
8264 if (GET_CODE (*x) == UNSPEC
8265 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8267 rtx base = cfun->machine->base_reg;
8268 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
8273 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8274 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8278 annotate_constant_pool_refs (&XEXP (*x, i));
8280 else if (fmt[i] == 'E')
8282 for (j = 0; j < XVECLEN (*x, i); j++)
8283 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
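/* Illustrative transformation (a sketch): a constant-pool reference such as

     (mem (symbol_ref ".LC0"))

   is rewritten, with BASE the literal pool base register, into

     (mem (unspec [(symbol_ref ".LC0") (reg BASE)] UNSPEC_LTREF))

   making the dependency on the pool base explicit for later passes.  */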
8288 /* Split all branches that exceed the maximum distance.
8289 Returns true if this created a new literal pool entry. */
8292 s390_split_branches (void)
8294 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8295 int new_literal = 0, ret;
8300 /* We need correct insn addresses. */
8302 shorten_branches (get_insns ());
8304 /* Find all branches that exceed 64KB, and split them. */
8306 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8308 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
8311 pat = PATTERN (insn);
8312 if (GET_CODE (pat) == PARALLEL)
8313 pat = XVECEXP (pat, 0, 0);
8314 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
8317 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
8319 label = &SET_SRC (pat);
8321 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
8323 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
8324 label = &XEXP (SET_SRC (pat), 1);
8325 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
8326 label = &XEXP (SET_SRC (pat), 2);
8333 if (get_attr_length (insn) <= 4)
8336 /* We are going to use the return register as scratch register,
8337 make sure it will be saved/restored by the prologue/epilogue. */
8338 cfun_frame_layout.save_return_addr_p = 1;
8343 rtx mem = force_const_mem (Pmode, *label);
8344 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, mem),
8346 INSN_ADDRESSES_NEW (set_insn, -1);
8347 annotate_constant_pool_refs (&PATTERN (set_insn));
8354 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
8355 UNSPEC_LTREL_OFFSET);
8356 target = gen_rtx_CONST (Pmode, target);
8357 target = force_const_mem (Pmode, target);
8358 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (temp_reg, target),
8360 INSN_ADDRESSES_NEW (set_insn, -1);
8361 annotate_constant_pool_refs (&PATTERN (set_insn));
8363 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
8364 cfun->machine->base_reg),
8366 target = gen_rtx_PLUS (Pmode, temp_reg, target);
8369 ret = validate_change (insn, label, target, 0);
8377 /* Find an annotated literal pool symbol referenced in RTX X,
8378 and store it at REF. Will abort if X contains references to
8379 more than one such pool symbol; multiple references to the same
8380 symbol are allowed, however.
8382 The rtx pointed to by REF must be initialized to NULL_RTX
8383 by the caller before calling this routine. */
8386 find_constant_pool_ref (rtx x, rtx *ref)
8391 /* Ignore LTREL_BASE references. */
8392 if (GET_CODE (x) == UNSPEC
8393 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8395 /* Likewise POOL_ENTRY insns. */
8396 if (GET_CODE (x) == UNSPEC_VOLATILE
8397 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
8400 gcc_assert (GET_CODE (x) != SYMBOL_REF
8401 || !CONSTANT_POOL_ADDRESS_P (x));
8403 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
8405 rtx sym = XVECEXP (x, 0, 0);
8406 gcc_assert (GET_CODE (sym) == SYMBOL_REF
8407 && CONSTANT_POOL_ADDRESS_P (sym));
8409 if (*ref == NULL_RTX)
8412 gcc_assert (*ref == sym);
8417 fmt = GET_RTX_FORMAT (GET_CODE (x));
8418 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8422 find_constant_pool_ref (XEXP (x, i), ref);
8424 else if (fmt[i] == 'E')
8426 for (j = 0; j < XVECLEN (x, i); j++)
8427 find_constant_pool_ref (XVECEXP (x, i, j), ref);
8432 /* Replace every reference to the annotated literal pool
8433 symbol REF in X by its base plus OFFSET. */
8436 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
8441 gcc_assert (*x != ref);
8443 if (GET_CODE (*x) == UNSPEC
8444 && XINT (*x, 1) == UNSPEC_LTREF
8445 && XVECEXP (*x, 0, 0) == ref)
8447 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
8451 if (GET_CODE (*x) == PLUS
8452 && GET_CODE (XEXP (*x, 1)) == CONST_INT
8453 && GET_CODE (XEXP (*x, 0)) == UNSPEC
8454 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
8455 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
8457 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
8458 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
8462 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8463 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8467 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
8469 else if (fmt[i] == 'E')
8471 for (j = 0; j < XVECLEN (*x, i); j++)
8472 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
8477 /* Check whether X contains an UNSPEC_LTREL_BASE.
8478 Return its constant pool symbol if found, NULL_RTX otherwise. */
8481 find_ltrel_base (rtx x)
8486 if (GET_CODE (x) == UNSPEC
8487 && XINT (x, 1) == UNSPEC_LTREL_BASE)
8488 return XVECEXP (x, 0, 0);
8490 fmt = GET_RTX_FORMAT (GET_CODE (x));
8491 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8495 rtx fnd = find_ltrel_base (XEXP (x, i));
8499 else if (fmt[i] == 'E')
8501 for (j = 0; j < XVECLEN (x, i); j++)
8503 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
8513 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
8516 replace_ltrel_base (rtx *x)
8521 if (GET_CODE (*x) == UNSPEC
8522 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
8524 *x = XVECEXP (*x, 0, 1);
8528 fmt = GET_RTX_FORMAT (GET_CODE (*x));
8529 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
8533 replace_ltrel_base (&XEXP (*x, i));
8535 else if (fmt[i] == 'E')
8537 for (j = 0; j < XVECLEN (*x, i); j++)
8538 replace_ltrel_base (&XVECEXP (*x, i, j));
8544 /* We keep a list of constants which we have to add to internal
8545 constant tables in the middle of large functions. */
8547 #define NR_C_MODES 32
8548 machine_mode constant_modes[NR_C_MODES] =
8550 TFmode, TImode, TDmode,
8551 V16QImode, V8HImode, V4SImode, V2DImode, V1TImode,
8552 V4SFmode, V2DFmode, V1TFmode,
8553 DFmode, DImode, DDmode,
8554 V8QImode, V4HImode, V2SImode, V1DImode, V2SFmode, V1DFmode,
8555 SFmode, SImode, SDmode,
8556 V4QImode, V2HImode, V1SImode, V1SFmode,
8565 struct constant *next;
8567 rtx_code_label *label;
8570 struct constant_pool
8572 struct constant_pool *next;
8573 rtx_insn *first_insn;
8574 rtx_insn *pool_insn;
8576 rtx_insn *emit_pool_after;
8578 struct constant *constants[NR_C_MODES];
8579 struct constant *execute;
8580 rtx_code_label *label;
8584 /* Allocate new constant_pool structure. */
8586 static struct constant_pool *
8587 s390_alloc_pool (void)
8589 struct constant_pool *pool;
8592 pool = (struct constant_pool *) xmalloc (sizeof *pool);
8594 for (i = 0; i < NR_C_MODES; i++)
8595 pool->constants[i] = NULL;
8597 pool->execute = NULL;
8598 pool->label = gen_label_rtx ();
8599 pool->first_insn = NULL;
8600 pool->pool_insn = NULL;
8601 pool->insns = BITMAP_ALLOC (NULL);
8603 pool->emit_pool_after = NULL;
8608 /* Create new constant pool covering instructions starting at INSN
8609 and chain it to the end of POOL_LIST. */
8611 static struct constant_pool *
8612 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
8614 struct constant_pool *pool, **prev;
8616 pool = s390_alloc_pool ();
8617 pool->first_insn = insn;
8619 for (prev = pool_list; *prev; prev = &(*prev)->next)
8626 /* End range of instructions covered by POOL at INSN and emit
8627 placeholder insn representing the pool. */
8630 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
8632 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
8635 insn = get_last_insn ();
8637 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
8638 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
8641 /* Add INSN to the list of insns covered by POOL. */
8644 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
8646 bitmap_set_bit (pool->insns, INSN_UID (insn));
8649 /* Return pool out of POOL_LIST that covers INSN. */
8651 static struct constant_pool *
8652 s390_find_pool (struct constant_pool *pool_list, rtx insn)
8654 struct constant_pool *pool;
8656 for (pool = pool_list; pool; pool = pool->next)
8657 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
8663 /* Add constant VAL of mode MODE to the constant pool POOL. */
8666 s390_add_constant (struct constant_pool *pool, rtx val, machine_mode mode)
8671 for (i = 0; i < NR_C_MODES; i++)
8672 if (constant_modes[i] == mode)
8674 gcc_assert (i != NR_C_MODES);
8676 for (c = pool->constants[i]; c != NULL; c = c->next)
8677 if (rtx_equal_p (val, c->value))
8682 c = (struct constant *) xmalloc (sizeof *c);
8684 c->label = gen_label_rtx ();
8685 c->next = pool->constants[i];
8686 pool->constants[i] = c;
8687 pool->size += GET_MODE_SIZE (mode);
8691 /* Return an rtx that represents the offset of X from the start of pool POOL. */
8695 s390_pool_offset (struct constant_pool *pool, rtx x)
8699 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
8700 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
8701 UNSPEC_POOL_OFFSET);
8702 return gen_rtx_CONST (GET_MODE (x), x);
8705 /* Find constant VAL of mode MODE in the constant pool POOL.
8706 Return an RTX describing the distance from the start of
8707 the pool to the location of the new constant. */
8710 s390_find_constant (struct constant_pool *pool, rtx val,
8716 for (i = 0; i < NR_C_MODES; i++)
8717 if (constant_modes[i] == mode)
8719 gcc_assert (i != NR_C_MODES);
8721 for (c = pool->constants[i]; c != NULL; c = c->next)
8722 if (rtx_equal_p (val, c->value))
8727 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8730 /* Check whether INSN is an execute. Return the label_ref to its
8731 execute target template if so, NULL_RTX otherwise. */
8734 s390_execute_label (rtx insn)
8736 if (NONJUMP_INSN_P (insn)
8737 && GET_CODE (PATTERN (insn)) == PARALLEL
8738 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
8739 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
8740 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
8745 /* Add execute target for INSN to the constant pool POOL. */
8748 s390_add_execute (struct constant_pool *pool, rtx insn)
8752 for (c = pool->execute; c != NULL; c = c->next)
8753 if (INSN_UID (insn) == INSN_UID (c->value))
8758 c = (struct constant *) xmalloc (sizeof *c);
8760 c->label = gen_label_rtx ();
8761 c->next = pool->execute;
8767 /* Find execute target for INSN in the constant pool POOL.
8768 Return an RTX describing the distance from the start of
8769 the pool to the location of the execute target. */
8772 s390_find_execute (struct constant_pool *pool, rtx insn)
8776 for (c = pool->execute; c != NULL; c = c->next)
8777 if (INSN_UID (insn) == INSN_UID (c->value))
8782 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
8785 /* For an execute INSN, extract the execute target template. */
8788 s390_execute_target (rtx insn)
8790 rtx pattern = PATTERN (insn);
8791 gcc_assert (s390_execute_label (insn));
8793 if (XVECLEN (pattern, 0) == 2)
8795 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
8799 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
8802 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
8803 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
8805 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
8811 /* Indicate that INSN cannot be duplicated. This is the case for
8812 execute insns that carry a unique label. */
8815 s390_cannot_copy_insn_p (rtx_insn *insn)
8817 rtx label = s390_execute_label (insn);
8818 return label && label != const0_rtx;
8821 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
8822 do not emit the pool base label. */
8825 s390_dump_pool (struct constant_pool *pool, bool remote_label)
8828 rtx_insn *insn = pool->pool_insn;
8831 /* Switch to rodata section. */
8832 if (TARGET_CPU_ZARCH)
8834 insn = emit_insn_after (gen_pool_section_start (), insn);
8835 INSN_ADDRESSES_NEW (insn, -1);
8838 /* Ensure minimum pool alignment. */
8839 if (TARGET_CPU_ZARCH)
8840 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
8842 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
8843 INSN_ADDRESSES_NEW (insn, -1);
8845 /* Emit pool base label. */
8848 insn = emit_label_after (pool->label, insn);
8849 INSN_ADDRESSES_NEW (insn, -1);
8852 /* Dump constants in descending alignment requirement order,
8853 ensuring proper alignment for every constant. */
8854 for (i = 0; i < NR_C_MODES; i++)
8855 for (c = pool->constants[i]; c; c = c->next)
8857 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
8858 rtx value = copy_rtx (c->value);
8859 if (GET_CODE (value) == CONST
8860 && GET_CODE (XEXP (value, 0)) == UNSPEC
8861 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
8862 && XVECLEN (XEXP (value, 0), 0) == 1)
8863 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
8865 insn = emit_label_after (c->label, insn);
8866 INSN_ADDRESSES_NEW (insn, -1);
8868 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
8869 gen_rtvec (1, value),
8870 UNSPECV_POOL_ENTRY);
8871 insn = emit_insn_after (value, insn);
8872 INSN_ADDRESSES_NEW (insn, -1);
8875 /* Ensure minimum alignment for instructions. */
8876 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
8877 INSN_ADDRESSES_NEW (insn, -1);
8879 /* Output in-pool execute template insns. */
8880 for (c = pool->execute; c; c = c->next)
8882 insn = emit_label_after (c->label, insn);
8883 INSN_ADDRESSES_NEW (insn, -1);
8885 insn = emit_insn_after (s390_execute_target (c->value), insn);
8886 INSN_ADDRESSES_NEW (insn, -1);
8889 /* Switch back to previous section. */
8890 if (TARGET_CPU_ZARCH)
8892 insn = emit_insn_after (gen_pool_section_end (), insn);
8893 INSN_ADDRESSES_NEW (insn, -1);
8896 insn = emit_barrier_after (insn);
8897 INSN_ADDRESSES_NEW (insn, -1);
8899 /* Remove placeholder insn. */
8900 remove_insn (pool->pool_insn);
8903 /* Free all memory used by POOL. */
8906 s390_free_pool (struct constant_pool *pool)
8908 struct constant *c, *next;
8911 for (i = 0; i < NR_C_MODES; i++)
8912 for (c = pool->constants[i]; c; c = next)
8918 for (c = pool->execute; c; c = next)
8924 BITMAP_FREE (pool->insns);
8929 /* Collect main literal pool. Return NULL on overflow. */
8931 static struct constant_pool *
8932 s390_mainpool_start (void)
8934 struct constant_pool *pool;
8937 pool = s390_alloc_pool ();
8939 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8941 if (NONJUMP_INSN_P (insn)
8942 && GET_CODE (PATTERN (insn)) == SET
8943 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
8944 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
8946 /* There might be two main_pool instructions if base_reg
8947 is call-clobbered; one for shrink-wrapped code and one
8948 for the rest. We want to keep the first. */
8949 if (pool->pool_insn)
8951 insn = PREV_INSN (insn);
8952 delete_insn (NEXT_INSN (insn));
8955 pool->pool_insn = insn;
8958 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
8960 s390_add_execute (pool, insn);
8962 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
8964 rtx pool_ref = NULL_RTX;
8965 find_constant_pool_ref (PATTERN (insn), &pool_ref);
8968 rtx constant = get_pool_constant (pool_ref);
8969 machine_mode mode = get_pool_mode (pool_ref);
8970 s390_add_constant (pool, constant, mode);
8974 /* If hot/cold partitioning is enabled we have to make sure that
8975 the literal pool is emitted in the same section where the
8976 initialization of the literal pool base pointer takes place.
8977 emit_pool_after is only used in the non-overflow case on
8978 non-z/Architecture CPUs, where we can emit the literal pool at the
8979 end of the function body within the text section.
8981 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
8982 && !pool->emit_pool_after)
8983 pool->emit_pool_after = PREV_INSN (insn);
8986 gcc_assert (pool->pool_insn || pool->size == 0);
8988 if (pool->size >= 4096)
8990 /* We're going to chunkify the pool, so remove the main
8991 pool placeholder insn. */
8992 remove_insn (pool->pool_insn);
8994 s390_free_pool (pool);
8998 /* If the function ends with the section where the literal pool
8999 should be emitted, set the marker to its end. */
9000 if (pool && !pool->emit_pool_after)
9001 pool->emit_pool_after = get_last_insn ();
9006 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9007 Modify the current function to output the pool constants as well as
9008 the pool register setup instruction. */
9011 s390_mainpool_finish (struct constant_pool *pool)
9013 rtx base_reg = cfun->machine->base_reg;
9015 /* If the pool is empty, we're done. */
9016 if (pool->size == 0)
9018 /* We don't actually need a base register after all. */
9019 cfun->machine->base_reg = NULL_RTX;
9021 if (pool->pool_insn)
9022 remove_insn (pool->pool_insn);
9023 s390_free_pool (pool);
9027 /* We need correct insn addresses. */
9028 shorten_branches (get_insns ());
9030 /* On zSeries, we use a LARL to load the pool register. The pool is
9031 located in the .rodata section, so we emit it after the function. */
9032 if (TARGET_CPU_ZARCH)
9034 rtx set = gen_main_base_64 (base_reg, pool->label);
9035 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
9036 INSN_ADDRESSES_NEW (insn, -1);
9037 remove_insn (pool->pool_insn);
9039 insn = get_last_insn ();
9040 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9041 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9043 s390_dump_pool (pool, 0);
9046 /* On S/390, if the total size of the function's code plus literal pool
9047 does not exceed 4096 bytes, we use BASR to set up a function base
9048 pointer, and emit the literal pool at the end of the function. */
9049 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
9050 + pool->size + 8 /* alignment slop */ < 4096)
9052 rtx set = gen_main_base_31_small (base_reg, pool->label);
9053 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
9054 INSN_ADDRESSES_NEW (insn, -1);
9055 remove_insn (pool->pool_insn);
9057 insn = emit_label_after (pool->label, insn);
9058 INSN_ADDRESSES_NEW (insn, -1);
9060 /* emit_pool_after will be set by s390_mainpool_start to the
9061 last insn of the section where the literal pool should be emitted.
9063 insn = pool->emit_pool_after;
9065 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9066 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9068 s390_dump_pool (pool, 1);
9071 /* Otherwise, we emit an inline literal pool and use BASR to branch
9072 over it, setting up the pool register at the same time. */
9075 rtx_code_label *pool_end = gen_label_rtx ();
9077 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
9078 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
9079 JUMP_LABEL (insn) = pool_end;
9080 INSN_ADDRESSES_NEW (insn, -1);
9081 remove_insn (pool->pool_insn);
9083 insn = emit_label_after (pool->label, insn);
9084 INSN_ADDRESSES_NEW (insn, -1);
9086 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
9087 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
9089 insn = emit_label_after (pool_end, pool->pool_insn);
9090 INSN_ADDRESSES_NEW (insn, -1);
9092 s390_dump_pool (pool, 1);
9096 /* Replace all literal pool references. */
9098 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
9101 replace_ltrel_base (&PATTERN (insn));
9103 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9105 rtx addr, pool_ref = NULL_RTX;
9106 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9109 if (s390_execute_label (insn))
9110 addr = s390_find_execute (pool, insn);
9112 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
9113 get_pool_mode (pool_ref));
9115 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9116 INSN_CODE (insn) = -1;
9122 /* Free the pool. */
9123 s390_free_pool (pool);
9126 /* POOL holds the main literal pool as collected by s390_mainpool_start.
9127 We have decided we cannot use this pool, so revert all changes
9128 to the current function that were done by s390_mainpool_start. */
9130 s390_mainpool_cancel (struct constant_pool *pool)
9132 /* We didn't actually change the instruction stream, so simply
9133 free the pool memory. */
9134 s390_free_pool (pool);
9138 /* Chunkify the literal pool. */
9140 #define S390_POOL_CHUNK_MIN 0xc00
9141 #define S390_POOL_CHUNK_MAX 0xe00
9143 static struct constant_pool *
9144 s390_chunkify_start (void)
9146 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
9149 rtx pending_ltrel = NULL_RTX;
9152 rtx (*gen_reload_base) (rtx, rtx) =
9153 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
9156 /* We need correct insn addresses. */
9158 shorten_branches (get_insns ());
9160 /* Scan all insns and move literals to pool chunks. */
9162 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9164 bool section_switch_p = false;
9166 /* Check for pending LTREL_BASE. */
9169 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
9172 gcc_assert (ltrel_base == pending_ltrel);
9173 pending_ltrel = NULL_RTX;
9177 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
9180 curr_pool = s390_start_pool (&pool_list, insn);
9182 s390_add_execute (curr_pool, insn);
9183 s390_add_pool_insn (curr_pool, insn);
9185 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9187 rtx pool_ref = NULL_RTX;
9188 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9191 rtx constant = get_pool_constant (pool_ref);
9192 machine_mode mode = get_pool_mode (pool_ref);
9195 curr_pool = s390_start_pool (&pool_list, insn);
9197 s390_add_constant (curr_pool, constant, mode);
9198 s390_add_pool_insn (curr_pool, insn);
9200 /* Don't split the pool chunk between a LTREL_OFFSET load
9201 and the corresponding LTREL_BASE. */
9202 if (GET_CODE (constant) == CONST
9203 && GET_CODE (XEXP (constant, 0)) == UNSPEC
9204 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
9206 gcc_assert (!pending_ltrel);
9207 pending_ltrel = pool_ref;
9212 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
9215 s390_add_pool_insn (curr_pool, insn);
9216 /* An LTREL_BASE must follow within the same basic block. */
9217 gcc_assert (!pending_ltrel);
9221 switch (NOTE_KIND (insn))
9223 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
9224 section_switch_p = true;
9226 case NOTE_INSN_VAR_LOCATION:
9227 case NOTE_INSN_CALL_ARG_LOCATION:
9234 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
9235 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
9238 if (TARGET_CPU_ZARCH)
9240 if (curr_pool->size < S390_POOL_CHUNK_MAX)
9241 continue;
9243 s390_end_pool (curr_pool, NULL);
9244 curr_pool = NULL;
9246 else
9248 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
9249 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
9252 /* We will later have to insert base register reload insns.
9253 Those will have an effect on code size, which we need to
9254 consider here. This calculation makes rather pessimistic
9255 worst-case assumptions. */
9259 if (chunk_size < S390_POOL_CHUNK_MIN
9260 && curr_pool->size < S390_POOL_CHUNK_MIN
9261 && !section_switch_p)
9262 continue;
9264 /* Pool chunks can only be inserted after BARRIERs ... */
9265 if (BARRIER_P (insn))
9267 s390_end_pool (curr_pool, insn);
9268 curr_pool = NULL;
9272 /* ... so if we don't find one in time, create one. */
9273 else if (chunk_size > S390_POOL_CHUNK_MAX
9274 || curr_pool->size > S390_POOL_CHUNK_MAX
9275 || section_switch_p)
9277 rtx_insn *label, *jump, *barrier, *next, *prev;
9279 if (!section_switch_p)
9281 /* We can insert the barrier only after a 'real' insn. */
9282 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
9283 continue;
9284 if (get_attr_length (insn) == 0)
9285 continue;
9286 /* Don't separate LTREL_BASE from the corresponding
9287 LTREL_OFFSET load. */
9288 if (pending_ltrel)
9289 continue;
9294 next = NEXT_INSN (insn);
9298 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
9299 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
9303 gcc_assert (!pending_ltrel);
9305 /* The old pool has to end before the section switch
9306 note in order to make it part of the current
9307 section. */
9308 insn = PREV_INSN (insn);
9311 label = gen_label_rtx ();
9312 prev = insn;
9313 if (prev && NOTE_P (prev))
9314 prev = prev_nonnote_insn (prev);
9315 if (prev)
9316 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
9317 INSN_LOCATION (prev));
9318 else
9319 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
9320 barrier = emit_barrier_after (jump);
9321 insn = emit_label_after (label, barrier);
9322 JUMP_LABEL (jump) = label;
9323 LABEL_NUSES (label) = 1;
9325 INSN_ADDRESSES_NEW (jump, -1);
9326 INSN_ADDRESSES_NEW (barrier, -1);
9327 INSN_ADDRESSES_NEW (insn, -1);
9329 s390_end_pool (curr_pool, barrier);
9330 curr_pool = NULL;
9336 if (curr_pool)
9337 s390_end_pool (curr_pool, NULL);
9338 gcc_assert (!pending_ltrel);
9340 /* Find all labels that are branched into
9341 from an insn belonging to a different chunk. */
9343 far_labels = BITMAP_ALLOC (NULL);
9345 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9347 rtx_jump_table_data *table;
9349 /* Labels marked with LABEL_PRESERVE_P can be target
9350 of non-local jumps, so we have to mark them.
9351 The same holds for named labels.
9353 Don't do that, however, if it is the label before
9354 a jump table. */
9356 if (LABEL_P (insn)
9357 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
9359 rtx_insn *vec_insn = NEXT_INSN (insn);
9360 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
9361 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
9363 /* Check potential targets in a table jump (casesi_jump). */
9364 else if (tablejump_p (insn, NULL, &table))
9366 rtx vec_pat = PATTERN (table);
9367 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
9369 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
9371 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
9373 if (s390_find_pool (pool_list, label)
9374 != s390_find_pool (pool_list, insn))
9375 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
9378 /* If we have a direct jump (conditional or unconditional),
9379 check all potential targets. */
9380 else if (JUMP_P (insn))
9382 rtx pat = PATTERN (insn);
9384 if (GET_CODE (pat) == PARALLEL)
9385 pat = XVECEXP (pat, 0, 0);
9387 if (GET_CODE (pat) == SET)
9389 rtx label = JUMP_LABEL (insn);
9390 if (label && !ANY_RETURN_P (label))
9392 if (s390_find_pool (pool_list, label)
9393 != s390_find_pool (pool_list, insn))
9394 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
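/* Rough picture of why far labels matter (illustrative): each chunk
   gets its own base register reload, so a branch from chunk A into
   chunk B would arrive with A's pool base still in the register.
   The reload insns emitted below at every far label make B's
   literals addressable again.  */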
9400 /* Insert base register reload insns before every pool. */
9402 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9404 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9405 curr_pool->label);
9406 rtx_insn *insn = curr_pool->first_insn;
9407 INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
9410 /* Insert base register reload insns at every far label. */
9412 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9413 if (LABEL_P (insn)
9414 && bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
9416 struct constant_pool *pool = s390_find_pool (pool_list, insn);
9417 if (pool)
9419 rtx new_insn = gen_reload_base (cfun->machine->base_reg,
9420 pool->label);
9421 INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
9426 BITMAP_FREE (far_labels);
9429 /* Recompute insn addresses. */
9431 init_insn_lengths ();
9432 shorten_branches (get_insns ());
9437 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9438 After we have decided to use this list, finish implementing
9439 all changes to the current function as required. */
9442 s390_chunkify_finish (struct constant_pool *pool_list)
9444 struct constant_pool *curr_pool = NULL;
9448 /* Replace all literal pool references. */
9450 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9452 if (INSN_P (insn))
9453 replace_ltrel_base (&PATTERN (insn));
9455 curr_pool = s390_find_pool (pool_list, insn);
9456 if (!curr_pool)
9457 continue;
9459 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
9461 rtx addr, pool_ref = NULL_RTX;
9462 find_constant_pool_ref (PATTERN (insn), &pool_ref);
9463 if (pool_ref)
9465 if (s390_execute_label (insn))
9466 addr = s390_find_execute (curr_pool, insn);
9467 else
9468 addr = s390_find_constant (curr_pool,
9469 get_pool_constant (pool_ref),
9470 get_pool_mode (pool_ref));
9472 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
9473 INSN_CODE (insn) = -1;
9478 /* Dump out all literal pools. */
9480 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9481 s390_dump_pool (curr_pool, 0);
9483 /* Free pool list. */
9485 while (pool_list)
9487 struct constant_pool *next = pool_list->next;
9488 s390_free_pool (pool_list);
9489 pool_list = next;
9493 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
9494 We have decided we cannot use this list, so revert all changes
9495 to the current function that were done by s390_chunkify_start. */
9498 s390_chunkify_cancel (struct constant_pool *pool_list)
9500 struct constant_pool *curr_pool = NULL;
9503 /* Remove all pool placeholder insns. */
9505 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
9507 /* Did we insert an extra barrier? Remove it. */
9508 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
9509 rtx_insn *jump = barrier ? PREV_INSN (barrier) : NULL;
9510 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
9512 if (jump && JUMP_P (jump)
9513 && barrier && BARRIER_P (barrier)
9514 && label && LABEL_P (label)
9515 && GET_CODE (PATTERN (jump)) == SET
9516 && SET_DEST (PATTERN (jump)) == pc_rtx
9517 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
9518 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
9520 remove_insn (jump);
9521 remove_insn (barrier);
9522 remove_insn (label);
9525 remove_insn (curr_pool->pool_insn);
9528 /* Remove all base register reload insns. */
9530 for (insn = get_insns (); insn; )
9532 rtx_insn *next_insn = NEXT_INSN (insn);
9534 if (NONJUMP_INSN_P (insn)
9535 && GET_CODE (PATTERN (insn)) == SET
9536 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
9537 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
9538 remove_insn (insn);
9540 insn = next_insn;
9543 /* Free pool list. */
9545 while (pool_list)
9547 struct constant_pool *next = pool_list->next;
9548 s390_free_pool (pool_list);
9549 pool_list = next;
9553 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
9556 s390_output_pool_entry (rtx exp, machine_mode mode, unsigned int align)
9558 switch (GET_MODE_CLASS (mode))
9560 case MODE_FLOAT:
9561 case MODE_DECIMAL_FLOAT:
9562 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
9564 assemble_real (*CONST_DOUBLE_REAL_VALUE (exp),
9565 as_a <scalar_float_mode> (mode), align);
9566 break;
9568 case MODE_INT:
9569 assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
9570 mark_symbol_refs_as_used (exp);
9571 break;
9573 case MODE_VECTOR_INT:
9574 case MODE_VECTOR_FLOAT:
9576 int i;
9577 machine_mode inner_mode;
9578 gcc_assert (GET_CODE (exp) == CONST_VECTOR);
9580 inner_mode = GET_MODE_INNER (GET_MODE (exp));
9581 for (i = 0; i < XVECLEN (exp, 0); i++)
9582 s390_output_pool_entry (XVECEXP (exp, 0, i),
9583 inner_mode,
9584 i == 0
9585 ? align
9586 : GET_MODE_BITSIZE (inner_mode));
9596 /* Return an RTL expression representing the value of the return address
9597 for the frame COUNT steps up from the current frame. FRAME is the
9598 frame pointer of that frame. */
9601 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
9606 /* Without backchain, we fail for all but the current frame. */
9608 if (!TARGET_BACKCHAIN && count > 0)
9609 return NULL_RTX;
9611 /* For the current frame, we need to make sure the initial
9612 value of RETURN_REGNUM is actually saved. */
9614 if (count == 0)
9616 /* On non-z architectures branch splitting could overwrite r14. */
9617 if (TARGET_CPU_ZARCH)
9618 return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
9619 else
9621 cfun_frame_layout.save_return_addr_p = true;
9622 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
9626 if (TARGET_PACKED_STACK)
9627 offset = -2 * UNITS_PER_LONG;
9628 else
9629 offset = RETURN_REGNUM * UNITS_PER_LONG;
9631 addr = plus_constant (Pmode, frame, offset);
9632 addr = memory_address (Pmode, addr);
9633 return gen_rtx_MEM (Pmode, addr);
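/* Usage sketch (illustrative): __builtin_return_address (1) only
   works here when -mbackchain is in effect; otherwise the count > 0
   case above bails out.  With the default stack layout the slot sits
   at frame + RETURN_REGNUM * UNITS_PER_LONG, i.e. frame + 112 for
   64-bit code.  */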
9636 /* Return an RTL expression representing the back chain stored in
9637 the current stack frame. */
9640 s390_back_chain_rtx (void)
9644 gcc_assert (TARGET_BACKCHAIN);
9646 if (TARGET_PACKED_STACK)
9647 chain = plus_constant (Pmode, stack_pointer_rtx,
9648 STACK_POINTER_OFFSET - UNITS_PER_LONG);
9649 else
9650 chain = stack_pointer_rtx;
9652 chain = gen_rtx_MEM (Pmode, chain);
9656 /* Find first call clobbered register unused in a function.
9657 This could be used as base register in a leaf function
9658 or for holding the return address before epilogue. */
9661 find_unused_clobbered_reg (void)
9664 for (i = 0; i < 6; i++)
9665 if (!df_regs_ever_live_p (i))
9666 return i;
9671 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
9672 clobbered hard regs in SETREG. */
9675 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
9677 char *regs_ever_clobbered = (char *)data;
9678 unsigned int i, regno;
9679 machine_mode mode = GET_MODE (setreg);
9681 if (GET_CODE (setreg) == SUBREG)
9683 rtx inner = SUBREG_REG (setreg);
9684 if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
9685 return;
9686 regno = subreg_regno (setreg);
9688 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
9689 regno = REGNO (setreg);
9690 else
9691 return;
9693 for (i = regno;
9694 i < end_hard_regno (mode, regno);
9695 i++)
9696 regs_ever_clobbered[i] = 1;
9699 /* Walks through all basic blocks of the current function looking
9700 for clobbered hard regs using s390_reg_clobbered_rtx. The fields
9701 of the passed integer array REGS_EVER_CLOBBERED are set to one for
9702 each of those regs. */
9705 s390_regs_ever_clobbered (char regs_ever_clobbered[])
9711 memset (regs_ever_clobbered, 0, 32);
9713 /* For non-leaf functions we have to consider all call clobbered regs to be
9714 clobbered. */
9715 if (!crtl->is_leaf)
9717 for (i = 0; i < 32; i++)
9718 regs_ever_clobbered[i] = call_really_used_regs[i];
9721 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
9722 this work is done by liveness analysis (mark_regs_live_at_end).
9723 Special care is needed for functions containing landing pads. Landing pads
9724 may use the eh registers, but the code which sets these registers is not
9725 contained in that function. Hence s390_regs_ever_clobbered is not able to
9726 deal with this automatically. */
9727 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
9728 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
9729 if (crtl->calls_eh_return
9730 || (cfun->machine->has_landing_pad_p
9731 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
9732 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
9734 /* For nonlocal gotos all call-saved registers have to be saved.
9735 This flag is also set for the unwinding code in libgcc.
9736 See expand_builtin_unwind_init. For regs_ever_live this is done by
9737 reload. */
9738 if (crtl->saves_all_registers)
9739 for (i = 0; i < 32; i++)
9740 if (!call_really_used_regs[i])
9741 regs_ever_clobbered[i] = 1;
9743 FOR_EACH_BB_FN (cur_bb, cfun)
9745 FOR_BB_INSNS (cur_bb, cur_insn)
9749 if (!INSN_P (cur_insn))
9750 continue;
9752 pat = PATTERN (cur_insn);
9754 /* Ignore GPR restore insns. */
9755 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
9757 if (GET_CODE (pat) == SET
9758 && GENERAL_REG_P (SET_DEST (pat)))
9761 if (GET_MODE (SET_SRC (pat)) == DImode
9762 && FP_REG_P (SET_SRC (pat)))
9763 continue;
9766 if (GET_CODE (SET_SRC (pat)) == MEM)
9767 continue;
9771 if (GET_CODE (pat) == PARALLEL
9772 && load_multiple_operation (pat, VOIDmode))
9773 continue;
9776 note_stores (pat,
9777 s390_reg_clobbered_rtx,
9778 regs_ever_clobbered);
9783 /* Determine the frame area which actually has to be accessed
9784 in the function epilogue. The values are stored at the
9785 given pointers AREA_BOTTOM (address of the lowest used stack
9786 address) and AREA_TOP (address of the first item which does
9787 not belong to the stack frame). */
9790 s390_frame_area (int *area_bottom, int *area_top)
9797 if (cfun_frame_layout.first_restore_gpr != -1)
9799 b = (cfun_frame_layout.gprs_offset
9800 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
9801 t = b + (cfun_frame_layout.last_restore_gpr
9802 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
9805 if (TARGET_64BIT && cfun_save_high_fprs_p)
9807 b = MIN (b, cfun_frame_layout.f8_offset);
9808 t = MAX (t, (cfun_frame_layout.f8_offset
9809 + cfun_frame_layout.high_fprs * 8));
9814 if (cfun_fpr_save_p (FPR4_REGNUM))
9816 b = MIN (b, cfun_frame_layout.f4_offset);
9817 t = MAX (t, cfun_frame_layout.f4_offset + 8);
9819 if (cfun_fpr_save_p (FPR6_REGNUM))
9821 b = MIN (b, cfun_frame_layout.f4_offset + 8);
9822 t = MAX (t, cfun_frame_layout.f4_offset + 16);
9825 *area_bottom = b;
9826 *area_top = t;
9828 /* Update gpr_save_slots in the frame layout trying to make use of
9829 FPRs as GPR save slots.
9830 This is a helper routine of s390_register_info. */
9833 s390_register_info_gprtofpr ()
9835 int save_reg_slot = FPR0_REGNUM;
9838 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
9839 return;
9841 /* builtin_eh_return needs to be able to modify the return address
9842 on the stack. It could also adjust the FPR save slot instead but
9843 is it worth the trouble?! */
9844 if (crtl->calls_eh_return)
9845 return;
9847 for (i = 15; i >= 6; i--)
9849 if (cfun_gpr_save_slot (i) == SAVE_SLOT_NONE)
9850 continue;
9852 /* Advance to the next FP register which can be used as a
9853 save slot. */
9854 while ((!call_really_used_regs[save_reg_slot]
9855 || df_regs_ever_live_p (save_reg_slot)
9856 || cfun_fpr_save_p (save_reg_slot))
9857 && FP_REGNO_P (save_reg_slot))
9858 save_reg_slot++;
9859 if (!FP_REGNO_P (save_reg_slot))
9861 /* We only want to use ldgr/lgdr if we can get rid of
9862 stm/lm entirely. So undo the gpr slot allocation in
9863 case we ran out of FPR save slots. */
9864 for (j = 6; j <= 15; j++)
9865 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
9866 cfun_gpr_save_slot (j) = SAVE_SLOT_STACK;
9867 return;
9869 cfun_gpr_save_slot (i) = save_reg_slot++;
9873 /* Set the bits in fpr_bitmap for FPRs which need to be saved due to
9874 stdarg.
9875 This is a helper routine for s390_register_info. */
9878 s390_register_info_stdarg_fpr ()
9884 /* Save the FP argument regs for stdarg. f0, f2 for 31 bit and
9885 f0, f2, f4 and f6 for 64 bit. */
9886 if (!cfun->stdarg
9887 || !TARGET_HARD_FLOAT
9888 || !cfun->va_list_fpr_size
9889 || crtl->args.info.fprs >= FP_ARG_NUM_REG)
9890 return;
9892 min_fpr = crtl->args.info.fprs;
9893 max_fpr = min_fpr + cfun->va_list_fpr_size - 1;
9894 if (max_fpr >= FP_ARG_NUM_REG)
9895 max_fpr = FP_ARG_NUM_REG - 1;
9897 /* FPR argument regs start at f0. */
9898 min_fpr += FPR0_REGNUM;
9899 max_fpr += FPR0_REGNUM;
9901 for (i = min_fpr; i <= max_fpr; i++)
9902 cfun_set_fpr_save (i);
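/* Worked example (illustrative, assuming the usual 64-bit value
   FP_ARG_NUM_REG == 4): a stdarg function with one named double
   argument has crtl->args.info.fprs == 1, so indices 1..3 are
   flagged, i.e. the remaining FP argument registers starting at
   FPR0_REGNUM + 1 get a save slot for use by va_arg.  */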
9905 /* Reserve the GPR save slots for GPRs which need to be saved due to
9906 stdarg.
9907 This is a helper routine for s390_register_info. */
9910 s390_register_info_stdarg_gpr ()
9916 if (!cfun->stdarg
9917 || !cfun->va_list_gpr_size
9918 || crtl->args.info.gprs >= GP_ARG_NUM_REG)
9919 return;
9921 min_gpr = crtl->args.info.gprs;
9922 max_gpr = min_gpr + cfun->va_list_gpr_size - 1;
9923 if (max_gpr >= GP_ARG_NUM_REG)
9924 max_gpr = GP_ARG_NUM_REG - 1;
9926 /* GPR argument regs start at r2. */
9927 min_gpr += GPR2_REGNUM;
9928 max_gpr += GPR2_REGNUM;
9930 /* If r6 was supposed to be saved into an FPR and now needs to go to
9931 the stack for varargs, we have to adjust the restore range to make
9932 sure that the restore is done from the stack as well. */
9933 if (FP_REGNO_P (cfun_gpr_save_slot (GPR6_REGNUM))
9934 && min_gpr <= GPR6_REGNUM
9935 && max_gpr >= GPR6_REGNUM)
9937 if (cfun_frame_layout.first_restore_gpr == -1
9938 || cfun_frame_layout.first_restore_gpr > GPR6_REGNUM)
9939 cfun_frame_layout.first_restore_gpr = GPR6_REGNUM;
9940 if (cfun_frame_layout.last_restore_gpr == -1
9941 || cfun_frame_layout.last_restore_gpr < GPR6_REGNUM)
9942 cfun_frame_layout.last_restore_gpr = GPR6_REGNUM;
9945 if (cfun_frame_layout.first_save_gpr == -1
9946 || cfun_frame_layout.first_save_gpr > min_gpr)
9947 cfun_frame_layout.first_save_gpr = min_gpr;
9949 if (cfun_frame_layout.last_save_gpr == -1
9950 || cfun_frame_layout.last_save_gpr < max_gpr)
9951 cfun_frame_layout.last_save_gpr = max_gpr;
9953 for (i = min_gpr; i <= max_gpr; i++)
9954 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
9957 /* Calculate the save and restore ranges for stm(g) and lm(g) in the
9958 prologue and epilogue. */
9961 s390_register_info_set_ranges ()
9965 /* Find the first and the last save slot supposed to use the stack
9966 to set the restore range.
9967 Vararg regs might be marked as save to stack but only the
9968 call-saved regs really need restoring (i.e. r6). This code
9969 assumes that the vararg regs have not yet been recorded in
9970 cfun_gpr_save_slot. */
9971 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != SAVE_SLOT_STACK; i++);
9972 for (j = 15; j > i && cfun_gpr_save_slot (j) != SAVE_SLOT_STACK; j--);
9973 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
9974 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
9975 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
9976 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
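/* Worked example (illustrative): if only r6 and r13..r15 ended up
   with stack slots, then i == 6 and j == 15, so a single stm(g)/lm(g)
   covering r6..r15 is used even though r7..r12 would not strictly
   need to be stored.  */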
9979 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
9980 for registers which need to be saved in function prologue.
9981 This function can be used until the insns emitted for save/restore
9982 of the regs are visible in the RTL stream. */
9985 s390_register_info ()
9988 char clobbered_regs[32];
9990 gcc_assert (!epilogue_completed);
9992 if (reload_completed)
9993 /* After reload we rely on our own routine to determine which
9994 registers need saving. */
9995 s390_regs_ever_clobbered (clobbered_regs);
9996 else
9997 /* During reload we use regs_ever_live as a base since reload
9998 does changes in there which we otherwise would not be aware
9999 of. */
10000 for (i = 0; i < 32; i++)
10001 clobbered_regs[i] = df_regs_ever_live_p (i);
10003 for (i = 0; i < 32; i++)
10004 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10006 /* Mark the call-saved FPRs which need to be saved.
10007 This needs to be done before checking the special GPRs since the
10008 stack pointer usage depends on whether high FPRs have to be saved
10009 or not. */
10010 cfun_frame_layout.fpr_bitmap = 0;
10011 cfun_frame_layout.high_fprs = 0;
10012 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
10013 if (clobbered_regs[i] && !call_really_used_regs[i])
10015 cfun_set_fpr_save (i);
10016 if (i >= FPR8_REGNUM)
10017 cfun_frame_layout.high_fprs++;
10020 /* Register 12 is used for GOT address, but also as temp in prologue
10021 for split-stack stdarg functions (unless r14 is available). */
10022 clobbered_regs[12]
10023 |= ((flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
10024 || (flag_split_stack && cfun->stdarg
10025 && (crtl->is_leaf || TARGET_TPF_PROFILING
10026 || has_hard_reg_initial_val (Pmode, RETURN_REGNUM))));
10028 clobbered_regs[BASE_REGNUM]
10029 |= (cfun->machine->base_reg
10030 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
10032 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
10033 |= !!frame_pointer_needed;
10035 /* On pre z900 machines this might take until machine dependent
10036 reorg to decide.
10037 save_return_addr_p will only be set on non-zarch machines so
10038 there is no risk that r14 goes into an FPR instead of a stack
10039 slot. */
10040 clobbered_regs[RETURN_REGNUM]
10041 |= (!crtl->is_leaf
10042 || TARGET_TPF_PROFILING
10043 || cfun->machine->split_branches_pending_p
10044 || cfun_frame_layout.save_return_addr_p
10045 || crtl->calls_eh_return);
10047 clobbered_regs[STACK_POINTER_REGNUM]
10048 |= (!crtl->is_leaf
10049 || TARGET_TPF_PROFILING
10050 || cfun_save_high_fprs_p
10051 || get_frame_size () > 0
10052 || (reload_completed && cfun_frame_layout.frame_size > 0)
10053 || cfun->calls_alloca);
10055 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 16);
10057 for (i = 6; i < 16; i++)
10058 if (clobbered_regs[i])
10059 cfun_gpr_save_slot (i) = SAVE_SLOT_STACK;
10061 s390_register_info_stdarg_fpr ();
10062 s390_register_info_gprtofpr ();
10063 s390_register_info_set_ranges ();
10064 /* stdarg functions might need to save GPRs 2 to 6. This might
10065 override the GPR->FPR save decision made by
10066 s390_register_info_gprtofpr for r6 since vararg regs must go to
10067 the stack. */
10068 s390_register_info_stdarg_gpr ();
10071 /* This function is called by s390_optimize_prologue in order to get
10072 rid of unnecessary GPR save/restore instructions. The register info
10073 for the GPRs is re-computed and the ranges are re-calculated. */
10076 s390_optimize_register_info ()
10078 char clobbered_regs[32];
10081 gcc_assert (epilogue_completed);
10082 gcc_assert (!cfun->machine->split_branches_pending_p);
10084 s390_regs_ever_clobbered (clobbered_regs);
10086 for (i = 0; i < 32; i++)
10087 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
10089 /* There is still special treatment needed for cases invisible to
10090 s390_regs_ever_clobbered. */
10091 clobbered_regs[RETURN_REGNUM]
10092 |= (TARGET_TPF_PROFILING
10093 /* When expanding builtin_return_addr in ESA mode we do not
10094 know whether r14 will later be needed as scratch reg when
10095 doing branch splitting. So the builtin always accesses the
10096 r14 save slot and we need to stick to the save/restore
10097 decision for r14 even if it turns out that it didn't get
10098 clobbered. */
10099 || cfun_frame_layout.save_return_addr_p
10100 || crtl->calls_eh_return);
10102 memset (cfun_frame_layout.gpr_save_slots, SAVE_SLOT_NONE, 6);
10104 for (i = 6; i < 16; i++)
10105 if (!clobbered_regs[i])
10106 cfun_gpr_save_slot (i) = SAVE_SLOT_NONE;
10108 s390_register_info_set_ranges ();
10109 s390_register_info_stdarg_gpr ();
10112 /* Fill cfun->machine with info about frame of current function. */
10115 s390_frame_info (void)
10117 HOST_WIDE_INT lowest_offset;
10119 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
10120 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
10122 /* The va_arg builtin uses a constant distance of 16 *
10123 UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
10124 pointer. So even if we are going to save the stack pointer in an
10125 FPR we need the stack space in order to keep the offsets
10126 valid. */
10127 if (cfun->stdarg && cfun_save_arg_fprs_p)
10129 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10131 if (cfun_frame_layout.first_save_gpr_slot == -1)
10132 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
10135 cfun_frame_layout.frame_size = get_frame_size ();
10136 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
10137 fatal_error (input_location,
10138 "total size of local variables exceeds architecture limit");
10140 if (!TARGET_PACKED_STACK)
10142 /* Fixed stack layout. */
10143 cfun_frame_layout.backchain_offset = 0;
10144 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
10145 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
10146 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
10147 cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
10148 * UNITS_PER_LONG);
10150 else if (TARGET_BACKCHAIN)
10152 /* Kernel stack layout - packed stack, backchain, no float */
10153 gcc_assert (TARGET_SOFT_FLOAT);
10154 cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
10155 - UNITS_PER_LONG);
10157 /* The distance between the backchain and the return address
10158 save slot must not change. So we always need a slot for the
10159 stack pointer which resides in between. */
10160 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
10162 cfun_frame_layout.gprs_offset
10163 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
10165 /* FPRs will not be saved. Nevertheless pick sane values to
10166 keep area calculations valid. */
10167 cfun_frame_layout.f0_offset =
10168 cfun_frame_layout.f4_offset =
10169 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
10175 /* Packed stack layout without backchain. */
10177 /* With stdarg FPRs need their dedicated slots. */
10178 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
10179 : (cfun_fpr_save_p (FPR4_REGNUM) +
10180 cfun_fpr_save_p (FPR6_REGNUM)));
10181 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
10183 num_fprs = (cfun->stdarg ? 2
10184 : (cfun_fpr_save_p (FPR0_REGNUM)
10185 + cfun_fpr_save_p (FPR2_REGNUM)));
10186 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
10188 cfun_frame_layout.gprs_offset
10189 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
10191 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
10192 - cfun_frame_layout.high_fprs * 8);
10195 if (cfun_save_high_fprs_p)
10196 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
10198 if (!crtl->is_leaf)
10199 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
10201 /* In the following cases we have to allocate a STACK_POINTER_OFFSET
10202 sized area at the bottom of the stack. This is required also for
10203 leaf functions. When GCC generates a local stack reference it
10204 will always add STACK_POINTER_OFFSET to all these references. */
10205 if (crtl->is_leaf
10206 && !TARGET_TPF_PROFILING
10207 && cfun_frame_layout.frame_size == 0
10208 && !cfun->calls_alloca)
10209 return;
10211 /* Calculate the number of bytes we have used in our own register
10212 save area. With the packed stack layout we can re-use the
10213 remaining bytes for normal stack elements. */
10215 if (TARGET_PACKED_STACK)
10216 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
10217 cfun_frame_layout.f4_offset),
10218 cfun_frame_layout.gprs_offset);
10219 else
10220 lowest_offset = 0;
10222 if (TARGET_BACKCHAIN)
10223 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
10225 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
10227 /* If under 31 bit an odd number of gprs has to be saved we have to
10228 adjust the frame size to sustain 8 byte alignment of stack
10229 frames. */
10230 cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
10231 STACK_BOUNDARY / BITS_PER_UNIT - 1)
10232 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
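/* Worked example (illustrative): with STACK_BOUNDARY / BITS_PER_UNIT
   == 8, a raw frame size of 92 becomes (92 + 7) & ~7 == 96, keeping
   stack frames 8-byte aligned.  */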
10235 /* Generate frame layout. Fills in register and frame data for the current
10236 function in cfun->machine. This routine can be called multiple times;
10237 it will re-do the complete frame layout every time. */
10240 s390_init_frame_layout (void)
10242 HOST_WIDE_INT frame_size;
10245 /* After LRA the frame layout is supposed to be read-only and should
10246 not be re-computed. */
10247 if (reload_completed)
10248 return;
10250 /* On S/390 machines, we may need to perform branch splitting, which
10251 will require both base and return address register. We have no
10252 choice but to assume we're going to need them until right at the
10253 end of the machine dependent reorg phase. */
10254 if (!TARGET_CPU_ZARCH)
10255 cfun->machine->split_branches_pending_p = true;
10257 do
10259 frame_size = cfun_frame_layout.frame_size;
10261 /* Try to predict whether we'll need the base register. */
10262 base_used = cfun->machine->split_branches_pending_p
10263 || crtl->uses_const_pool
10264 || (!DISP_IN_RANGE (frame_size)
10265 && !CONST_OK_FOR_K (frame_size));
10267 /* Decide which register to use as literal pool base. In small
10268 leaf functions, try to use an unused call-clobbered register
10269 as base register to avoid save/restore overhead. */
10271 cfun->machine->base_reg = NULL_RTX;
10277 /* Prefer r5 (most likely to be free). */
10278 for (br = 5; br >= 2 && df_regs_ever_live_p (br); br--)
10279 ;
10280 cfun->machine->base_reg =
10281 gen_rtx_REG (Pmode, (br >= 2) ? br : BASE_REGNUM);
10284 s390_register_info ();
10285 s390_frame_info ();
10287 while (frame_size != cfun_frame_layout.frame_size);
10290 /* Remove the FPR clobbers from a tbegin insn if it can be proven that
10291 the TX is nonescaping. A transaction is considered escaping if
10292 there is at least one path from tbegin returning CC0 to the
10293 function exit block without a tend.
10295 The check so far has some limitations:
10296 - only single tbegin/tend BBs are supported
10297 - the first cond jump after tbegin must separate the CC0 path from ~CC0
10298 - when CC is copied to a GPR and the CC0 check is done with the GPR
10299 this is not supported
10303 s390_optimize_nonescaping_tx (void)
10305 const unsigned int CC0 = 1 << 3;
10306 basic_block tbegin_bb = NULL;
10307 basic_block tend_bb = NULL;
10310 bool result = true;
10312 rtx_insn *tbegin_insn = NULL;
10314 if (!cfun->machine->tbegin_p)
10315 return;
10317 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
10319 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
10324 FOR_BB_INSNS (bb, insn)
10326 rtx ite, cc, pat, target;
10327 unsigned HOST_WIDE_INT mask;
10329 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
10330 continue;
10332 pat = PATTERN (insn);
10334 if (GET_CODE (pat) == PARALLEL)
10335 pat = XVECEXP (pat, 0, 0);
10337 if (GET_CODE (pat) != SET
10338 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
10339 continue;
10341 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
10345 tbegin_insn = insn;
10347 /* Just return if the tbegin doesn't have clobbers. */
10348 if (GET_CODE (PATTERN (insn)) != PARALLEL)
10349 return;
10351 if (tbegin_bb != NULL)
10352 return;
10354 /* Find the next conditional jump. */
10355 for (tmp = NEXT_INSN (insn);
10356 tmp != NULL_RTX;
10357 tmp = NEXT_INSN (tmp))
10359 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
10360 break;
10364 ite = SET_SRC (PATTERN (tmp));
10365 if (GET_CODE (ite) != IF_THEN_ELSE)
10366 return;
10368 cc = XEXP (XEXP (ite, 0), 0);
10369 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
10370 || GET_MODE (cc) != CCRAWmode
10371 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
10372 return;
10374 if (bb->succs->length () != 2)
10375 return;
10377 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
10378 if (GET_CODE (XEXP (ite, 0)) == NE)
10379 mask ^= 0xf;
10381 if (mask == CC0)
10382 target = XEXP (ite, 1);
10383 else if (mask == (CC0 ^ 0xf))
10384 target = XEXP (ite, 2);
10392 ei = ei_start (bb->succs);
10393 e1 = ei_safe_edge (ei);
10394 ei_next (&ei);
10395 e2 = ei_safe_edge (ei);
10397 if (e2->flags & EDGE_FALLTHRU)
10400 e1 = ei_safe_edge (ei);
10403 if (!(e1->flags & EDGE_FALLTHRU))
10404 return;
10406 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
10408 if (tmp == BB_END (bb))
10409 break;
10413 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
10415 if (tend_bb != NULL)
10416 return;
10417 tend_bb = bb;
10422 /* Either we successfully remove the FPR clobbers here or we are not
10423 able to do anything for this TX. Both cases don't qualify for
10424 another look. */
10425 cfun->machine->tbegin_p = false;
10427 if (tbegin_bb == NULL || tend_bb == NULL)
10430 calculate_dominance_info (CDI_POST_DOMINATORS);
10431 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
10432 free_dominance_info (CDI_POST_DOMINATORS);
10434 if (!result)
10435 return;
10437 PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
10438 gen_rtvec (2,
10439 XVECEXP (PATTERN (tbegin_insn), 0, 0),
10440 XVECEXP (PATTERN (tbegin_insn), 0, 1)));
10441 INSN_CODE (tbegin_insn) = -1;
10442 df_insn_rescan (tbegin_insn);
10447 /* Implement TARGET_HARD_REGNO_NREGS. Because all registers in a class
10448 have the same size, this is equivalent to CLASS_MAX_NREGS. */
10450 static unsigned int
10451 s390_hard_regno_nregs (unsigned int regno, machine_mode mode)
10453 return s390_class_max_nregs (REGNO_REG_CLASS (regno), mode);
10456 /* Implement TARGET_HARD_REGNO_MODE_OK.
10458 Integer modes <= word size fit into any GPR.
10459 Integer modes > word size fit into successive GPRs, starting with
10460 an even-numbered register.
10461 SImode and DImode fit into FPRs as well.
10463 Floating point modes <= word size fit into any FPR or GPR.
10464 Floating point modes > word size (i.e. DFmode on 32-bit) fit
10465 into any FPR, or an even-odd GPR pair.
10466 TFmode fits only into an even-odd FPR pair.
10468 Complex floating point modes fit either into two FPRs, or into
10469 successive GPRs (again starting with an even number).
10470 TCmode fits only into two successive even-odd FPR pairs.
10472 Condition code modes fit only into the CC register. */
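/* Example (illustrative): on 31-bit, DImode is wider than a word and
   therefore needs an even/odd GPR pair such as (r2, r3); asking for
   DImode in r3 fails the REGNO_PAIR_OK check below.  */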
10474 static bool
10475 s390_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
10477 if (!TARGET_VX && VECTOR_NOFP_REGNO_P (regno))
10478 return false;
10480 switch (REGNO_REG_CLASS (regno))
10482 case VEC_REGS:
10483 return ((GET_MODE_CLASS (mode) == MODE_INT
10484 && s390_class_max_nregs (VEC_REGS, mode) == 1)
10486 || (TARGET_VXE && mode == SFmode)
10487 || s390_vector_mode_supported_p (mode));
10489 case FP_REGS:
10490 if (TARGET_VX
10491 && ((GET_MODE_CLASS (mode) == MODE_INT
10492 && s390_class_max_nregs (FP_REGS, mode) == 1)
10494 || s390_vector_mode_supported_p (mode)))
10495 return true;
10497 if (REGNO_PAIR_OK (regno, mode))
10499 if (mode == SImode || mode == DImode)
10500 return true;
10502 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
10503 return true;
10507 if (FRAME_REGNO_P (regno) && mode == Pmode)
10508 return true;
10512 if (REGNO_PAIR_OK (regno, mode))
10514 if (TARGET_ZARCH
10515 || (mode != TFmode && mode != TCmode && mode != TDmode))
10516 return true;
10520 if (GET_MODE_CLASS (mode) == MODE_CC)
10521 return true;
10524 if (REGNO_PAIR_OK (regno, mode))
10526 if (mode == SImode || mode == Pmode)
10527 return true;
10537 /* Implement TARGET_MODES_TIEABLE_P. */
10540 s390_modes_tieable_p (machine_mode mode1, machine_mode mode2)
10542 return ((mode1 == SFmode || mode1 == DFmode)
10543 == (mode2 == SFmode || mode2 == DFmode));
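/* E.g. SFmode and DFmode are tieable with each other, but neither is
   tieable with SImode, presumably because the float modes prefer FPRs
   while SImode values usually live in GPRs.  */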
10546 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
10549 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
10551 /* Once we've decided upon a register to use as base register, it must
10552 no longer be used for any other purpose. */
10553 if (cfun->machine->base_reg)
10554 if (REGNO (cfun->machine->base_reg) == old_reg
10555 || REGNO (cfun->machine->base_reg) == new_reg)
10556 return false;
10558 /* Prevent regrename from using call-saved regs which haven't
10559 actually been saved. This is necessary since regrename assumes
10560 the backend save/restore decisions are based on
10561 df_regs_ever_live. Since we have our own routine we have to tell
10562 regrename manually about it. */
10563 if (GENERAL_REGNO_P (new_reg)
10564 && !call_really_used_regs[new_reg]
10565 && cfun_gpr_save_slot (new_reg) == SAVE_SLOT_NONE)
10566 return false;
10568 return true;
10571 /* Return nonzero if register REGNO can be used as a scratch register
10572 in peephole2. */
10574 static bool
10575 s390_hard_regno_scratch_ok (unsigned int regno)
10577 /* See s390_hard_regno_rename_ok. */
10578 if (GENERAL_REGNO_P (regno)
10579 && !call_really_used_regs[regno]
10580 && cfun_gpr_save_slot (regno) == SAVE_SLOT_NONE)
10581 return false;
10583 return true;
10586 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. When generating
10587 code that runs in z/Architecture mode, but conforms to the 31-bit
10588 ABI, GPRs can hold 8 bytes; the ABI guarantees only that the lower 4
10589 bytes are saved across calls, however. */
10592 s390_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
10594 if (!TARGET_64BIT
10595 && TARGET_ZARCH
10596 && GET_MODE_SIZE (mode) > 4
10597 && ((regno >= 6 && regno <= 15) || regno == 32))
10598 return true;
10600 if (TARGET_VX
10601 && GET_MODE_SIZE (mode) > 8
10602 && (((TARGET_64BIT && regno >= 24 && regno <= 31))
10603 || (!TARGET_64BIT && (regno == 18 || regno == 19))))
10604 return true;
10606 return false;
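/* Example (illustrative): in 31-bit code compiled with -mzarch, a
   DImode value kept in r7 has only its low 4 bytes preserved across a
   call, so the high half must be treated as clobbered.  */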
10609 /* Maximum number of registers to represent a value of mode MODE
10610 in a register of class RCLASS. */
10613 s390_class_max_nregs (enum reg_class rclass, machine_mode mode)
10616 bool reg_pair_required_p = false;
10618 switch (rclass)
10620 case FP_REGS:
10621 case VEC_REGS:
10622 reg_size = TARGET_VX ? 16 : 8;
10624 /* TF and TD modes would fit into a VR but we put them into a
10625 register pair since we do not have 128bit FP instructions on
10626 full VRs. */
10627 if (TARGET_VX
10628 && SCALAR_FLOAT_MODE_P (mode)
10629 && GET_MODE_SIZE (mode) >= 16)
10630 reg_pair_required_p = true;
10632 /* Even if complex types would fit into a single FPR/VR we force
10633 them into a register pair to deal with the parts more easily.
10634 (FIXME: What about complex ints?) */
10635 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
10636 reg_pair_required_p = true;
10642 reg_size = UNITS_PER_WORD;
10646 if (reg_pair_required_p)
10647 return 2 * ((GET_MODE_SIZE (mode) / 2 + reg_size - 1) / reg_size);
10649 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
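/* Worked example (illustrative): TFmode (16 bytes) in FP_REGS with
   TARGET_VX has reg_size == 16 but reg_pair_required_p set, giving
   2 * ((16/2 + 15) / 16) == 2 registers; without VX, reg_size == 8
   likewise yields 2 * ((8 + 7) / 8) == 2.  */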
10652 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
10655 s390_can_change_mode_class (machine_mode from_mode,
10656 machine_mode to_mode,
10657 reg_class_t rclass)
10659 machine_mode small_mode;
10660 machine_mode big_mode;
10662 /* V1TF and TF have different representations in vector
10663 registers. */
10664 if (reg_classes_intersect_p (VEC_REGS, rclass)
10665 && ((from_mode == V1TFmode && to_mode == TFmode)
10666 || (from_mode == TFmode && to_mode == V1TFmode)))
10667 return false;
10669 if (GET_MODE_SIZE (from_mode) == GET_MODE_SIZE (to_mode))
10670 return true;
10672 if (GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
10674 small_mode = from_mode;
10675 big_mode = to_mode;
10677 else
10679 small_mode = to_mode;
10680 big_mode = from_mode;
10683 /* Values residing in VRs are little-endian style. All modes are
10684 placed left-aligned in a VR. This means that we cannot allow
10685 switching between modes with differing sizes. Also if the vector
10686 facility is available we still place TFmode values in VR register
10687 pairs, since the only instructions we have operating on TFmodes
10688 only deal with register pairs. Therefore we have to allow DFmode
10689 subregs of TFmodes to enable the TFmode splitters. */
10690 if (reg_classes_intersect_p (VEC_REGS, rclass)
10691 && (GET_MODE_SIZE (small_mode) < 8
10692 || s390_class_max_nregs (VEC_REGS, big_mode) == 1))
10693 return false;
10695 /* Likewise for access registers, since they have only half the
10696 word size on 64-bit. */
10697 if (reg_classes_intersect_p (ACCESS_REGS, rclass))
10698 return false;
10700 return true;
10703 /* Return true if we use LRA instead of reload pass. */
10704 static bool
10705 s390_lra_p (void)
10707 return s390_lra_flag;
10710 /* Return true if register FROM can be eliminated via register TO. */
10713 s390_can_eliminate (const int from, const int to)
10715 /* On zSeries machines, we have not marked the base register as fixed.
10716 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
10717 If a function requires the base register, we say here that this
10718 elimination cannot be performed. This will cause reload to free
10719 up the base register (as if it were fixed). On the other hand,
10720 if the current function does *not* require the base register, we
10721 say here the elimination succeeds, which in turn allows reload
10722 to allocate the base register for any other purpose. */
10723 if (from == BASE_REGNUM && to == BASE_REGNUM)
10725 if (TARGET_CPU_ZARCH)
10727 s390_init_frame_layout ();
10728 return cfun->machine->base_reg == NULL_RTX;
10731 return false;
10734 /* Everything else must point into the stack frame. */
10735 gcc_assert (to == STACK_POINTER_REGNUM
10736 || to == HARD_FRAME_POINTER_REGNUM);
10738 gcc_assert (from == FRAME_POINTER_REGNUM
10739 || from == ARG_POINTER_REGNUM
10740 || from == RETURN_ADDRESS_POINTER_REGNUM);
10742 /* Make sure we actually saved the return address. */
10743 if (from == RETURN_ADDRESS_POINTER_REGNUM)
10744 if (!crtl->calls_eh_return
10745 && !cfun->stdarg
10746 && !cfun_frame_layout.save_return_addr_p)
10747 return false;
10749 return true;
10752 /* Return offset between register FROM and TO initially after prolog. */
10755 s390_initial_elimination_offset (int from, int to)
10757 HOST_WIDE_INT offset;
10759 /* ??? Why are we called for non-eliminable pairs? */
10760 if (!s390_can_eliminate (from, to))
10761 return 0;
10763 switch (from)
10765 case FRAME_POINTER_REGNUM:
10766 offset = (get_frame_size()
10767 + STACK_POINTER_OFFSET
10768 + crtl->outgoing_args_size);
10769 break;
10771 case ARG_POINTER_REGNUM:
10772 s390_init_frame_layout ();
10773 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
10774 break;
10776 case RETURN_ADDRESS_POINTER_REGNUM:
10777 s390_init_frame_layout ();
10779 if (cfun_frame_layout.first_save_gpr_slot == -1)
10781 /* If it turns out that for stdarg nothing went into the reg
10782 save area we also do not need the return address
10783 save slot. */
10784 if (cfun->stdarg && !cfun_save_arg_fprs_p)
10785 offset = 0;
10786 else
10787 gcc_unreachable ();
10790 /* In order to make the following work it is not necessary for
10791 r14 to have a save slot. It is sufficient if one other GPR
10792 got one. Since the GPRs are always stored without gaps we
10793 are able to calculate where the r14 save slot would
10794 reside. */
10795 offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
10796 (RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
10797 UNITS_PER_LONG);
10798 break;
10804 default:
10805 gcc_unreachable ();
10808 return offset;
10811 /* Emit insn to save fpr REGNUM at offset OFFSET relative
10812 to register BASE. Return generated insn. */
10815 save_fpr (rtx base, int offset, int regnum)
10818 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10820 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
10821 set_mem_alias_set (addr, get_varargs_alias_set ());
10823 set_mem_alias_set (addr, get_frame_alias_set ());
10825 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
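/* The move above typically assembles to a single store such as
   "std %f8,80(%r15)" when BASE is the stack pointer (offset chosen
   for illustration only).  */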
10828 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
10829 to register BASE. Return generated insn. */
10832 restore_fpr (rtx base, int offset, int regnum)
10835 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
10836 set_mem_alias_set (addr, get_frame_alias_set ());
10838 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
10841 /* Return true if REGNO is a global register, but not one
10842 of the special ones that need to be saved/restored in any case. */
10845 global_not_special_regno_p (int regno)
10847 return (global_regs[regno]
10848 /* These registers are special and need to be
10849 restored in any case. */
10850 && !(regno == STACK_POINTER_REGNUM
10851 || regno == RETURN_REGNUM
10852 || regno == BASE_REGNUM
10853 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
10856 /* Generate insn to save registers FIRST to LAST into
10857 the register save area located at offset OFFSET
10858 relative to register BASE. */
10861 save_gprs (rtx base, int offset, int first, int last)
10863 rtx addr, insn, note;
10866 addr = plus_constant (Pmode, base, offset);
10867 addr = gen_rtx_MEM (Pmode, addr);
10869 set_mem_alias_set (addr, get_frame_alias_set ());
10871 /* Special-case single register. */
10872 if (first == last)
10874 if (TARGET_64BIT)
10875 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
10876 else
10877 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
10879 if (!global_not_special_regno_p (first))
10880 RTX_FRAME_RELATED_P (insn) = 1;
10882 return insn;
10885 insn = gen_store_multiple (addr,
10886 gen_rtx_REG (Pmode, first),
10887 GEN_INT (last - first + 1));
10889 if (first <= 6 && cfun->stdarg)
10890 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10892 rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
10894 if (first + i <= 6)
10895 set_mem_alias_set (mem, get_varargs_alias_set ());
10898 /* We need to set the FRAME_RELATED flag on all SETs
10899 inside the store-multiple pattern.
10901 However, we must not emit DWARF records for registers 2..5
10902 if they are stored for use by variable arguments ...
10904 ??? Unfortunately, it is not enough to simply not set the
10905 FRAME_RELATED flags for those SETs, because the first SET
10906 of the PARALLEL is always treated as if it had the flag
10907 set, even if it does not. Therefore we emit a new pattern
10908 without those registers as REG_FRAME_RELATED_EXPR note. */
10910 if (first >= 6 && !global_not_special_regno_p (first))
10912 rtx pat = PATTERN (insn);
10914 for (i = 0; i < XVECLEN (pat, 0); i++)
10915 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
10916 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
10918 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
10920 RTX_FRAME_RELATED_P (insn) = 1;
10922 else if (last >= 6)
10926 for (start = first >= 6 ? first : 6; start <= last; start++)
10927 if (!global_not_special_regno_p (start))
10928 break;
10930 if (start > last)
10931 return insn;
10933 addr = plus_constant (Pmode, base,
10934 offset + (start - first) * UNITS_PER_LONG);
10936 if (start == last)
10938 if (TARGET_64BIT)
10939 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
10940 gen_rtx_REG (Pmode, start));
10941 else
10942 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
10943 gen_rtx_REG (Pmode, start));
10944 note = PATTERN (note);
10946 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10947 RTX_FRAME_RELATED_P (insn) = 1;
10950 else
10952 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
10953 gen_rtx_REG (Pmode, start),
10954 GEN_INT (last - start + 1));
10955 note = PATTERN (note);
10957 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
10959 for (i = 0; i < XVECLEN (note, 0); i++)
10960 if (GET_CODE (XVECEXP (note, 0, i)) == SET
10961 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
10963 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
10965 RTX_FRAME_RELATED_P (insn) = 1;
10969 return insn;
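/* Example of the emitted code (illustrative): for first == 6 and
   last == 15 on 64-bit this becomes a single "stmg %r6,%r15,48(%r15)",
   48 being the usual gprs_offset of the standard frame layout.  */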
10971 /* Generate insn to restore registers FIRST to LAST from
10972 the register save area located at offset OFFSET
10973 relative to register BASE. */
10976 restore_gprs (rtx base, int offset, int first, int last)
10980 addr = plus_constant (Pmode, base, offset);
10981 addr = gen_rtx_MEM (Pmode, addr);
10982 set_mem_alias_set (addr, get_frame_alias_set ());
10984 /* Special-case single register. */
10985 if (first == last)
10987 if (TARGET_64BIT)
10988 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
10989 else
10990 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
10992 RTX_FRAME_RELATED_P (insn) = 1;
10993 return insn;
10996 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
10997 addr,
10998 GEN_INT (last - first + 1));
10999 RTX_FRAME_RELATED_P (insn) = 1;
11000 return insn;
11003 /* Return insn sequence to load the GOT register. */
11006 s390_load_got (void)
11010 /* We cannot use pic_offset_table_rtx here since we use this
11011 function also for non-pic if __tls_get_offset is called and in
11012 that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
11013 aren't usable. */
11014 rtx got_rtx = gen_rtx_REG (Pmode, 12);
11016 start_sequence ();
11018 if (TARGET_CPU_ZARCH)
11020 emit_move_insn (got_rtx, s390_got_symbol ());
11026 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, s390_got_symbol ()),
11027 UNSPEC_LTREL_OFFSET);
11028 offset = gen_rtx_CONST (Pmode, offset);
11029 offset = force_const_mem (Pmode, offset);
11031 emit_move_insn (got_rtx, offset);
11033 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
11034 UNSPEC_LTREL_BASE);
11035 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
11037 emit_move_insn (got_rtx, offset);
11040 insns = get_insns ();
11041 end_sequence ();
11043 return insns;
11045 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
11046 and the change to the stack pointer. */
11049 s390_emit_stack_tie (void)
11051 rtx mem = gen_frame_mem (BLKmode,
11052 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
11054 emit_insn (gen_stack_tie (mem));
11057 /* Copy GPRS into FPR save slots. */
11060 s390_save_gprs_to_fprs (void)
11064 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
11065 return;
11067 for (i = 6; i < 16; i++)
11069 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
11072 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
11073 gen_rtx_REG (DImode, i));
11074 RTX_FRAME_RELATED_P (insn) = 1;
11075 /* This prevents dwarf2cfi from interpreting the set. Doing
11076 so it might emit def_cfa_register infos setting an FPR as
11077 new CFA register. */
11078 add_reg_note (insn, REG_CFA_REGISTER, copy_rtx (PATTERN (insn)));
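/* On z10 and newer the move above maps to an "ldgr %f0,%r6" style
   instruction (register choice illustrative), saving a GPR without
   touching memory.  */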
11083 /* Restore GPRs from FPR save slots. */
11086 s390_restore_gprs_from_fprs (void)
11090 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
11091 return;
11093 for (i = 6; i < 16; i++)
11097 if (!FP_REGNO_P (cfun_gpr_save_slot (i)))
11098 continue;
11100 rtx fpr = gen_rtx_REG (DImode, cfun_gpr_save_slot (i));
11102 if (i == STACK_POINTER_REGNUM)
11103 insn = emit_insn (gen_stack_restore_from_fpr (fpr));
11105 insn = emit_move_insn (gen_rtx_REG (DImode, i), fpr);
11107 df_set_regs_ever_live (i, true);
11108 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
11109 if (i == STACK_POINTER_REGNUM)
11110 add_reg_note (insn, REG_CFA_DEF_CFA,
11111 plus_constant (Pmode, stack_pointer_rtx,
11112 STACK_POINTER_OFFSET));
11113 RTX_FRAME_RELATED_P (insn) = 1;
11118 /* A pass run immediately before shrink-wrapping and prologue and epilogue
11119 generation. */
11121 namespace {
11123 const pass_data pass_data_s390_early_mach =
11125 RTL_PASS, /* type */
11126 "early_mach", /* name */
11127 OPTGROUP_NONE, /* optinfo_flags */
11128 TV_MACH_DEP, /* tv_id */
11129 0, /* properties_required */
11130 0, /* properties_provided */
11131 0, /* properties_destroyed */
11132 0, /* todo_flags_start */
11133 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
11136 class pass_s390_early_mach : public rtl_opt_pass
11139 pass_s390_early_mach (gcc::context *ctxt)
11140 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
11143 /* opt_pass methods: */
11144 virtual unsigned int execute (function *);
11146 }; // class pass_s390_early_mach
11148 unsigned int
11149 pass_s390_early_mach::execute (function *fun)
11153 /* Try to get rid of the FPR clobbers. */
11154 s390_optimize_nonescaping_tx ();
11156 /* Re-compute register info. */
11157 s390_register_info ();
11159 /* If we're using a base register, ensure that it is always valid for
11160 the first non-prologue instruction. */
11161 if (fun->machine->base_reg)
11162 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
11164 /* Annotate all constant pool references to let the scheduler know
11165 they implicitly use the base register. */
11166 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11167 if (INSN_P (insn))
11169 annotate_constant_pool_refs (&PATTERN (insn));
11170 df_insn_rescan (insn);
11172 return 0;
11175 } // anon namespace
11177 /* Calculate TARGET = REG + OFFSET as s390_emit_prologue would do it.
11178 - push too big immediates to the literal pool and annotate the refs
11179 - emit frame related notes for stack pointer changes. */
11182 s390_prologue_plus_offset (rtx target, rtx reg, rtx offset, bool frame_related_p)
11185 rtx orig_offset = offset;
11187 gcc_assert (REG_P (target));
11188 gcc_assert (REG_P (reg));
11189 gcc_assert (CONST_INT_P (offset));
11191 if (offset == const0_rtx) /* lr/lgr */
11193 insn = emit_move_insn (target, reg);
11195 else if (DISP_IN_RANGE (INTVAL (offset))) /* la */
11197 insn = emit_move_insn (target, gen_rtx_PLUS (Pmode, reg,
11198 offset));
11200 else
11202 if (!satisfies_constraint_K (offset) /* ahi/aghi */
11203 && (!TARGET_EXTIMM
11204 || (!satisfies_constraint_Op (offset) /* alfi/algfi */
11205 && !satisfies_constraint_On (offset)))) /* slfi/slgfi */
11206 offset = force_const_mem (Pmode, offset);
11208 if (target != reg)
11210 insn = emit_move_insn (target, reg);
11211 RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
11214 insn = emit_insn (gen_add2_insn (target, offset));
11216 if (!CONST_INT_P (offset))
11218 annotate_constant_pool_refs (&PATTERN (insn));
11220 if (frame_related_p)
11221 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11222 gen_rtx_SET (target,
11223 gen_rtx_PLUS (Pmode, target,
11224 orig_offset)));
11228 RTX_FRAME_RELATED_P (insn) = frame_related_p ? 1 : 0;
11230 /* If this is a stack adjustment and we are generating a stack clash
11231 prologue, then add a REG_STACK_CHECK note to signal that this insn
11232 should be left alone. */
11233 if (flag_stack_clash_protection && target == stack_pointer_rtx)
11234 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
11239 /* Emit a compare instruction with a volatile memory access as stack
11240 probe. It does not waste store tags and does not clobber any
11241 registers apart from the condition code. */
11243 s390_emit_stack_probe (rtx addr)
11245 rtx tmp = gen_rtx_MEM (Pmode, addr);
11246 MEM_VOLATILE_P (tmp) = 1;
11247 s390_emit_compare (EQ, gen_rtx_REG (Pmode, 0), tmp);
11248 emit_insn (gen_blockage ());
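/* The probe is merely a compare against the freshly allocated word,
   e.g. "cg %r0,0(%r15)" on 64-bit (illustrative); the CC result is
   never consumed, but the access faults if the guard page has been
   hit.  */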
11251 /* Use a runtime loop if we have to emit more probes than this. */
11252 #define MIN_UNROLL_PROBES 3
11254 /* Allocate SIZE bytes of stack space, using TEMP_REG as a temporary
11255 if necessary. LAST_PROBE_OFFSET contains the offset of the closest
11256 probe relative to the stack pointer.
11258 Note that SIZE is negative.
11260 The return value is true if TEMP_REG has been clobbered. */
11262 allocate_stack_space (rtx size, HOST_WIDE_INT last_probe_offset,
11265 bool temp_reg_clobbered_p = false;
11266 HOST_WIDE_INT probe_interval
11267 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
11268 HOST_WIDE_INT guard_size
11269 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
11271 if (flag_stack_clash_protection)
11273 if (last_probe_offset + -INTVAL (size) < guard_size)
11274 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
11275 else
11277 rtx offset = GEN_INT (probe_interval - UNITS_PER_LONG);
11278 HOST_WIDE_INT rounded_size = -INTVAL (size) & -probe_interval;
11279 HOST_WIDE_INT num_probes = rounded_size / probe_interval;
11280 HOST_WIDE_INT residual = -INTVAL (size) - rounded_size;
11282 if (num_probes < MIN_UNROLL_PROBES)
11284 /* Emit unrolled probe statements. */
11286 for (unsigned int i = 0; i < num_probes; i++)
11288 s390_prologue_plus_offset (stack_pointer_rtx,
11289 stack_pointer_rtx,
11290 GEN_INT (-probe_interval), true);
11291 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11292 stack_pointer_rtx,
11293 offset));
11295 dump_stack_clash_frame_info (PROBE_INLINE, residual != 0);
11299 /* Emit a loop probing the pages. */
11301 rtx_code_label *loop_start_label = gen_label_rtx ();
11303 /* From now on temp_reg will be the CFA register. */
11304 s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
11305 GEN_INT (-rounded_size), true);
11306 emit_label (loop_start_label);
11308 s390_prologue_plus_offset (stack_pointer_rtx,
11309 stack_pointer_rtx,
11310 GEN_INT (-probe_interval), false);
11311 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11312 stack_pointer_rtx,
11313 offset));
11314 emit_cmp_and_jump_insns (stack_pointer_rtx, temp_reg,
11315 GT, NULL_RTX,
11316 Pmode, 1, loop_start_label);
11318 /* Without this make_edges ICEes. */
11319 JUMP_LABEL (get_last_insn ()) = loop_start_label;
11320 LABEL_NUSES (loop_start_label) = 1;
11322 /* That's going to be a NOP since stack pointer and
11323 temp_reg are supposed to be the same here. We just
11324 emit it to set the CFA reg back to r15. */
11325 s390_prologue_plus_offset (stack_pointer_rtx, temp_reg,
11326 const0_rtx, true);
11327 temp_reg_clobbered_p = true;
11328 dump_stack_clash_frame_info (PROBE_LOOP, residual != 0);
11331 /* Handle any residual allocation request. */
11332 s390_prologue_plus_offset (stack_pointer_rtx,
11333 stack_pointer_rtx,
11334 GEN_INT (-residual), true);
11335 last_probe_offset += residual;
11336 if (last_probe_offset >= probe_interval)
11337 s390_emit_stack_probe (gen_rtx_PLUS (Pmode,
11338 stack_pointer_rtx,
11339 GEN_INT (residual
11340 - UNITS_PER_LONG)));
11342 return temp_reg_clobbered_p;
11344 else
11346 /* Subtract frame size from stack pointer. */
11347 s390_prologue_plus_offset (stack_pointer_rtx,
11348 stack_pointer_rtx,
11349 size, true);
11351 return temp_reg_clobbered_p;
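/* Worked example (illustrative): allocating 18000 bytes with a 4 KiB
   probe interval gives rounded_size == 16384, num_probes == 4 and
   residual == 1616; since 4 >= MIN_UNROLL_PROBES the probing loop is
   used, followed by the residual allocation handled above.  */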
11354 /* Expand the prologue into a bunch of separate insns. */
11357 s390_emit_prologue (void)
11365 /* Choose best register to use for temp use within prologue.
11366 TPF with profiling must avoid the register 14 - the tracing function
11367 needs the original contents of r14 to be preserved. */
11369 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
11370 && !crtl->is_leaf
11371 && !TARGET_TPF_PROFILING)
11372 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11373 else if (flag_split_stack && cfun->stdarg)
11374 temp_reg = gen_rtx_REG (Pmode, 12);
11376 temp_reg = gen_rtx_REG (Pmode, 1);
11378 /* When probing for stack-clash mitigation, we have to track the distance
11379 between the stack pointer and closest known reference.
11381 Most of the time we have to make a worst case assumption. The
11382 only exception is when TARGET_BACKCHAIN is active, in which case
11383 we know *sp (offset 0) was written. */
11384 HOST_WIDE_INT probe_interval
11385 = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
11386 HOST_WIDE_INT last_probe_offset
11387 = (TARGET_BACKCHAIN
11388 ? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
11389 : probe_interval - (STACK_BOUNDARY / UNITS_PER_WORD));
11391 s390_save_gprs_to_fprs ();
11393 /* Save call saved gprs. */
11394 if (cfun_frame_layout.first_save_gpr != -1)
11396 insn = save_gprs (stack_pointer_rtx,
11397 cfun_frame_layout.gprs_offset +
11398 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11399 - cfun_frame_layout.first_save_gpr_slot),
11400 cfun_frame_layout.first_save_gpr,
11401 cfun_frame_layout.last_save_gpr);
11403 /* This is not 100% correct. If we have more than one register saved,
11404 then LAST_PROBE_OFFSET can move even closer to sp. */
11405 last_probe_offset
11406 = (cfun_frame_layout.gprs_offset +
11407 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
11408 - cfun_frame_layout.first_save_gpr_slot));
11410 emit_insn (insn);
11413 /* Dummy insn to mark literal pool slot. */
11415 if (cfun->machine->base_reg)
11416 emit_insn (gen_main_pool (cfun->machine->base_reg));
11418 offset = cfun_frame_layout.f0_offset;
11420 /* Save f0 and f2. */
11421 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
11423 if (cfun_fpr_save_p (i))
11425 save_fpr (stack_pointer_rtx, offset, i);
11426 if (offset < last_probe_offset)
11427 last_probe_offset = offset;
11428 offset += 8;
11430 else if (!TARGET_PACKED_STACK || cfun->stdarg)
11431 offset += 8;
11434 /* Save f4 and f6. */
11435 offset = cfun_frame_layout.f4_offset;
11436 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11438 if (cfun_fpr_save_p (i))
11440 insn = save_fpr (stack_pointer_rtx, offset, i);
11441 if (offset < last_probe_offset)
11442 last_probe_offset = offset;
11443 offset += 8;
11445 /* If f4 and f6 are call clobbered they are saved due to
11446 stdargs and therefore are not frame related. */
11447 if (!call_really_used_regs[i])
11448 RTX_FRAME_RELATED_P (insn) = 1;
11450 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
11454 if (TARGET_PACKED_STACK
11455 && cfun_save_high_fprs_p
11456 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
11458 offset = (cfun_frame_layout.f8_offset
11459 + (cfun_frame_layout.high_fprs - 1) * 8);
11461 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
11462 if (cfun_fpr_save_p (i))
11464 insn = save_fpr (stack_pointer_rtx, offset, i);
11465 if (offset < last_probe_offset)
11466 last_probe_offset = offset;
11468 RTX_FRAME_RELATED_P (insn) = 1;
11471 if (offset >= cfun_frame_layout.f8_offset)
11475 if (!TARGET_PACKED_STACK)
11476 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
11478 if (flag_stack_usage_info)
11479 current_function_static_stack_size = cfun_frame_layout.frame_size;
11481 /* Decrement stack pointer. */
11483 if (cfun_frame_layout.frame_size > 0)
11485 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
11486 rtx_insn *stack_pointer_backup_loc;
11487 bool temp_reg_clobbered_p;
11489 if (s390_stack_size)
11491 HOST_WIDE_INT stack_guard;
11493 if (s390_stack_guard)
11494 stack_guard = s390_stack_guard;
11497 /* If no value for the stack guard is provided, the smallest power of 2
11498 larger than the current frame size is chosen. */
11500 while (stack_guard < cfun_frame_layout.frame_size)
11504 if (cfun_frame_layout.frame_size >= s390_stack_size)
11506 warning (0, "frame size of function %qs is %wd"
11507 " bytes exceeding user provided stack limit of "
11509 "An unconditional trap is added.",
11510 current_function_name(), cfun_frame_layout.frame_size,
11512 emit_insn (gen_trap ());
11517 /* stack_guard has to be smaller than s390_stack_size.
11518 Otherwise we would emit an AND with zero which would
11519 not match the test under mask pattern. */
11520 if (stack_guard >= s390_stack_size)
11522 warning (0, "frame size of function %qs is %wd"
11523 " bytes which is more than half the stack size. "
11524 "The dynamic check would not be reliable. "
11525 "No check emitted for this function.",
11526 current_function_name(),
11527 cfun_frame_layout.frame_size);
11531 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
11532 & ~(stack_guard - 1));
11534 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
11535 GEN_INT (stack_check_mask));
11537 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
11539 t, const0_rtx, const0_rtx));
11541 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
11543 t, const0_rtx, const0_rtx));
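/* A worked example of the check above (hypothetical option values):
   with -mstack-size=65536 and -mstack-guard=4096 the mask becomes
   (65536 - 1) & ~(4096 - 1) = 0xf000, and the conditional trap fires
   once all of those stack pointer bits are zero - i.e. once the stack
   pointer has dropped into the final guard-sized slice of the stack
   area.  */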
11548 if (s390_warn_framesize > 0
11549 && cfun_frame_layout.frame_size >= s390_warn_framesize)
11550 warning (0, "frame size of %qs is %wd bytes",
11551 current_function_name (), cfun_frame_layout.frame_size);
11553 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
11554 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
11556 /* Save the location where we could back up the incoming stack
11558 stack_pointer_backup_loc = get_last_insn ();
11560 temp_reg_clobbered_p = allocate_stack_space (frame_off, last_probe_offset,
11563 if (TARGET_BACKCHAIN || next_fpr)
11565 if (temp_reg_clobbered_p)
11567 /* allocate_stack_space had to make use of temp_reg and
11568 we need it to hold a backup of the incoming stack
11569 pointer. Recompute that value from the current
11571 s390_prologue_plus_offset (temp_reg, stack_pointer_rtx,
11572 GEN_INT (cfun_frame_layout.frame_size),
11577 /* allocate_stack_space didn't actually require
11578 temp_reg. Insert the stack pointer backup insn
11579 before the stack pointer decrement code - knowing now
11580 that the value will survive. */
11581 emit_insn_after (gen_move_insn (temp_reg, stack_pointer_rtx),
11582 stack_pointer_backup_loc);
11586 /* Set backchain. */
11588 if (TARGET_BACKCHAIN)
11590 if (cfun_frame_layout.backchain_offset)
11591 addr = gen_rtx_MEM (Pmode,
11592 plus_constant (Pmode, stack_pointer_rtx,
11593 cfun_frame_layout.backchain_offset));
11595 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
11596 set_mem_alias_set (addr, get_frame_alias_set ());
11597 insn = emit_insn (gen_move_insn (addr, temp_reg));
11600 /* If we support non-call exceptions (e.g. for Java),
11601 we need to make sure the backchain pointer is set up
11602 before any possibly trapping memory access. */
11603 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
11605 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
11606 emit_clobber (addr);
11609 else if (flag_stack_clash_protection)
11610 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
11612 /* Save fprs 8 - 15 (64 bit ABI). */
11614 if (cfun_save_high_fprs_p && next_fpr)
11616 /* If the stack might be accessed through a different register
11617 we have to make sure that the stack pointer decrement is not
11618 moved below the use of the stack slots. */
11619 s390_emit_stack_tie ();
11621 insn = emit_insn (gen_add2_insn (temp_reg,
11622 GEN_INT (cfun_frame_layout.f8_offset)));
11626 for (i = FPR8_REGNUM; i <= next_fpr; i++)
11627 if (cfun_fpr_save_p (i))
11629 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
11630 cfun_frame_layout.frame_size
11631 + cfun_frame_layout.f8_offset
11634 insn = save_fpr (temp_reg, offset, i);
11636 RTX_FRAME_RELATED_P (insn) = 1;
11637 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
11638 gen_rtx_SET (gen_rtx_MEM (DFmode, addr),
11639 gen_rtx_REG (DFmode, i)));
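/* The store above goes through temp_reg, so the unwinder cannot infer
   the save slot from the insn alone; the REG_FRAME_RELATED_EXPR note
   restates it as a stack-pointer-relative store for the CFI output.  */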
11643 /* Set frame pointer, if needed. */
11645 if (frame_pointer_needed)
11647 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
11648 RTX_FRAME_RELATED_P (insn) = 1;
11651 /* Set up got pointer, if needed. */
11653 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
11655 rtx_insn *insns = s390_load_got ();
11657 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
11658 annotate_constant_pool_refs (&PATTERN (insn));
11663 if (TARGET_TPF_PROFILING)
11665 /* Generate a BAS instruction to serve as a function
11666 entry intercept to facilitate the use of tracing
11667 algorithms located at the branch target. */
11668 emit_insn (gen_prologue_tpf ());
11670 /* Emit a blockage here so that all code
11671 lies between the profiling mechanisms. */
11672 emit_insn (gen_blockage ());
11676 /* Expand the epilogue into a bunch of separate insns. */
11679 s390_emit_epilogue (bool sibcall)
11681 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
11682 int area_bottom, area_top, offset = 0;
11687 if (TARGET_TPF_PROFILING)
11690 /* Generate a BAS instruction to serve as a function
11691 entry intercept to facilitate the use of tracing
11692 algorithms located at the branch target. */
11694 /* Emit a blockage here so that all code
11695 lies between the profiling mechanisms. */
11696 emit_insn (gen_blockage ());
11698 emit_insn (gen_epilogue_tpf ());
11701 /* Check whether to use frame or stack pointer for restore. */
11703 frame_pointer = (frame_pointer_needed
11704 ? hard_frame_pointer_rtx : stack_pointer_rtx);
11706 s390_frame_area (&area_bottom, &area_top);
11708 /* Check whether we can access the register save area.
11709 If not, increment the frame pointer as required. */
11711 if (area_top <= area_bottom)
11713 /* Nothing to restore. */
11715 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
11716 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
11718 /* Area is in range. */
11719 offset = cfun_frame_layout.frame_size;
11723 rtx insn, frame_off, cfa;
11725 offset = area_bottom < 0 ? -area_bottom : 0;
11726 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
11728 cfa = gen_rtx_SET (frame_pointer,
11729 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11730 if (DISP_IN_RANGE (INTVAL (frame_off)))
11732 insn = gen_rtx_SET (frame_pointer,
11733 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
11734 insn = emit_insn (insn);
11738 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
11739 frame_off = force_const_mem (Pmode, frame_off);
11741 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
11742 annotate_constant_pool_refs (&PATTERN (insn));
11744 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
11745 RTX_FRAME_RELATED_P (insn) = 1;
11748 /* Restore call saved fprs. */
11752 if (cfun_save_high_fprs_p)
11754 next_offset = cfun_frame_layout.f8_offset;
11755 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
11757 if (cfun_fpr_save_p (i))
11759 restore_fpr (frame_pointer,
11760 offset + next_offset, i);
11762 = alloc_reg_note (REG_CFA_RESTORE,
11763 gen_rtx_REG (DFmode, i), cfa_restores);
11772 next_offset = cfun_frame_layout.f4_offset;
11774 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
11776 if (cfun_fpr_save_p (i))
11778 restore_fpr (frame_pointer,
11779 offset + next_offset, i);
11781 = alloc_reg_note (REG_CFA_RESTORE,
11782 gen_rtx_REG (DFmode, i), cfa_restores);
11785 else if (!TARGET_PACKED_STACK)
11791 /* Return register. */
11793 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
11795 /* Restore call saved gprs. */
11797 if (cfun_frame_layout.first_restore_gpr != -1)
11802 /* Check for global registers and save them
11803 to the stack locations from which they get restored. */
11805 for (i = cfun_frame_layout.first_restore_gpr;
11806 i <= cfun_frame_layout.last_restore_gpr;
11809 if (global_not_special_regno_p (i))
11811 addr = plus_constant (Pmode, frame_pointer,
11812 offset + cfun_frame_layout.gprs_offset
11813 + (i - cfun_frame_layout.first_save_gpr_slot)
11815 addr = gen_rtx_MEM (Pmode, addr);
11816 set_mem_alias_set (addr, get_frame_alias_set ());
11817 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
11821 = alloc_reg_note (REG_CFA_RESTORE,
11822 gen_rtx_REG (Pmode, i), cfa_restores);
11825 /* Fetch the return address from the stack before the load multiple;
11826 this helps scheduling.
11828 Only do this if we already decided that r14 needs to be
11829 saved to a stack slot. (And not just because r14 happens to
11830 be in between two GPRs which need saving.) Otherwise it
11831 would be difficult to take that decision back in
11832 s390_optimize_prologue.
11834 This optimization is only helpful on in-order machines. */
11836 && cfun_gpr_save_slot (RETURN_REGNUM) == SAVE_SLOT_STACK
11837 && s390_tune <= PROCESSOR_2097_Z10)
11839 int return_regnum = find_unused_clobbered_reg();
11840 if (!return_regnum)
11842 return_reg = gen_rtx_REG (Pmode, return_regnum);
11844 addr = plus_constant (Pmode, frame_pointer,
11845 offset + cfun_frame_layout.gprs_offset
11847 - cfun_frame_layout.first_save_gpr_slot)
11849 addr = gen_rtx_MEM (Pmode, addr);
11850 set_mem_alias_set (addr, get_frame_alias_set ());
11851 emit_move_insn (return_reg, addr);
11853 /* Once we have done that optimization we have to make sure
11854 s390_optimize_prologue does not try to remove the store
11855 of r14 since we will not be able to find the load issued
11857 cfun_frame_layout.save_return_addr_p = true;
11860 insn = restore_gprs (frame_pointer,
11861 offset + cfun_frame_layout.gprs_offset
11862 + (cfun_frame_layout.first_restore_gpr
11863 - cfun_frame_layout.first_save_gpr_slot)
11865 cfun_frame_layout.first_restore_gpr,
11866 cfun_frame_layout.last_restore_gpr);
11867 insn = emit_insn (insn);
11868 REG_NOTES (insn) = cfa_restores;
11869 add_reg_note (insn, REG_CFA_DEF_CFA,
11870 plus_constant (Pmode, stack_pointer_rtx,
11871 STACK_POINTER_OFFSET));
11872 RTX_FRAME_RELATED_P (insn) = 1;
11875 s390_restore_gprs_from_fprs ();
11880 /* Return to caller. */
11882 p = rtvec_alloc (2);
11884 RTVEC_ELT (p, 0) = ret_rtx;
11885 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
11886 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
11890 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
11893 s390_set_up_by_prologue (hard_reg_set_container *regs)
11895 if (cfun->machine->base_reg
11896 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
11897 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
11900 /* -fsplit-stack support. */
11902 /* A SYMBOL_REF for __morestack. */
11903 static GTY(()) rtx morestack_ref;
11905 /* When using -fsplit-stack, the allocation routines set a field in
11906 the TCB to the bottom of the stack plus this much space, measured
11909 #define SPLIT_STACK_AVAILABLE 1024
11911 /* Emit -fsplit-stack prologue, which goes before the regular function
11915 s390_expand_split_stack_prologue (void)
11917 rtx r1, guard, cc = NULL;
11919 /* Offset from thread pointer to __private_ss. */
11920 int psso = TARGET_64BIT ? 0x38 : 0x20;
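/* These offsets name the __private_ss slot in the TCB layout assumed
   by the rest of the -fsplit-stack toolchain (libgcc and the linker);
   they are fixed conventions, not values computed here.  */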
11921 /* Pointer size in bytes. */
11922 /* Frame size and argument size - the two parameters to __morestack. */
11923 HOST_WIDE_INT frame_size = cfun_frame_layout.frame_size;
11924 /* Align argument size to 8 bytes - simplifies __morestack code. */
11925 HOST_WIDE_INT args_size = crtl->args.size >= 0
11926 ? ((crtl->args.size + 7) & ~7)
11928 /* Label to be called by __morestack. */
11929 rtx_code_label *call_done = NULL;
11930 rtx_code_label *parm_base = NULL;
11933 gcc_assert (flag_split_stack && reload_completed);
11934 if (!TARGET_CPU_ZARCH)
11936 sorry ("CPUs older than z900 are not supported for -fsplit-stack");
11940 r1 = gen_rtx_REG (Pmode, 1);
11942 /* If no stack frame will be allocated, don't do anything. */
11945 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
11947 /* If va_start is used, just use r15. */
11948 emit_move_insn (r1,
11949 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
11950 GEN_INT (STACK_POINTER_OFFSET)));
11956 if (morestack_ref == NULL_RTX)
11958 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
11959 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
11960 | SYMBOL_FLAG_FUNCTION);
11963 if (CONST_OK_FOR_K (frame_size) || CONST_OK_FOR_Op (frame_size))
11965 /* If frame_size will fit in an add instruction, do a stack space
11966 check, and only call __morestack if there's not enough space. */
11968 /* Get thread pointer. r1 is the only register we can always destroy - r0
11969 could contain a static chain (and cannot be used to address memory
11970 anyway), r2-r6 can contain parameters, and r6-r15 are callee-saved. */
11971 emit_move_insn (r1, gen_rtx_REG (Pmode, TP_REGNUM));
11972 /* Aim at __private_ss. */
11973 guard = gen_rtx_MEM (Pmode, plus_constant (Pmode, r1, psso));
11975 /* If less than 1 KiB is used, skip the addition and compare directly with
11977 if (frame_size > SPLIT_STACK_AVAILABLE)
11979 emit_move_insn (r1, guard);
11981 emit_insn (gen_adddi3 (r1, r1, GEN_INT (frame_size)));
11983 emit_insn (gen_addsi3 (r1, r1, GEN_INT (frame_size)));
11987 /* Compare the (maybe adjusted) guard with the stack pointer. */
11988 cc = s390_emit_compare (LT, stack_pointer_rtx, guard);
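/* The comparison is arranged so that the conditional call below is
   taken exactly when sp < guard, i.e. when the current stack segment
   does not have frame_size bytes (modulo the SPLIT_STACK_AVAILABLE
   slack handled above) left; only then is __morestack entered.  */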
11991 call_done = gen_label_rtx ();
11992 parm_base = gen_label_rtx ();
11994 /* Emit the parameter block. */
11995 tmp = gen_split_stack_data (parm_base, call_done,
11996 GEN_INT (frame_size),
11997 GEN_INT (args_size));
11998 insn = emit_insn (tmp);
11999 add_reg_note (insn, REG_LABEL_OPERAND, call_done);
12000 LABEL_NUSES (call_done)++;
12001 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
12002 LABEL_NUSES (parm_base)++;
12004 /* %r1 = litbase. */
12005 insn = emit_move_insn (r1, gen_rtx_LABEL_REF (VOIDmode, parm_base));
12006 add_reg_note (insn, REG_LABEL_OPERAND, parm_base);
12007 LABEL_NUSES (parm_base)++;
12009 /* Now, we need to call __morestack. It has very special calling
12010 conventions: it preserves the param/return/static chain registers for
12011 calling the main function body, and looks for its own parameters at %r1. */
12015 tmp = gen_split_stack_cond_call (morestack_ref, cc, call_done);
12017 insn = emit_jump_insn (tmp);
12018 JUMP_LABEL (insn) = call_done;
12019 LABEL_NUSES (call_done)++;
12021 /* Mark the jump as very unlikely to be taken. */
12022 add_reg_br_prob_note (insn,
12023 profile_probability::very_unlikely ());
12025 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
12027 /* If va_start is used, and __morestack was not called, just use
12029 emit_move_insn (r1,
12030 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
12031 GEN_INT (STACK_POINTER_OFFSET)));
12036 tmp = gen_split_stack_call (morestack_ref, call_done);
12037 insn = emit_jump_insn (tmp);
12038 JUMP_LABEL (insn) = call_done;
12039 LABEL_NUSES (call_done)++;
12043 /* __morestack will call us here. */
12045 emit_label (call_done);
12048 /* We may have to tell the dataflow pass that the split stack prologue
12049 is initializing a register. */
12052 s390_live_on_entry (bitmap regs)
12054 if (cfun->machine->split_stack_varargs_pointer != NULL_RTX)
12056 gcc_assert (flag_split_stack);
12057 bitmap_set_bit (regs, 1);
12061 /* Return true if the function can use simple_return to return outside
12062 of a shrink-wrapped region. At present shrink-wrapping is supported
12066 s390_can_use_simple_return_insn (void)
12071 /* Return true if the epilogue is guaranteed to contain only a return
12072 instruction and if a direct return can therefore be used instead.
12073 One of the main advantages of using direct return instructions
12074 is that we can then use conditional returns. */
12077 s390_can_use_return_insn (void)
12081 if (!reload_completed)
12087 if (TARGET_TPF_PROFILING)
12090 for (i = 0; i < 16; i++)
12091 if (cfun_gpr_save_slot (i) != SAVE_SLOT_NONE)
12094 /* For 31 bit this is not covered by the frame_size check below
12095 since f4, f6 are saved in the register save area without needing
12096 additional stack space. */
12098 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
12101 if (cfun->machine->base_reg
12102 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
12105 return cfun_frame_layout.frame_size == 0;
12108 /* The VX ABI differs for vararg functions. Therefore we need the
12109 prototype of the callee to be available when passing vector type
12111 static const char *
12112 s390_invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
12114 return ((TARGET_VX_ABI
12116 && VECTOR_TYPE_P (TREE_TYPE (val))
12117 && (funcdecl == NULL_TREE
12118 || (TREE_CODE (funcdecl) == FUNCTION_DECL
12119 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
12120 ? N_("vector argument passed to unprototyped function")
12125 /* Return the size in bytes of a function argument of
12126 type TYPE and/or mode MODE. At least one of TYPE or
12127 MODE must be specified. */
12130 s390_function_arg_size (machine_mode mode, const_tree type)
12133 return int_size_in_bytes (type);
12135 /* No type info available for some library calls ... */
12136 if (mode != BLKmode)
12137 return GET_MODE_SIZE (mode);
12139 /* If we have neither type nor mode, abort. */
12140 gcc_unreachable ();
12143 /* Return true if a function argument of type TYPE and mode MODE
12144 is to be passed in a vector register, if available. */
12147 s390_function_arg_vector (machine_mode mode, const_tree type)
12149 if (!TARGET_VX_ABI)
12152 if (s390_function_arg_size (mode, type) > 16)
12155 /* No type info available for some library calls ... */
12157 return VECTOR_MODE_P (mode);
12159 /* The ABI says that record types with a single member are treated
12160 just like that member would be. */
12161 while (TREE_CODE (type) == RECORD_TYPE)
12163 tree field, single = NULL_TREE;
12165 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
12167 if (TREE_CODE (field) != FIELD_DECL)
12170 if (single == NULL_TREE)
12171 single = TREE_TYPE (field);
12176 if (single == NULL_TREE)
12180 /* If the field declaration adds extra bytes due to
12181 e.g. padding, this is not accepted as a vector type. */
12182 if (int_size_in_bytes (single) <= 0
12183 || int_size_in_bytes (single) != int_size_in_bytes (type))
12189 return VECTOR_TYPE_P (type);
12192 /* Return true if a function argument of type TYPE and mode MODE
12193 is to be passed in a floating-point register, if available. */
12196 s390_function_arg_float (machine_mode mode, const_tree type)
12198 if (s390_function_arg_size (mode, type) > 8)
12201 /* Soft-float changes the ABI: no floating-point registers are used. */
12202 if (TARGET_SOFT_FLOAT)
12205 /* No type info available for some library calls ... */
12207 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
12209 /* The ABI says that record types with a single member are treated
12210 just like that member would be. */
12211 while (TREE_CODE (type) == RECORD_TYPE)
12213 tree field, single = NULL_TREE;
12215 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
12217 if (TREE_CODE (field) != FIELD_DECL)
12220 if (single == NULL_TREE)
12221 single = TREE_TYPE (field);
12226 if (single == NULL_TREE)
12232 return TREE_CODE (type) == REAL_TYPE;
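/* For illustration (hypothetical types, not from this file): under the
   single-member rule above

       struct wrapped { double d; };

   is passed exactly like a bare double, while a two-field record such
   as struct pair { float a; float b; } is not treated as a float
   argument even though it also occupies 8 bytes.  */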
12235 /* Return true if a function argument of type TYPE and mode MODE
12236 is to be passed in an integer register, or a pair of integer
12237 registers, if available. */
12240 s390_function_arg_integer (machine_mode mode, const_tree type)
12242 int size = s390_function_arg_size (mode, type);
12246 /* No type info available for some library calls ... */
12248 return GET_MODE_CLASS (mode) == MODE_INT
12249 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
12251 /* We accept small integral (and similar) types. */
12252 if (INTEGRAL_TYPE_P (type)
12253 || POINTER_TYPE_P (type)
12254 || TREE_CODE (type) == NULLPTR_TYPE
12255 || TREE_CODE (type) == OFFSET_TYPE
12256 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
12259 /* We also accept structs of size 1, 2, 4, 8 that are not
12260 passed in floating-point registers. */
12261 if (AGGREGATE_TYPE_P (type)
12262 && exact_log2 (size) >= 0
12263 && !s390_function_arg_float (mode, type))
12269 /* Return 1 if a function argument of type TYPE and mode MODE
12270 is to be passed by reference. The ABI specifies that only
12271 structures of size 1, 2, 4, or 8 bytes are passed by value,
12272 all other structures (and complex numbers) are passed by
12276 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
12277 machine_mode mode, const_tree type,
12278 bool named ATTRIBUTE_UNUSED)
12280 int size = s390_function_arg_size (mode, type);
12282 if (s390_function_arg_vector (mode, type))
12290 if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
12293 if (TREE_CODE (type) == COMPLEX_TYPE
12294 || TREE_CODE (type) == VECTOR_TYPE)
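/* Size-rule illustration (hypothetical types): struct { short a, b; }
   has size 4 = 2^2 and travels by value, whereas struct { char a, b, c; }
   has size 3, fails the exact_log2 test above and is passed by
   reference, as are all complex values.  */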
12301 /* Update the data in CUM to advance over an argument of mode MODE and
12302 data type TYPE. (TYPE is null for libcalls where that information
12303 may not be available.) The boolean NAMED specifies whether the
12304 argument is a named argument (as opposed to an unnamed argument
12305 matching an ellipsis). */
12308 s390_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
12309 const_tree type, bool named)
12311 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12313 if (s390_function_arg_vector (mode, type))
12315 /* We are called for unnamed vector stdarg arguments which are
12316 passed on the stack. In this case this hook does not have to
12317 do anything since stack arguments are tracked by common
12323 else if (s390_function_arg_float (mode, type))
12327 else if (s390_function_arg_integer (mode, type))
12329 int size = s390_function_arg_size (mode, type);
12330 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
12333 gcc_unreachable ();
12336 /* Define where to put the arguments to a function.
12337 Value is zero to push the argument on the stack,
12338 or a hard register in which to store the argument.
12340 MODE is the argument's machine mode.
12341 TYPE is the data type of the argument (as a tree).
12342 This is null for libcalls where that information may
12344 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12345 the preceding args and about the function being called.
12346 NAMED is nonzero if this argument is a named parameter
12347 (otherwise it is an extra parameter matching an ellipsis).
12349 On S/390, we use general purpose registers 2 through 6 to
12350 pass integer, pointer, and certain structure arguments, and
12351 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
12352 to pass floating point arguments. All remaining arguments
12353 are pushed to the stack. */
12356 s390_function_arg (cumulative_args_t cum_v, machine_mode mode,
12357 const_tree type, bool named)
12359 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12362 s390_check_type_for_vector_abi (type, true, false);
12364 if (s390_function_arg_vector (mode, type))
12366 /* Vector arguments being part of the ellipsis are passed on the
12368 if (!named || (cum->vrs + 1 > VEC_ARG_NUM_REG))
12371 return gen_rtx_REG (mode, cum->vrs + FIRST_VEC_ARG_REGNO);
12373 else if (s390_function_arg_float (mode, type))
12375 if (cum->fprs + 1 > FP_ARG_NUM_REG)
12378 return gen_rtx_REG (mode, cum->fprs + 16);
12380 else if (s390_function_arg_integer (mode, type))
12382 int size = s390_function_arg_size (mode, type);
12383 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12385 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
12387 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
12388 return gen_rtx_REG (mode, cum->gprs + 2);
12389 else if (n_gprs == 2)
12391 rtvec p = rtvec_alloc (2);
12394 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
12397 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
12400 return gen_rtx_PARALLEL (mode, p);
12404 /* After the real arguments, expand_call calls us once again
12405 with a void_type_node type. Whatever we return here is
12406 passed as operand 2 to the call expanders.
12408 We don't need this feature ... */
12409 else if (type == void_type_node)
12412 gcc_unreachable ();
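/* A sketch of the resulting register assignment (illustrative only):
   for the 64-bit ABI a call such as

       extern void f (int a, double b, long c);
       f (1, 2.0, 3);

   passes A in %r2, B in %f0 and C in %r3 - GPR arguments consume
   %r2-%r6 in order, FPR arguments %f0/%f2/%f4/%f6 independently, and
   whatever does not fit is pushed to the stack.  */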
12415 /* Implement TARGET_FUNCTION_ARG_PADDING. Vector arguments are
12416 left-justified when placed on the stack during parameter passing. */
12418 static pad_direction
12419 s390_function_arg_padding (machine_mode mode, const_tree type)
12421 if (s390_function_arg_vector (mode, type))
12424 return default_function_arg_padding (mode, type);
12427 /* Return true if return values of type TYPE should be returned
12428 in a memory buffer whose address is passed by the caller as
12429 hidden first argument. */
12432 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
12434 /* We accept small integral (and similar) types. */
12435 if (INTEGRAL_TYPE_P (type)
12436 || POINTER_TYPE_P (type)
12437 || TREE_CODE (type) == OFFSET_TYPE
12438 || TREE_CODE (type) == REAL_TYPE)
12439 return int_size_in_bytes (type) > 8;
12441 /* vector types which fit into a VR. */
12443 && VECTOR_TYPE_P (type)
12444 && int_size_in_bytes (type) <= 16)
12447 /* Aggregates and similar constructs are always returned
12449 if (AGGREGATE_TYPE_P (type)
12450 || TREE_CODE (type) == COMPLEX_TYPE
12451 || VECTOR_TYPE_P (type))
12454 /* ??? We get called on all sorts of random stuff from
12455 aggregate_value_p. We can't abort, but it's not clear
12456 what's safe to return. Pretend it's a struct I guess. */
12460 /* Function arguments and return values are promoted to word size. */
12462 static machine_mode
12463 s390_promote_function_mode (const_tree type, machine_mode mode,
12465 const_tree fntype ATTRIBUTE_UNUSED,
12466 int for_return ATTRIBUTE_UNUSED)
12468 if (INTEGRAL_MODE_P (mode)
12469 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
12471 if (type != NULL_TREE && POINTER_TYPE_P (type))
12472 *punsignedp = POINTERS_EXTEND_UNSIGNED;
12479 /* Define where to return a (scalar) value of type RET_TYPE.
12480 If RET_TYPE is null, define where to return a (scalar)
12481 value of mode MODE from a libcall. */
12484 s390_function_and_libcall_value (machine_mode mode,
12485 const_tree ret_type,
12486 const_tree fntype_or_decl,
12487 bool outgoing ATTRIBUTE_UNUSED)
12489 /* For vector return types it is important to use the RET_TYPE
12490 argument whenever available since the middle-end might have
12491 changed the mode to a scalar mode. */
12492 bool vector_ret_type_p = ((ret_type && VECTOR_TYPE_P (ret_type))
12493 || (!ret_type && VECTOR_MODE_P (mode)));
12495 /* For normal functions perform the promotion as
12496 promote_function_mode would do. */
12499 int unsignedp = TYPE_UNSIGNED (ret_type);
12500 mode = promote_function_mode (ret_type, mode, &unsignedp,
12501 fntype_or_decl, 1);
12504 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
12505 || SCALAR_FLOAT_MODE_P (mode)
12506 || (TARGET_VX_ABI && vector_ret_type_p));
12507 gcc_assert (GET_MODE_SIZE (mode) <= (TARGET_VX_ABI ? 16 : 8));
12509 if (TARGET_VX_ABI && vector_ret_type_p)
12510 return gen_rtx_REG (mode, FIRST_VEC_ARG_REGNO);
12511 else if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
12512 return gen_rtx_REG (mode, 16);
12513 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
12514 || UNITS_PER_LONG == UNITS_PER_WORD)
12515 return gen_rtx_REG (mode, 2);
12516 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
12518 /* This case is triggered when returning a 64 bit value with
12519 -m31 -mzarch. Although the value would fit into a single
12520 register it has to be forced into a 32 bit register pair in
12521 order to match the ABI. */
12522 rtvec p = rtvec_alloc (2);
12525 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
12527 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
12529 return gen_rtx_PARALLEL (mode, p);
12532 gcc_unreachable ();
12535 /* Define where to return a scalar return value of type RET_TYPE. */
12538 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
12541 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
12542 fn_decl_or_type, outgoing);
12545 /* Define where to return a scalar libcall return value of mode
12549 s390_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
12551 return s390_function_and_libcall_value (mode, NULL_TREE,
12556 /* Create and return the va_list datatype.
12558 On S/390, va_list is an array type equivalent to
12560 typedef struct __va_list_tag
12564 void *__overflow_arg_area;
12565 void *__reg_save_area;
12568 where __gpr and __fpr hold the number of general purpose
12569 or floating point arguments used up to now, respectively,
12570 __overflow_arg_area points to the stack location of the
12571 next argument passed on the stack, and __reg_save_area
12572 always points to the start of the register area in the
12573 call frame of the current function. The function prologue
12574 saves all registers used for argument passing into this
12575 area if the function uses variable arguments. */
12578 s390_build_builtin_va_list (void)
12580 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
12582 record = lang_hooks.types.make_type (RECORD_TYPE);
12585 build_decl (BUILTINS_LOCATION,
12586 TYPE_DECL, get_identifier ("__va_list_tag"), record);
12588 f_gpr = build_decl (BUILTINS_LOCATION,
12589 FIELD_DECL, get_identifier ("__gpr"),
12590 long_integer_type_node);
12591 f_fpr = build_decl (BUILTINS_LOCATION,
12592 FIELD_DECL, get_identifier ("__fpr"),
12593 long_integer_type_node);
12594 f_ovf = build_decl (BUILTINS_LOCATION,
12595 FIELD_DECL, get_identifier ("__overflow_arg_area"),
12597 f_sav = build_decl (BUILTINS_LOCATION,
12598 FIELD_DECL, get_identifier ("__reg_save_area"),
12601 va_list_gpr_counter_field = f_gpr;
12602 va_list_fpr_counter_field = f_fpr;
12604 DECL_FIELD_CONTEXT (f_gpr) = record;
12605 DECL_FIELD_CONTEXT (f_fpr) = record;
12606 DECL_FIELD_CONTEXT (f_ovf) = record;
12607 DECL_FIELD_CONTEXT (f_sav) = record;
12609 TYPE_STUB_DECL (record) = type_decl;
12610 TYPE_NAME (record) = type_decl;
12611 TYPE_FIELDS (record) = f_gpr;
12612 DECL_CHAIN (f_gpr) = f_fpr;
12613 DECL_CHAIN (f_fpr) = f_ovf;
12614 DECL_CHAIN (f_ovf) = f_sav;
12616 layout_type (record);
12618 /* The correct type is an array type of one element. */
12619 return build_array_type (record, build_index_type (size_zero_node));
12622 /* Implement va_start by filling the va_list structure VALIST.
12623 STDARG_P is always true, and ignored.
12624 NEXTARG points to the first anonymous stack argument.
12626 The following global variables are used to initialize
12627 the va_list structure:
12630 holds number of gprs and fprs used for named arguments.
12631 crtl->args.arg_offset_rtx:
12632 holds the offset of the first anonymous stack argument
12633 (relative to the virtual arg pointer). */
12636 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
12638 HOST_WIDE_INT n_gpr, n_fpr;
12640 tree f_gpr, f_fpr, f_ovf, f_sav;
12641 tree gpr, fpr, ovf, sav, t;
12643 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12644 f_fpr = DECL_CHAIN (f_gpr);
12645 f_ovf = DECL_CHAIN (f_fpr);
12646 f_sav = DECL_CHAIN (f_ovf);
12648 valist = build_simple_mem_ref (valist);
12649 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12650 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12651 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12652 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12654 /* Count number of gp and fp argument registers used. */
12656 n_gpr = crtl->args.info.gprs;
12657 n_fpr = crtl->args.info.fprs;
12659 if (cfun->va_list_gpr_size)
12661 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12662 build_int_cst (NULL_TREE, n_gpr));
12663 TREE_SIDE_EFFECTS (t) = 1;
12664 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12667 if (cfun->va_list_fpr_size)
12669 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12670 build_int_cst (NULL_TREE, n_fpr));
12671 TREE_SIDE_EFFECTS (t) = 1;
12672 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12675 if (flag_split_stack
12676 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
12678 && cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12683 reg = gen_reg_rtx (Pmode);
12684 cfun->machine->split_stack_varargs_pointer = reg;
12687 emit_move_insn (reg, gen_rtx_REG (Pmode, 1));
12688 seq = get_insns ();
12691 push_topmost_sequence ();
12692 emit_insn_after (seq, entry_of_function ());
12693 pop_topmost_sequence ();
12696 /* Find the overflow area.
12697 FIXME: This currently is too pessimistic when the vector ABI is
12698 enabled. In that case we *always* set up the overflow area
12700 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
12701 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG
12704 if (cfun->machine->split_stack_varargs_pointer == NULL_RTX)
12705 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
12707 t = make_tree (TREE_TYPE (ovf), cfun->machine->split_stack_varargs_pointer);
12709 off = INTVAL (crtl->args.arg_offset_rtx);
12710 off = off < 0 ? 0 : off;
12711 if (TARGET_DEBUG_ARG)
12712 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
12713 (int)n_gpr, (int)n_fpr, off);
12715 t = fold_build_pointer_plus_hwi (t, off);
12717 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12718 TREE_SIDE_EFFECTS (t) = 1;
12719 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12722 /* Find the register save area. */
12723 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
12724 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
12726 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
12727 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
12729 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12730 TREE_SIDE_EFFECTS (t) = 1;
12731 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
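/* The arithmetic above relies on the register save area layout: the
   slot holding r14 lies RETURN_REGNUM slots above the slot for r0, so
   stepping back RETURN_REGNUM * UNITS_PER_LONG from the return-address
   pointer lands on the start of the area.  */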
12735 /* Implement va_arg by updating the va_list structure
12736 VALIST as required to retrieve an argument of type
12737 TYPE, and returning that argument.
12739 Generates code equivalent to:
12741 if (integral value) {
12742 if (size <= 4 && args.gpr < 5 ||
12743 size > 4 && args.gpr < 4 )
12744 ret = args.reg_save_area[args.gpr+8]
12746 ret = *args.overflow_arg_area++;
12747 } else if (vector value) {
12748 ret = *args.overflow_arg_area;
12749 args.overflow_arg_area += size / 8;
12750 } else if (float value) {
12752 ret = args.reg_save_area[args.fpr+64]
12754 ret = *args.overflow_arg_area++;
12755 } else if (aggregate value) {
12757 ret = *args.reg_save_area[args.gpr]
12759 ret = **args.overflow_arg_area++;
12763 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12764 gimple_seq *post_p ATTRIBUTE_UNUSED)
12766 tree f_gpr, f_fpr, f_ovf, f_sav;
12767 tree gpr, fpr, ovf, sav, reg, t, u;
12768 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
12769 tree lab_false, lab_over = NULL_TREE;
12770 tree addr = create_tmp_var (ptr_type_node, "addr");
12771 bool left_align_p; /* How a value < UNITS_PER_LONG is aligned within
12774 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12775 f_fpr = DECL_CHAIN (f_gpr);
12776 f_ovf = DECL_CHAIN (f_fpr);
12777 f_sav = DECL_CHAIN (f_ovf);
12779 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12780 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
12781 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
12783 /* The tree for args* cannot be shared between gpr/fpr and ovf since
12784 both appear on a lhs. */
12785 valist = unshare_expr (valist);
12786 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
12788 size = int_size_in_bytes (type);
12790 s390_check_type_for_vector_abi (type, true, false);
12792 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12794 if (TARGET_DEBUG_ARG)
12796 fprintf (stderr, "va_arg: aggregate type");
12800 /* Aggregates are passed by reference. */
12805 /* kernel stack layout on 31 bit: It is assumed here that no padding
12806 will be added by s390_frame_info because for va_args an even
12807 number of gprs always has to be saved (r15-r2 = 14 regs). */
12808 sav_ofs = 2 * UNITS_PER_LONG;
12809 sav_scale = UNITS_PER_LONG;
12810 size = UNITS_PER_LONG;
12811 max_reg = GP_ARG_NUM_REG - n_reg;
12812 left_align_p = false;
12814 else if (s390_function_arg_vector (TYPE_MODE (type), type))
12816 if (TARGET_DEBUG_ARG)
12818 fprintf (stderr, "va_arg: vector type");
12828 left_align_p = true;
12830 else if (s390_function_arg_float (TYPE_MODE (type), type))
12832 if (TARGET_DEBUG_ARG)
12834 fprintf (stderr, "va_arg: float type");
12838 /* FP args go in FP registers, if present. */
12842 sav_ofs = 16 * UNITS_PER_LONG;
12844 max_reg = FP_ARG_NUM_REG - n_reg;
12845 left_align_p = false;
12849 if (TARGET_DEBUG_ARG)
12851 fprintf (stderr, "va_arg: other type");
12855 /* Otherwise into GP registers. */
12858 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
12860 /* kernel stack layout on 31 bit: It is assumed here that no padding
12861 will be added by s390_frame_info because for va_args an even
12862 number of gprs always has to be saved (r15-r2 = 14 regs). */
12863 sav_ofs = 2 * UNITS_PER_LONG;
12865 if (size < UNITS_PER_LONG)
12866 sav_ofs += UNITS_PER_LONG - size;
12868 sav_scale = UNITS_PER_LONG;
12869 max_reg = GP_ARG_NUM_REG - n_reg;
12870 left_align_p = false;
12873 /* Pull the value out of the saved registers ... */
12875 if (reg != NULL_TREE)
12878 if (reg > ((typeof (reg))max_reg))
12881 addr = sav + sav_ofs + reg * sav_scale;
12888 lab_false = create_artificial_label (UNKNOWN_LOCATION);
12889 lab_over = create_artificial_label (UNKNOWN_LOCATION);
12891 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
12892 t = build2 (GT_EXPR, boolean_type_node, reg, t);
12893 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12894 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12895 gimplify_and_add (t, pre_p);
12897 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12898 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
12899 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
12900 t = fold_build_pointer_plus (t, u);
12902 gimplify_assign (addr, t, pre_p);
12904 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12906 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
12909 /* ... Otherwise out of the overflow area. */
12912 if (size < UNITS_PER_LONG && !left_align_p)
12913 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
12915 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12917 gimplify_assign (addr, t, pre_p);
12919 if (size < UNITS_PER_LONG && left_align_p)
12920 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG);
12922 t = fold_build_pointer_plus_hwi (t, size);
12924 gimplify_assign (ovf, t, pre_p);
12926 if (reg != NULL_TREE)
12927 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
12930 /* Increment register save count. */
12934 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
12935 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
12936 gimplify_and_add (u, pre_p);
12941 t = build_pointer_type_for_mode (build_pointer_type (type),
12943 addr = fold_convert (t, addr);
12944 addr = build_va_arg_indirect_ref (addr);
12948 t = build_pointer_type_for_mode (type, ptr_mode, true);
12949 addr = fold_convert (t, addr);
12952 return build_va_arg_indirect_ref (addr);
12955 /* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
12957 DEST - Register location where CC will be stored.
12958 TDB - Pointer to a 256 byte area where to store the transaction
12959 diagnostic block. NULL if TDB is not needed.
12960 RETRY - Retry count value. If non-NULL, a retry loop for CC2
12962 CLOBBER_FPRS_P - If true, clobbers for all FPRs are emitted as part
12963 of the tbegin instruction pattern. */
12966 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
12968 rtx retry_plus_two = gen_reg_rtx (SImode);
12969 rtx retry_reg = gen_reg_rtx (SImode);
12970 rtx_code_label *retry_label = NULL;
12972 if (retry != NULL_RTX)
12974 emit_move_insn (retry_reg, retry);
12975 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
12976 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
12977 retry_label = gen_label_rtx ();
12978 emit_label (retry_label);
12981 if (clobber_fprs_p)
12984 emit_insn (gen_tbegin_1_z13 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12987 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12991 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
12994 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
12995 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
12997 UNSPEC_CC_TO_INT));
12998 if (retry != NULL_RTX)
13000 const int CC0 = 1 << 3;
13001 const int CC1 = 1 << 2;
13002 const int CC3 = 1 << 0;
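/* The constants follow the CCRAWmode mask convention (bit 8 >> cc), so
   CC0 is the most significant of the four bits and CC0 | CC1 | CC3
   below reads as "any outcome except CC2, the transient failure".  */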
13004 rtx count = gen_reg_rtx (SImode);
13005 rtx_code_label *leave_label = gen_label_rtx ();
13007 /* Exit for success and permanent failures. */
13008 jump = s390_emit_jump (leave_label,
13009 gen_rtx_EQ (VOIDmode,
13010 gen_rtx_REG (CCRAWmode, CC_REGNUM),
13011 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
13012 LABEL_NUSES (leave_label) = 1;
13014 /* CC2 - transient failure. Perform retry with ppa. */
13015 emit_move_insn (count, retry_plus_two);
13016 emit_insn (gen_subsi3 (count, count, retry_reg));
13017 emit_insn (gen_tx_assist (count));
13018 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
13021 JUMP_LABEL (jump) = retry_label;
13022 LABEL_NUSES (retry_label) = 1;
13023 emit_label (leave_label);
13028 /* Return the decl for the target specific builtin with the function
13032 s390_builtin_decl (unsigned fcode, bool initialized_p ATTRIBUTE_UNUSED)
13034 if (fcode >= S390_BUILTIN_MAX)
13035 return error_mark_node;
13037 return s390_builtin_decls[fcode];
13040 /* We call mcount before the function prologue. So a profiled leaf
13041 function should stay a leaf function. */
13044 s390_keep_leaf_when_profiled ()
13049 /* Output assembly code for the trampoline template to
13052 On S/390, we use gpr 1 internally in the trampoline code;
13053 gpr 0 is used to hold the static chain. */
13056 s390_asm_trampoline_template (FILE *file)
13059 op[0] = gen_rtx_REG (Pmode, 0);
13060 op[1] = gen_rtx_REG (Pmode, 1);
13064 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
13065 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
13066 output_asm_insn ("br\t%1", op); /* 2 byte */
13067 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
13071 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
13072 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
13073 output_asm_insn ("br\t%1", op); /* 2 byte */
13074 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
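/* Trampoline layout sketch (64-bit case): BASR leaves the address of
   the following insn in %r1, i.e. trampoline base + 2, so the LMG at
   14(%r1) reads from offset 16 onwards - precisely the two slots that
   s390_trampoline_init below fills at 2 and 3 * UNITS_PER_LONG with
   the static chain and the target address.  */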
13078 /* Emit RTL insns to initialize the variable parts of a trampoline.
13079 FNADDR is an RTX for the address of the function's pure code.
13080 CXT is an RTX for the static chain value for the function. */
13083 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
13085 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
13088 emit_block_move (m_tramp, assemble_trampoline_template (),
13089 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
13091 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
13092 emit_move_insn (mem, cxt);
13093 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
13094 emit_move_insn (mem, fnaddr);
13097 /* Output assembler code to FILE to increment profiler label # LABELNO
13098 for profiling a function entry. */
13101 s390_function_profiler (FILE *file, int labelno)
13106 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
13108 fprintf (file, "# function profiler \n");
13110 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
13111 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
13112 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
13114 op[2] = gen_rtx_REG (Pmode, 1);
13115 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
13116 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
13118 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
13121 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
13122 op[4] = gen_rtx_CONST (Pmode, op[4]);
13127 output_asm_insn ("stg\t%0,%1", op);
13128 output_asm_insn ("larl\t%2,%3", op);
13129 output_asm_insn ("brasl\t%0,%4", op);
13130 output_asm_insn ("lg\t%0,%1", op);
13132 else if (TARGET_CPU_ZARCH)
13134 output_asm_insn ("st\t%0,%1", op);
13135 output_asm_insn ("larl\t%2,%3", op);
13136 output_asm_insn ("brasl\t%0,%4", op);
13137 output_asm_insn ("l\t%0,%1", op);
13139 else if (!flag_pic)
13141 op[6] = gen_label_rtx ();
13143 output_asm_insn ("st\t%0,%1", op);
13144 output_asm_insn ("bras\t%2,%l6", op);
13145 output_asm_insn (".long\t%4", op);
13146 output_asm_insn (".long\t%3", op);
13147 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
13148 output_asm_insn ("l\t%0,0(%2)", op);
13149 output_asm_insn ("l\t%2,4(%2)", op);
13150 output_asm_insn ("basr\t%0,%0", op);
13151 output_asm_insn ("l\t%0,%1", op);
13155 op[5] = gen_label_rtx ();
13156 op[6] = gen_label_rtx ();
13158 output_asm_insn ("st\t%0,%1", op);
13159 output_asm_insn ("bras\t%2,%l6", op);
13160 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
13161 output_asm_insn (".long\t%4-%l5", op);
13162 output_asm_insn (".long\t%3-%l5", op);
13163 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
13164 output_asm_insn ("lr\t%0,%2", op);
13165 output_asm_insn ("a\t%0,0(%2)", op);
13166 output_asm_insn ("a\t%2,4(%2)", op);
13167 output_asm_insn ("basr\t%0,%0", op);
13168 output_asm_insn ("l\t%0,%1", op);
13172 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
13173 into its SYMBOL_REF_FLAGS. */
13176 s390_encode_section_info (tree decl, rtx rtl, int first)
13178 default_encode_section_info (decl, rtl, first);
13180 if (TREE_CODE (decl) == VAR_DECL)
13182 /* Store the alignment to be able to check if we can use
13183 a larl/load-relative instruction. We only handle the cases
13184 that can go wrong (i.e. no FUNC_DECLs). */
13185 if (DECL_ALIGN (decl) == 0 || DECL_ALIGN (decl) % 16)
13186 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
13187 else if (DECL_ALIGN (decl) % 32)
13188 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
13189 else if (DECL_ALIGN (decl) % 64)
13190 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
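/* Note that DECL_ALIGN is measured in bits, so the % 16 / % 32 / % 64
   tests above flag symbols that lack 2-, 4- or 8-byte alignment
   respectively - the granularities the relative addressing
   instructions require.  */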
13193 /* Literal pool references don't have a decl, so they are handled
13194 differently here. We rely on the information in the MEM_ALIGN
13195 entry to decide upon the alignment. */
13197 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
13198 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0)))
13200 if (MEM_ALIGN (rtl) == 0 || MEM_ALIGN (rtl) % 16)
13201 SYMBOL_FLAG_SET_NOTALIGN2 (XEXP (rtl, 0));
13202 else if (MEM_ALIGN (rtl) % 32)
13203 SYMBOL_FLAG_SET_NOTALIGN4 (XEXP (rtl, 0));
13204 else if (MEM_ALIGN (rtl) % 64)
13205 SYMBOL_FLAG_SET_NOTALIGN8 (XEXP (rtl, 0));
13209 /* Output thunk to FILE that implements a C++ virtual function call (with
13210 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
13211 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
13212 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
13213 relative to the resulting this pointer. */
13216 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
13217 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
13223 /* Make sure unwind info is emitted for the thunk if needed. */
13224 final_start_function (emit_barrier (), file, 1);
13226 /* Operand 0 is the target function. */
13227 op[0] = XEXP (DECL_RTL (function), 0);
13228 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
13231 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
13232 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
13233 op[0] = gen_rtx_CONST (Pmode, op[0]);
13236 /* Operand 1 is the 'this' pointer. */
13237 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
13238 op[1] = gen_rtx_REG (Pmode, 3);
13240 op[1] = gen_rtx_REG (Pmode, 2);
13242 /* Operand 2 is the delta. */
13243 op[2] = GEN_INT (delta);
13245 /* Operand 3 is the vcall_offset. */
13246 op[3] = GEN_INT (vcall_offset);
13248 /* Operand 4 is the temporary register. */
13249 op[4] = gen_rtx_REG (Pmode, 1);
13251 /* Operands 5 to 8 can be used as labels. */
13257 /* Operand 9 can be used as a temporary register. */
13260 /* Generate code. */
13263 /* Set up literal pool pointer if required. */
13264 if ((!DISP_IN_RANGE (delta)
13265 && !CONST_OK_FOR_K (delta)
13266 && !CONST_OK_FOR_Os (delta))
13267 || (!DISP_IN_RANGE (vcall_offset)
13268 && !CONST_OK_FOR_K (vcall_offset)
13269 && !CONST_OK_FOR_Os (vcall_offset)))
13271 op[5] = gen_label_rtx ();
13272 output_asm_insn ("larl\t%4,%5", op);
13275 /* Add DELTA to this pointer. */
13278 if (CONST_OK_FOR_J (delta))
13279 output_asm_insn ("la\t%1,%2(%1)", op);
13280 else if (DISP_IN_RANGE (delta))
13281 output_asm_insn ("lay\t%1,%2(%1)", op);
13282 else if (CONST_OK_FOR_K (delta))
13283 output_asm_insn ("aghi\t%1,%2", op);
13284 else if (CONST_OK_FOR_Os (delta))
13285 output_asm_insn ("agfi\t%1,%2", op);
13288 op[6] = gen_label_rtx ();
13289 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
13293 /* Perform vcall adjustment. */
13296 if (DISP_IN_RANGE (vcall_offset))
13298 output_asm_insn ("lg\t%4,0(%1)", op);
13299 output_asm_insn ("ag\t%1,%3(%4)", op);
13301 else if (CONST_OK_FOR_K (vcall_offset))
13303 output_asm_insn ("lghi\t%4,%3", op);
13304 output_asm_insn ("ag\t%4,0(%1)", op);
13305 output_asm_insn ("ag\t%1,0(%4)", op);
13307 else if (CONST_OK_FOR_Os (vcall_offset))
13309 output_asm_insn ("lgfi\t%4,%3", op);
13310 output_asm_insn ("ag\t%4,0(%1)", op);
13311 output_asm_insn ("ag\t%1,0(%4)", op);
13315 op[7] = gen_label_rtx ();
13316 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
13317 output_asm_insn ("ag\t%4,0(%1)", op);
13318 output_asm_insn ("ag\t%1,0(%4)", op);
13322 /* Jump to target. */
13323 output_asm_insn ("jg\t%0", op);
13325 /* Output literal pool if required. */
13328 output_asm_insn (".align\t4", op);
13329 targetm.asm_out.internal_label (file, "L",
13330 CODE_LABEL_NUMBER (op[5]));
13334 targetm.asm_out.internal_label (file, "L",
13335 CODE_LABEL_NUMBER (op[6]));
13336 output_asm_insn (".long\t%2", op);
13340 targetm.asm_out.internal_label (file, "L",
13341 CODE_LABEL_NUMBER (op[7]));
13342 output_asm_insn (".long\t%3", op);
13347 /* Set up base pointer if required. */
13349 || (!DISP_IN_RANGE (delta)
13350 && !CONST_OK_FOR_K (delta)
13351 && !CONST_OK_FOR_Os (delta))
13352 || (!DISP_IN_RANGE (delta)
13353 && !CONST_OK_FOR_K (vcall_offset)
13354 && !CONST_OK_FOR_Os (vcall_offset)))
13356 op[5] = gen_label_rtx ();
13357 output_asm_insn ("basr\t%4,0", op);
13358 targetm.asm_out.internal_label (file, "L",
13359 CODE_LABEL_NUMBER (op[5]));
13362 /* Add DELTA to this pointer. */
13365 if (CONST_OK_FOR_J (delta))
13366 output_asm_insn ("la\t%1,%2(%1)", op);
13367 else if (DISP_IN_RANGE (delta))
13368 output_asm_insn ("lay\t%1,%2(%1)", op);
13369 else if (CONST_OK_FOR_K (delta))
13370 output_asm_insn ("ahi\t%1,%2", op);
13371 else if (CONST_OK_FOR_Os (delta))
13372 output_asm_insn ("afi\t%1,%2", op);
13375 op[6] = gen_label_rtx ();
13376 output_asm_insn ("a\t%1,%6-%5(%4)", op);
13380 /* Perform vcall adjustment. */
13383 if (CONST_OK_FOR_J (vcall_offset))
13385 output_asm_insn ("l\t%4,0(%1)", op);
13386 output_asm_insn ("a\t%1,%3(%4)", op);
13388 else if (DISP_IN_RANGE (vcall_offset))
13390 output_asm_insn ("l\t%4,0(%1)", op);
13391 output_asm_insn ("ay\t%1,%3(%4)", op);
13393 else if (CONST_OK_FOR_K (vcall_offset))
13395 output_asm_insn ("lhi\t%4,%3", op);
13396 output_asm_insn ("a\t%4,0(%1)", op);
13397 output_asm_insn ("a\t%1,0(%4)", op);
13399 else if (CONST_OK_FOR_Os (vcall_offset))
13401 output_asm_insn ("iilf\t%4,%3", op);
13402 output_asm_insn ("a\t%4,0(%1)", op);
13403 output_asm_insn ("a\t%1,0(%4)", op);
13407 op[7] = gen_label_rtx ();
13408 output_asm_insn ("l\t%4,%7-%5(%4)", op);
13409 output_asm_insn ("a\t%4,0(%1)", op);
13410 output_asm_insn ("a\t%1,0(%4)", op);
13413 /* We had to clobber the base pointer register.
13414 Re-setup the base pointer (with a different base). */
13415 op[5] = gen_label_rtx ();
13416 output_asm_insn ("basr\t%4,0", op);
13417 targetm.asm_out.internal_label (file, "L",
13418 CODE_LABEL_NUMBER (op[5]));
13421 /* Jump to target. */
13422 op[8] = gen_label_rtx ();
13425 output_asm_insn ("l\t%4,%8-%5(%4)", op);
13426 else if (!nonlocal)
13427 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13428 /* We cannot call through .plt, since .plt requires %r12 loaded. */
13429 else if (flag_pic == 1)
13431 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13432 output_asm_insn ("l\t%4,%0(%4)", op);
13434 else if (flag_pic == 2)
13436 op[9] = gen_rtx_REG (Pmode, 0);
13437 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
13438 output_asm_insn ("a\t%4,%8-%5(%4)", op);
13439 output_asm_insn ("ar\t%4,%9", op);
13440 output_asm_insn ("l\t%4,0(%4)", op);
13443 output_asm_insn ("br\t%4", op);
13445 /* Output literal pool. */
13446 output_asm_insn (".align\t4", op);
13448 if (nonlocal && flag_pic == 2)
13449 output_asm_insn (".long\t%0", op);
13452 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
13453 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
13456 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
13458 output_asm_insn (".long\t%0", op);
13460 output_asm_insn (".long\t%0-%5", op);
13464 targetm.asm_out.internal_label (file, "L",
13465 CODE_LABEL_NUMBER (op[6]));
13466 output_asm_insn (".long\t%2", op);
13470 targetm.asm_out.internal_label (file, "L",
13471 CODE_LABEL_NUMBER (op[7]));
13472 output_asm_insn (".long\t%3", op);
13475 final_end_function ();
13479 s390_valid_pointer_mode (scalar_int_mode mode)
13481 return (mode == SImode || (TARGET_64BIT && mode == DImode));
13484 /* Checks whether the given CALL_EXPR would use a caller
13485 saved register. This is used to decide whether sibling call
13486 optimization could be performed on the respective function
13490 s390_call_saved_register_used (tree call_expr)
13492 CUMULATIVE_ARGS cum_v;
13493 cumulative_args_t cum;
13500 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
13501 cum = pack_cumulative_args (&cum_v);
13503 for (i = 0; i < call_expr_nargs (call_expr); i++)
13505 parameter = CALL_EXPR_ARG (call_expr, i);
13506 gcc_assert (parameter);
13508 /* For an undeclared variable passed as parameter we will get
13509 an ERROR_MARK node here. */
13510 if (TREE_CODE (parameter) == ERROR_MARK)
13513 type = TREE_TYPE (parameter);
13516 mode = TYPE_MODE (type);
13519 /* We assume that in the target function all parameters are
13520 named. This only has an impact on vector argument register
13521 usage, none of which is call-saved. */
13522 if (pass_by_reference (&cum_v, mode, type, true))
13525 type = build_pointer_type (type);
13528 parm_rtx = s390_function_arg (cum, mode, type, true);
13530 s390_function_arg_advance (cum, mode, type, true);
13535 if (REG_P (parm_rtx))
13537 for (reg = 0; reg < REG_NREGS (parm_rtx); reg++)
13538 if (!call_used_regs[reg + REGNO (parm_rtx)])
13542 if (GET_CODE (parm_rtx) == PARALLEL)
13546 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
13548 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
13550 gcc_assert (REG_P (r));
13552 for (reg = 0; reg < REG_NREGS (r); reg++)
13553 if (!call_used_regs[reg + REGNO (r)])
13562 /* Return true if the given call expression can be
13563 turned into a sibling call.
13564 DECL holds the declaration of the function to be called whereas
13565 EXP is the call expression itself. */
13568 s390_function_ok_for_sibcall (tree decl, tree exp)
13570 /* The TPF epilogue uses register 1. */
13571 if (TARGET_TPF_PROFILING)
13574 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
13575 which would have to be restored before the sibcall. */
13576 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
13579 /* Register 6 on s390 is available as an argument register but unfortunately
13580 "caller saved". This makes functions needing this register for arguments
13581 not suitable for sibcalls. */
13582 return !s390_call_saved_register_used (exp);
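/* Illustrative example (hypothetical call, not from this file): in the
   31-bit ABI the first integer-like arguments are passed in %r2..%r6,
   and %r6 is call-saved.  A call expression with five such arguments
   therefore occupies %r6, s390_call_saved_register_used returns true,
   and the sibcall is rejected above.  */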
13585 /* Return the fixed registers used for condition codes. */
13588 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
13591 *p2 = INVALID_REGNUM;
13596 /* This function is used by the call expanders of the machine description.
13597 It emits the call insn itself together with the necessary operations
13598 to adjust the target address and returns the emitted insn.
13599 ADDR_LOCATION is the target address rtx
13600 TLS_CALL the location of the thread-local symbol
13601 RESULT_REG the register where the result of the call should be stored
13602 RETADDR_REG the register where the return address should be stored
13603 If this parameter is NULL_RTX the call is considered
13604 to be a sibling call. */
13607 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
13610 bool plt_call = false;
13616 /* Direct function calls need special treatment. */
13617 if (GET_CODE (addr_location) == SYMBOL_REF)
13619 /* When calling a global routine in PIC mode, we must
13620 replace the symbol itself with the PLT stub. */
13621 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
13623 if (TARGET_64BIT || retaddr_reg != NULL_RTX)
13625 addr_location = gen_rtx_UNSPEC (Pmode,
13626 gen_rtvec (1, addr_location),
13628 addr_location = gen_rtx_CONST (Pmode, addr_location);
13632 /* For -fpic code the PLT entries might use r12 which is
13633 call-saved. Therefore we cannot do a sibcall when
13634 calling directly using a symbol ref. When reaching
13635 this point we decided (in s390_function_ok_for_sibcall)
13636 to do a sibcall for a function pointer but one of the
13637 optimizers was able to get rid of the function pointer
13638 by propagating the symbol ref into the call. This
13639 optimization is illegal for S/390 so we turn the direct
13640 call into an indirect call again. */
13641 addr_location = force_reg (Pmode, addr_location);
13644 /* Unless we can use the bras(l) insn, force the
13645 routine address into a register. */
13646 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
13649 addr_location = legitimize_pic_address (addr_location, 0);
13651 addr_location = force_reg (Pmode, addr_location);
13655 /* If it is already an indirect call or the code above moved the
13656 SYMBOL_REF to somewhere else make sure the address can be found in
13658 if (retaddr_reg == NULL_RTX
13659 && GET_CODE (addr_location) != SYMBOL_REF
13662 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
13663 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
13666 addr_location = gen_rtx_MEM (QImode, addr_location);
13667 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
13669 if (result_reg != NULL_RTX)
13670 call = gen_rtx_SET (result_reg, call);
13672 if (retaddr_reg != NULL_RTX)
13674 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
13676 if (tls_call != NULL_RTX)
13677 vec = gen_rtvec (3, call, clobber,
13678 gen_rtx_USE (VOIDmode, tls_call));
13680 vec = gen_rtvec (2, call, clobber);
13682 call = gen_rtx_PARALLEL (VOIDmode, vec);
13685 insn = emit_call_insn (call);
13687 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
13688 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
13690 /* s390_function_ok_for_sibcall should
13691 have denied sibcalls in this case. */
13692 gcc_assert (retaddr_reg != NULL_RTX);
13693 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
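/* A minimal usage sketch for s390_emit_call (operand names are
   hypothetical):

     rtx result = gen_rtx_REG (Pmode, 2);
     rtx retaddr = gen_rtx_REG (Pmode, RETURN_REGNUM);
     rtx_insn *insn = s390_emit_call (addr, NULL_RTX, result, retaddr);

   Passing NULL_RTX as RETADDR_REG instead marks the call as a sibling
   call, which is why the 31-bit PLT and TLS paths above insist on a
   non-NULL return-address register.  */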
13698 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
13701 s390_conditional_register_usage (void)
13707 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13708 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
13710 if (TARGET_CPU_ZARCH)
13712 fixed_regs[BASE_REGNUM] = 0;
13713 call_used_regs[BASE_REGNUM] = 0;
13714 fixed_regs[RETURN_REGNUM] = 0;
13715 call_used_regs[RETURN_REGNUM] = 0;
13719 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
13720 call_used_regs[i] = call_really_used_regs[i] = 0;
13724 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
13725 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
13728 if (TARGET_SOFT_FLOAT)
13730 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
13731 call_used_regs[i] = fixed_regs[i] = 1;
13734 /* Disable v16 - v31 for non-vector target. */
13737 for (i = VR16_REGNUM; i <= VR31_REGNUM; i++)
13738 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
13742 /* Corresponding function to eh_return expander. */
13744 static GTY(()) rtx s390_tpf_eh_return_symbol;
13746 s390_emit_tpf_eh_return (rtx target)
13751 if (!s390_tpf_eh_return_symbol)
13752 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
13754 reg = gen_rtx_REG (Pmode, 2);
13755 orig_ra = gen_rtx_REG (Pmode, 3);
13757 emit_move_insn (reg, target);
13758 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
13759 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
13760 gen_rtx_REG (Pmode, RETURN_REGNUM));
13761 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
13762 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
13764 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
13767 /* Rework the prologue/epilogue to avoid saving/restoring
13768 registers unnecessarily. */
13771 s390_optimize_prologue (void)
13773 rtx_insn *insn, *new_insn, *next_insn;
13775 /* Do a final recompute of the frame-related data. */
13776 s390_optimize_register_info ();
13778 /* If all special registers are in fact used, there's nothing we
13779 can do, so no point in walking the insn list. */
13781 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
13782 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
13783 && (TARGET_CPU_ZARCH
13784 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
13785 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
13788 /* Search for prologue/epilogue insns and replace them. */
13790 for (insn = get_insns (); insn; insn = next_insn)
13792 int first, last, off;
13793 rtx set, base, offset;
13796 next_insn = NEXT_INSN (insn);
13798 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
13801 pat = PATTERN (insn);
13803 /* Remove ldgr/lgdr instructions used for saving and restoring
13804 GPRs if possible. */
13809 if (INSN_CODE (insn) == CODE_FOR_stack_restore_from_fpr)
13810 tmp_pat = XVECEXP (pat, 0, 0);
13812 if (GET_CODE (tmp_pat) == SET
13813 && GET_MODE (SET_SRC (tmp_pat)) == DImode
13814 && REG_P (SET_SRC (tmp_pat))
13815 && REG_P (SET_DEST (tmp_pat)))
13817 int src_regno = REGNO (SET_SRC (tmp_pat));
13818 int dest_regno = REGNO (SET_DEST (tmp_pat));
13822 if (!((GENERAL_REGNO_P (src_regno)
13823 && FP_REGNO_P (dest_regno))
13824 || (FP_REGNO_P (src_regno)
13825 && GENERAL_REGNO_P (dest_regno))))
13828 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
13829 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
13831 /* GPR must be call-saved, FPR must be call-clobbered. */
13832 if (!call_really_used_regs[fpr_regno]
13833 || call_really_used_regs[gpr_regno])
13836 /* It must not happen that what we once saved in an FPR now
13837 needs a stack slot. */
13838 gcc_assert (cfun_gpr_save_slot (gpr_regno) != SAVE_SLOT_STACK);
13840 if (cfun_gpr_save_slot (gpr_regno) == SAVE_SLOT_NONE)
13842 remove_insn (insn);
13848 if (GET_CODE (pat) == PARALLEL
13849 && store_multiple_operation (pat, VOIDmode))
13851 set = XVECEXP (pat, 0, 0);
13852 first = REGNO (SET_SRC (set));
13853 last = first + XVECLEN (pat, 0) - 1;
13854 offset = const0_rtx;
13855 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13856 off = INTVAL (offset);
13858 if (GET_CODE (base) != REG || off < 0)
13860 if (cfun_frame_layout.first_save_gpr != -1
13861 && (cfun_frame_layout.first_save_gpr < first
13862 || cfun_frame_layout.last_save_gpr > last))
13864 if (REGNO (base) != STACK_POINTER_REGNUM
13865 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13867 if (first > BASE_REGNUM || last < BASE_REGNUM)
13870 if (cfun_frame_layout.first_save_gpr != -1)
13872 rtx s_pat = save_gprs (base,
13873 off + (cfun_frame_layout.first_save_gpr
13874 - first) * UNITS_PER_LONG,
13875 cfun_frame_layout.first_save_gpr,
13876 cfun_frame_layout.last_save_gpr);
13877 new_insn = emit_insn_before (s_pat, insn);
13878 INSN_ADDRESSES_NEW (new_insn, -1);
13881 remove_insn (insn);
13885 if (cfun_frame_layout.first_save_gpr == -1
13886 && GET_CODE (pat) == SET
13887 && GENERAL_REG_P (SET_SRC (pat))
13888 && GET_CODE (SET_DEST (pat)) == MEM)
13891 first = REGNO (SET_SRC (set));
13892 offset = const0_rtx;
13893 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
13894 off = INTVAL (offset);
13896 if (GET_CODE (base) != REG || off < 0)
13898 if (REGNO (base) != STACK_POINTER_REGNUM
13899 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13902 remove_insn (insn);
13906 if (GET_CODE (pat) == PARALLEL
13907 && load_multiple_operation (pat, VOIDmode))
13909 set = XVECEXP (pat, 0, 0);
13910 first = REGNO (SET_DEST (set));
13911 last = first + XVECLEN (pat, 0) - 1;
13912 offset = const0_rtx;
13913 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13914 off = INTVAL (offset);
13916 if (GET_CODE (base) != REG || off < 0)
13919 if (cfun_frame_layout.first_restore_gpr != -1
13920 && (cfun_frame_layout.first_restore_gpr < first
13921 || cfun_frame_layout.last_restore_gpr > last))
13923 if (REGNO (base) != STACK_POINTER_REGNUM
13924 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13926 if (first > BASE_REGNUM || last < BASE_REGNUM)
13929 if (cfun_frame_layout.first_restore_gpr != -1)
13931 rtx rpat = restore_gprs (base,
13932 off + (cfun_frame_layout.first_restore_gpr
13933 - first) * UNITS_PER_LONG,
13934 cfun_frame_layout.first_restore_gpr,
13935 cfun_frame_layout.last_restore_gpr);
13937 /* Remove REG_CFA_RESTOREs for registers that we no
13938 longer need to save. */
13939 REG_NOTES (rpat) = REG_NOTES (insn);
13940 for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
13941 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
13942 && ((int) REGNO (XEXP (*ptr, 0))
13943 < cfun_frame_layout.first_restore_gpr))
13944 *ptr = XEXP (*ptr, 1);
13946 ptr = &XEXP (*ptr, 1);
13947 new_insn = emit_insn_before (rpat, insn);
13948 RTX_FRAME_RELATED_P (new_insn) = 1;
13949 INSN_ADDRESSES_NEW (new_insn, -1);
13952 remove_insn (insn);
13956 if (cfun_frame_layout.first_restore_gpr == -1
13957 && GET_CODE (pat) == SET
13958 && GENERAL_REG_P (SET_DEST (pat))
13959 && GET_CODE (SET_SRC (pat)) == MEM)
13962 first = REGNO (SET_DEST (set));
13963 offset = const0_rtx;
13964 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
13965 off = INTVAL (offset);
13967 if (GET_CODE (base) != REG || off < 0)
13970 if (REGNO (base) != STACK_POINTER_REGNUM
13971 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
13974 remove_insn (insn);
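/* Illustrative effect of the rewriting above (31-bit, hypothetical
   frame): if only %r13..%r15 turn out to need saving, a prologue
   "stm %r6,%r15,24(%r15)" is shrunk to "stm %r13,%r15,52(%r15)",
   the new offset being 24 + (13 - 6) * UNITS_PER_LONG.  */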
13980 /* On z10 and later the dynamic branch prediction must see the
13981 backward jump within a certain window. If not, it falls back to
13982 the static prediction. This function rearranges the loop backward
13983 branch in a way which makes the static prediction always correct.
13984 The function returns true if it added an instruction. */
13986 s390_fix_long_loop_prediction (rtx_insn *insn)
13988 rtx set = single_set (insn);
13989 rtx code_label, label_ref;
13990 rtx_insn *uncond_jump;
13991 rtx_insn *cur_insn;
13995 /* This will exclude branch on count and branch on index patterns
13996 since these are correctly statically predicted. */
13998 || SET_DEST (set) != pc_rtx
13999 || GET_CODE (SET_SRC (set)) != IF_THEN_ELSE)
14002 /* Skip conditional returns. */
14003 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
14004 && XEXP (SET_SRC (set), 2) == pc_rtx)
14007 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
14008 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
14010 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
14012 code_label = XEXP (label_ref, 0);
14014 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
14015 || INSN_ADDRESSES (INSN_UID (insn)) == -1
14016 || (INSN_ADDRESSES (INSN_UID (insn))
14017 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
14020 for (distance = 0, cur_insn = PREV_INSN (insn);
14021 distance < PREDICT_DISTANCE - 6;
14022 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
14023 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
14026 rtx_code_label *new_label = gen_label_rtx ();
14027 uncond_jump = emit_jump_insn_after (
14028 gen_rtx_SET (pc_rtx,
14029 gen_rtx_LABEL_REF (VOIDmode, code_label)),
14031 emit_label_after (new_label, uncond_jump);
14033 tmp = XEXP (SET_SRC (set), 1);
14034 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
14035 XEXP (SET_SRC (set), 2) = tmp;
14036 INSN_CODE (insn) = -1;
14038 XEXP (label_ref, 0) = new_label;
14039 JUMP_LABEL (insn) = new_label;
14040 JUMP_LABEL (uncond_jump) = code_label;
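/* Illustrative transformation (labels hypothetical):

     before:                       after:
       Lloop: ...                    Lloop: ...
              jcc  Lloop                    j!cc Lnew     (short forward jump)
                                            j    Lloop    (unconditional)
                                     Lnew:

   The unconditional backward jump needs no prediction, and the new
   forward conditional branch is statically predicted not-taken, which
   matches the common case of staying in the loop.  */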
14045 /* Returns 1 if INSN reads the value of REG for purposes not related
14046 to addressing of memory, and 0 otherwise. */
14048 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
14050 return reg_referenced_p (reg, PATTERN (insn))
14051 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
14054 /* Starting from INSN find_cond_jump looks downwards in the insn
14055 stream for a single jump insn which is the last user of the
14056 condition code set in INSN. */
14058 find_cond_jump (rtx_insn *insn)
14060 for (; insn; insn = NEXT_INSN (insn))
14064 if (LABEL_P (insn))
14067 if (!JUMP_P (insn))
14069 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
14074 /* This will be triggered by a return. */
14075 if (GET_CODE (PATTERN (insn)) != SET)
14078 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
14079 ite = SET_SRC (PATTERN (insn));
14081 if (GET_CODE (ite) != IF_THEN_ELSE)
14084 cc = XEXP (XEXP (ite, 0), 0);
14085 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
14088 if (find_reg_note (insn, REG_DEAD, cc))
14096 /* Swap the condition in COND and the operands in OP0 and OP1 so that
14097 the semantics does not change. If NULL_RTX is passed as COND the
14098 function tries to find the conditional jump starting with INSN. */
14100 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
14104 if (cond == NULL_RTX)
14106 rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
14107 rtx set = jump ? single_set (jump) : NULL_RTX;
14109 if (set == NULL_RTX)
14112 cond = XEXP (SET_SRC (set), 0);
14117 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
14120 /* On z10, instructions of the compare-and-branch family have the
14121 property of accessing the register occurring as second operand with
14122 its bits complemented. If such a compare is grouped with a second
14123 instruction that accesses the same register non-complemented, and
14124 if that register's value is delivered via a bypass, then the
14125 pipeline recycles, thereby causing significant performance decline.
14126 This function locates such situations and exchanges the two
14127 operands of the compare. The function returns true whenever it
14130 s390_z10_optimize_cmp (rtx_insn *insn)
14132 rtx_insn *prev_insn, *next_insn;
14133 bool insn_added_p = false;
14134 rtx cond, *op0, *op1;
14136 if (GET_CODE (PATTERN (insn)) == PARALLEL)
14138 /* Handle compare and branch and branch on count
14140 rtx pattern = single_set (insn);
14143 || SET_DEST (pattern) != pc_rtx
14144 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
14147 cond = XEXP (SET_SRC (pattern), 0);
14148 op0 = &XEXP (cond, 0);
14149 op1 = &XEXP (cond, 1);
14151 else if (GET_CODE (PATTERN (insn)) == SET)
14155 /* Handle normal compare instructions. */
14156 src = SET_SRC (PATTERN (insn));
14157 dest = SET_DEST (PATTERN (insn));
14160 || !CC_REGNO_P (REGNO (dest))
14161 || GET_CODE (src) != COMPARE)
14164 /* s390_swap_cmp will try to find the conditional
14165 jump when passing NULL_RTX as condition. */
14167 op0 = &XEXP (src, 0);
14168 op1 = &XEXP (src, 1);
14173 if (!REG_P (*op0) || !REG_P (*op1))
14176 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
14179 /* Swap the COMPARE arguments and its mask if there is a
14180 conflicting access in the previous insn. */
14181 prev_insn = prev_active_insn (insn);
14182 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
14183 && reg_referenced_p (*op1, PATTERN (prev_insn)))
14184 s390_swap_cmp (cond, op0, op1, insn);
14186 /* Check if there is a conflict with the next insn. If there
14187 was no conflict with the previous insn, then swap the
14188 COMPARE arguments and its mask. If we already swapped
14189 the operands, or if swapping them would cause a conflict
14190 with the previous insn, issue a NOP after the COMPARE in
14191 order to separate the two instructions. */
14192 next_insn = next_active_insn (insn);
14193 if (next_insn != NULL_RTX && INSN_P (next_insn)
14194 && s390_non_addr_reg_read_p (*op1, next_insn))
14196 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
14197 && s390_non_addr_reg_read_p (*op0, prev_insn))
14199 if (REGNO (*op1) == 0)
14200 emit_insn_after (gen_nop1 (), insn);
14202 emit_insn_after (gen_nop (), insn);
14203 insn_added_p = true;
14206 s390_swap_cmp (cond, op0, op1, insn);
14208 return insn_added_p;
14211 /* Number of INSNs to be scanned backward in the last BB of the loop
14212 and forward in the first BB of the loop. This usually should be a
14213 bit more than the number of INSNs which could go into one
14215 #define S390_OSC_SCAN_INSN_NUM 5
14217 /* Scan LOOP for static OSC collisions and return true if an osc_break
14218 should be issued for this loop. */
14220 s390_adjust_loop_scan_osc (struct loop* loop)
14223 HARD_REG_SET modregs, newregs;
14224 rtx_insn *insn, *store_insn = NULL;
14226 struct s390_address addr_store, addr_load;
14227 subrtx_iterator::array_type array;
14230 CLEAR_HARD_REG_SET (modregs);
14233 FOR_BB_INSNS_REVERSE (loop->latch, insn)
14235 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14239 if (insn_count > S390_OSC_SCAN_INSN_NUM)
14242 find_all_hard_reg_sets (insn, &newregs, true);
14243 IOR_HARD_REG_SET (modregs, newregs);
14245 set = single_set (insn);
14249 if (MEM_P (SET_DEST (set))
14250 && s390_decompose_address (XEXP (SET_DEST (set), 0), &addr_store))
14257 if (store_insn == NULL_RTX)
14261 FOR_BB_INSNS (loop->header, insn)
14263 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14266 if (insn == store_insn)
14270 if (insn_count > S390_OSC_SCAN_INSN_NUM)
14273 find_all_hard_reg_sets (insn, &newregs, true);
14274 IOR_HARD_REG_SET (modregs, newregs);
14276 set = single_set (insn);
14280 /* An intermediate store disrupts static OSC checking
14282 if (MEM_P (SET_DEST (set))
14283 && s390_decompose_address (XEXP (SET_DEST (set), 0), NULL))
14286 FOR_EACH_SUBRTX (iter, array, SET_SRC (set), NONCONST)
14288 && s390_decompose_address (XEXP (*iter, 0), &addr_load)
14289 && rtx_equal_p (addr_load.base, addr_store.base)
14290 && rtx_equal_p (addr_load.indx, addr_store.indx)
14291 && rtx_equal_p (addr_load.disp, addr_store.disp))
14293 if ((addr_load.base != NULL_RTX
14294 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.base)))
14295 || (addr_load.indx != NULL_RTX
14296 && TEST_HARD_REG_BIT (modregs, REGNO (addr_load.indx))))
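/* Illustrative candidate (hypothetical operands): a latch that ends
   with "st %r1,8(%r2)" and a header that starts over with
   "l %r0,8(%r2)", where the loop also advances %r2, matches the
   base/index/displacement comparison above; since %r2 is in MODREGS,
   an osc_break is requested for this loop.  */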
14303 /* Look for adjustments which can be done on simple innermost
14306 s390_adjust_loops ()
14308 struct loop *loop = NULL;
14311 compute_bb_for_insn ();
14313 /* Find the loops. */
14314 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
14316 FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
14320 flow_loop_dump (loop, dump_file, NULL, 0);
14321 fprintf (dump_file, ";; OSC loop scan Loop: ");
14323 if (loop->latch == NULL
14324 || pc_set (BB_END (loop->latch)) == NULL_RTX
14325 || !s390_adjust_loop_scan_osc (loop))
14329 if (loop->latch == NULL)
14330 fprintf (dump_file, " multiple backward jumps\n");
14333 fprintf (dump_file, " header insn: %d latch insn: %d ",
14334 INSN_UID (BB_HEAD (loop->header)),
14335 INSN_UID (BB_END (loop->latch)));
14336 if (pc_set (BB_END (loop->latch)) == NULL_RTX)
14337 fprintf (dump_file, " loop does not end with jump\n");
14339 fprintf (dump_file, " not instrumented\n");
14345 rtx_insn *new_insn;
14348 fprintf (dump_file, " adding OSC break insn: ");
14349 new_insn = emit_insn_before (gen_osc_break (),
14350 BB_END (loop->latch));
14351 INSN_ADDRESSES_NEW (new_insn, -1);
14355 loop_optimizer_finalize ();
14357 df_finish_pass (false);
14360 /* Perform machine-dependent processing. */
14365 bool pool_overflow = false;
14366 int hw_before, hw_after;
14368 if (s390_tune == PROCESSOR_2964_Z13)
14369 s390_adjust_loops ();
14371 /* Make sure all splits have been performed; splits after
14372 machine_dependent_reorg might confuse insn length counts. */
14373 split_all_insns_noflow ();
14375 /* Install the main literal pool and the associated base
14376 register load insns.
14378 In addition, there are two problematic situations we need
14381 - the literal pool might be > 4096 bytes in size, so that
14382 some of its elements cannot be directly accessed
14384 - a branch target might be > 64K away from the branch, so that
14385 it is not possible to use a PC-relative instruction.
14387 To fix those, we split the single literal pool into multiple
14388 pool chunks, reloading the pool base register at various
14389 points throughout the function to ensure it always points to
14390 the pool chunk the following code expects, and / or replace
14391 PC-relative branches by absolute branches.
14393 However, the two problems are interdependent: splitting the
14394 literal pool can move a branch further away from its target,
14395 causing the 64K limit to overflow, and on the other hand,
14396 replacing a PC-relative branch by an absolute branch means
14397 we need to put the branch target address into the literal
14398 pool, possibly causing it to overflow.
14400 So, we loop trying to fix up both problems until we manage
14401 to satisfy both conditions at the same time. Note that the
14402 loop is guaranteed to terminate as every pass of the loop
14403 strictly decreases the total number of PC-relative branches
14404 in the function. (This is not completely true as there
14405 might be branch-over-pool insns introduced by chunkify_start.
14406 Those never need to be split however.) */
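/* In outline, the fixpoint loop below behaves like (sketch only):

     do
       pool = pool_overflow ? s390_chunkify_start ()
                            : s390_mainpool_start ();
     while (a branch had to be split, cancelling and recollecting POOL);

   with POOL_OVERFLOW becoming true once the single main pool no
   longer fits.  */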
14410 struct constant_pool *pool = NULL;
14412 /* Collect the literal pool. */
14413 if (!pool_overflow)
14415 pool = s390_mainpool_start ();
14417 pool_overflow = true;
14420 /* If literal pool overflowed, start to chunkify it. */
14422 pool = s390_chunkify_start ();
14424 /* Split out-of-range branches. If this has created new
14425 literal pool entries, cancel current chunk list and
14426 recompute it. zSeries machines have large branch
14427 instructions, so we never need to split a branch. */
14428 if (!TARGET_CPU_ZARCH && s390_split_branches ())
14431 s390_chunkify_cancel (pool);
14433 s390_mainpool_cancel (pool);
14438 /* If we made it up to here, both conditions are satisfied.
14439 Finish up literal pool related changes. */
14441 s390_chunkify_finish (pool);
14443 s390_mainpool_finish (pool);
14445 /* We're done splitting branches. */
14446 cfun->machine->split_branches_pending_p = false;
14450 /* Generate out-of-pool execute target insns. */
14451 if (TARGET_CPU_ZARCH)
14453 rtx_insn *insn, *target;
14456 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14458 label = s390_execute_label (insn);
14462 gcc_assert (label != const0_rtx);
14464 target = emit_label (XEXP (label, 0));
14465 INSN_ADDRESSES_NEW (target, -1);
14467 target = emit_insn (s390_execute_target (insn));
14468 INSN_ADDRESSES_NEW (target, -1);
14472 /* Try to optimize prologue and epilogue further. */
14473 s390_optimize_prologue ();
14475 /* Walk over the insns and do some >=z10 specific changes. */
14476 if (s390_tune >= PROCESSOR_2097_Z10)
14479 bool insn_added_p = false;
14481 /* The insn lengths and addresses have to be up to date for the
14482 following manipulations. */
14483 shorten_branches (get_insns ());
14485 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14487 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
14491 insn_added_p |= s390_fix_long_loop_prediction (insn);
14493 if ((GET_CODE (PATTERN (insn)) == PARALLEL
14494 || GET_CODE (PATTERN (insn)) == SET)
14495 && s390_tune == PROCESSOR_2097_Z10)
14496 insn_added_p |= s390_z10_optimize_cmp (insn);
14499 /* Adjust branches if we added new instructions. */
14501 shorten_branches (get_insns ());
14504 s390_function_num_hotpatch_hw (current_function_decl, &hw_before, &hw_after);
14509 /* Insert NOPs for hotpatching. */
14510 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14512 1. inside the area covered by debug information to allow setting
14513 breakpoints at the NOPs,
14514 2. before any insn which results in an asm instruction,
14515 3. before in-function labels to avoid jumping to the NOPs, for
14516 example as part of a loop,
14517 4. before any barrier in case the function is completely empty
14518 (__builtin_unreachable ()) and has neither internal labels nor
14521 if (active_insn_p (insn) || BARRIER_P (insn) || LABEL_P (insn))
14523 /* Output a series of NOPs before the first active insn. */
14524 while (insn && hw_after > 0)
14526 if (hw_after >= 3 && TARGET_CPU_ZARCH)
14528 emit_insn_before (gen_nop_6_byte (), insn);
14531 else if (hw_after >= 2)
14533 emit_insn_before (gen_nop_4_byte (), insn);
14538 emit_insn_before (gen_nop_2_byte (), insn);
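/* Worked example (assuming each branch above decrements HW_AFTER by
   the number of halfwords it emits): hw_after == 5 on a zarch target
   emits one 6-byte NOP (3 halfwords) followed by one 4-byte NOP
   (2 halfwords), i.e. the largest NOPs that add up to exactly
   HW_AFTER halfwords.  */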
14545 /* Return true if INSN is a fp load insn writing register REGNO. */
14547 s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
14550 enum attr_type flag = s390_safe_attr_type (insn);
14552 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
14555 set = single_set (insn);
14557 if (set == NULL_RTX)
14560 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
14563 if (REGNO (SET_DEST (set)) != regno)
14569 /* This value describes the distance to be avoided between an
14570 arithmetic fp instruction and an fp load writing the same register.
14571 Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 is
14572 fine but the exact value has to be avoided. Otherwise the FP
14573 pipeline will throw an exception causing a major penalty. */
14574 #define Z10_EARLYLOAD_DISTANCE 7
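/* Worked example (hypothetical operands): with a distance of 7, a
   sequence such as

     adbr %f0,%f2      <- arithmetic FP instruction
     ... 6 other active insns ...
     ld   %f0,0(%r2)   <- FP load, exactly 7 insns later

   hits the penalty, while issuing the load one insn earlier or later
   is fine; s390_z10_prevent_earlyload_conflicts below therefore only
   demotes a load found at exactly this distance.  */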
14576 /* Rearrange the ready list in order to avoid the situation described
14577 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
14578 moved to the very end of the ready list. */
14580 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
14582 unsigned int regno;
14583 int nready = *nready_p;
14588 enum attr_type flag;
14591 /* Skip DISTANCE - 1 active insns. */
14592 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
14593 distance > 0 && insn != NULL_RTX;
14594 distance--, insn = prev_active_insn (insn))
14595 if (CALL_P (insn) || JUMP_P (insn))
14598 if (insn == NULL_RTX)
14601 set = single_set (insn);
14603 if (set == NULL_RTX || !REG_P (SET_DEST (set))
14604 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
14607 flag = s390_safe_attr_type (insn);
14609 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
14612 regno = REGNO (SET_DEST (set));
14615 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
14622 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
14626 /* Returns TRUE if BB is entered via a fallthru edge and all other
14627 incoming edges are less than unlikely. */
14629 s390_bb_fallthru_entry_likely (basic_block bb)
14631 edge e, fallthru_edge;
14637 fallthru_edge = find_fallthru_edge (bb->preds);
14638 if (!fallthru_edge)
14641 FOR_EACH_EDGE (e, ei, bb->preds)
14642 if (e != fallthru_edge
14643 && e->probability >= profile_probability::unlikely ())
14649 /* The s390_sched_state variable tracks the state of the current or
14650 the last instruction group.
14652 0,1,2 number of instructions scheduled in the current group
14653 3 the last group is complete - normal insns
14654 4 the last group was a cracked/expanded insn */
14656 static int s390_sched_state = 0;
14658 #define S390_SCHED_STATE_NORMAL 3
14659 #define S390_SCHED_STATE_CRACKED 4
14661 #define S390_SCHED_ATTR_MASK_CRACKED 0x1
14662 #define S390_SCHED_ATTR_MASK_EXPANDED 0x2
14663 #define S390_SCHED_ATTR_MASK_ENDGROUP 0x4
14664 #define S390_SCHED_ATTR_MASK_GROUPALONE 0x8
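/* Example transitions (illustrative): issuing three normal insns
   advances s390_sched_state 0 -> 1 -> 2 -> S390_SCHED_STATE_NORMAL;
   a cracked or expanded insn moves it to S390_SCHED_STATE_CRACKED
   and, like a group-alone insn, starts a new group.  See
   s390_sched_variable_issue below for the exact transitions.  */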
14666 static unsigned int
14667 s390_get_sched_attrmask (rtx_insn *insn)
14669 unsigned int mask = 0;
14673 case PROCESSOR_2827_ZEC12:
14674 if (get_attr_zEC12_cracked (insn))
14675 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14676 if (get_attr_zEC12_expanded (insn))
14677 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14678 if (get_attr_zEC12_endgroup (insn))
14679 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14680 if (get_attr_zEC12_groupalone (insn))
14681 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14683 case PROCESSOR_2964_Z13:
14684 case PROCESSOR_3906_Z14:
14685 if (get_attr_z13_cracked (insn))
14686 mask |= S390_SCHED_ATTR_MASK_CRACKED;
14687 if (get_attr_z13_expanded (insn))
14688 mask |= S390_SCHED_ATTR_MASK_EXPANDED;
14689 if (get_attr_z13_endgroup (insn))
14690 mask |= S390_SCHED_ATTR_MASK_ENDGROUP;
14691 if (get_attr_z13_groupalone (insn))
14692 mask |= S390_SCHED_ATTR_MASK_GROUPALONE;
14695 gcc_unreachable ();
14700 static unsigned int
14701 s390_get_unit_mask (rtx_insn *insn, int *units)
14703 unsigned int mask = 0;
14707 case PROCESSOR_2964_Z13:
14708 case PROCESSOR_3906_Z14:
14710 if (get_attr_z13_unit_lsu (insn))
14712 if (get_attr_z13_unit_fxu (insn))
14714 if (get_attr_z13_unit_vfu (insn))
14718 gcc_unreachable ();
14723 /* Return the scheduling score for INSN. The higher the score the
14724 better. The score is calculated from the OOO scheduling attributes
14725 of INSN and the scheduling state s390_sched_state. */
14727 s390_sched_score (rtx_insn *insn)
14729 unsigned int mask = s390_get_sched_attrmask (insn);
14732 switch (s390_sched_state)
14735 /* Try to put insns into the first slot which would otherwise
14737 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14738 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14740 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14744 /* Prefer not cracked insns while trying to put together a
14746 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14747 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14748 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14750 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) == 0)
14754 /* Prefer not cracked insns while trying to put together a
14756 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14757 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0
14758 && (mask & S390_SCHED_ATTR_MASK_GROUPALONE) == 0)
14760 /* Prefer endgroup insns in the last slot. */
14761 if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0)
14764 case S390_SCHED_STATE_NORMAL:
14765 /* Prefer not cracked insns if the last was not cracked. */
14766 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) == 0
14767 && (mask & S390_SCHED_ATTR_MASK_EXPANDED) == 0)
14769 if ((mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14772 case S390_SCHED_STATE_CRACKED:
14773 /* Try to keep cracked insns together to prevent them from
14774 interrupting groups. */
14775 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14776 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14781 if (s390_tune >= PROCESSOR_2964_Z13)
14784 unsigned unit_mask, m = 1;
14786 unit_mask = s390_get_unit_mask (insn, &units);
14787 gcc_assert (units <= MAX_SCHED_UNITS);
14789 /* Add a score in range 0..MAX_SCHED_MIX_SCORE depending on how long
14790 ago the last insn of this unit type got scheduled. This is
14791 supposed to help provide a proper instruction mix to the
14793 for (i = 0; i < units; i++, m <<= 1)
14795 score += (last_scheduled_unit_distance[i] * MAX_SCHED_MIX_SCORE /
14796 MAX_SCHED_MIX_DISTANCE);
14798 unsigned latency = insn_default_latency (insn);
14800 int other_side = 1 - current_side;
14802 /* Try to delay long-running insns when side is busy. */
14803 if (latency > LONGRUNNING_THRESHOLD)
14805 if (get_attr_z13_unit_fxu (insn) && fxu_longrunning[current_side]
14806 && fxu_longrunning[other_side] <= fxu_longrunning[current_side])
14807 score = MAX (0, score - 10);
14809 if (get_attr_z13_unit_vfu (insn) && vfu_longrunning[current_side]
14810 && vfu_longrunning[other_side] <= vfu_longrunning[current_side])
14811 score = MAX (0, score - 10);
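/* Both penalties above clamp at zero, so a long-running insn is only
   deprioritized relative to its peers in the ready list, never made
   unschedulable; s390_sched_reorder still issues it when nothing
   better is ready.  */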
14818 /* This function is called via hook TARGET_SCHED_REORDER before
14819 issuing one insn from list READY which contains *NREADYP entries.
14820 For target z10 it reorders load instructions to avoid early load
14821 conflicts in the floating point pipeline. */
14823 s390_sched_reorder (FILE *file, int verbose,
14824 rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
14826 if (s390_tune == PROCESSOR_2097_Z10
14827 && reload_completed
14829 s390_z10_prevent_earlyload_conflicts (ready, nreadyp);
14831 if (s390_tune >= PROCESSOR_2827_ZEC12
14832 && reload_completed
14836 int last_index = *nreadyp - 1;
14837 int max_index = -1;
14838 int max_score = -1;
14841 /* Just move the insn with the highest score to the top (the
14842 end) of the list. A full sort is not needed since a conflict
14843 in the hazard recognition cannot happen. So the top insn in
14844 the ready list will always be taken. */
14845 for (i = last_index; i >= 0; i--)
14849 if (recog_memoized (ready[i]) < 0)
14852 score = s390_sched_score (ready[i]);
14853 if (score > max_score)
14860 if (max_index != -1)
14862 if (max_index != last_index)
14864 tmp = ready[max_index];
14865 ready[max_index] = ready[last_index];
14866 ready[last_index] = tmp;
14870 ";;\t\tBACKEND: move insn %d to the top of list\n",
14871 INSN_UID (ready[last_index]));
14873 else if (verbose > 5)
14875 ";;\t\tBACKEND: best insn %d already on top\n",
14876 INSN_UID (ready[last_index]));
14881 fprintf (file, "ready list ooo attributes - sched state: %d\n",
14884 for (i = last_index; i >= 0; i--)
14886 unsigned int sched_mask;
14887 rtx_insn *insn = ready[i];
14889 if (recog_memoized (insn) < 0)
14892 sched_mask = s390_get_sched_attrmask (insn);
14893 fprintf (file, ";;\t\tBACKEND: insn %d score: %d: ",
14895 s390_sched_score (insn));
14896 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ",\
14897 ((M) & sched_mask) ? #ATTR : "");
14898 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
14899 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
14900 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
14901 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
14902 #undef PRINT_SCHED_ATTR
14903 if (s390_tune >= PROCESSOR_2964_Z13)
14905 unsigned int unit_mask, m = 1;
14908 unit_mask = s390_get_unit_mask (insn, &units);
14909 fprintf (file, "(units:");
14910 for (j = 0; j < units; j++, m <<= 1)
14912 fprintf (file, " u%d", j);
14913 fprintf (file, ")");
14915 fprintf (file, "\n");
14920 return s390_issue_rate ();
14924 /* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
14925 the scheduler has issued INSN. It stores the last issued insn into
14926 last_scheduled_insn in order to make it available for
14927 s390_sched_reorder. */
14929 s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
14931 last_scheduled_insn = insn;
14933 bool starts_group = false;
14935 if (s390_tune >= PROCESSOR_2827_ZEC12
14936 && reload_completed
14937 && recog_memoized (insn) >= 0)
14939 unsigned int mask = s390_get_sched_attrmask (insn);
14941 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14942 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0
14943 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14944 starts_group = true;
14946 if ((mask & S390_SCHED_ATTR_MASK_CRACKED) != 0
14947 || (mask & S390_SCHED_ATTR_MASK_EXPANDED) != 0)
14948 s390_sched_state = S390_SCHED_STATE_CRACKED;
14949 else if ((mask & S390_SCHED_ATTR_MASK_ENDGROUP) != 0
14950 || (mask & S390_SCHED_ATTR_MASK_GROUPALONE) != 0)
14951 s390_sched_state = S390_SCHED_STATE_NORMAL;
14954 /* Only normal insns are left (mask == 0). */
14955 switch (s390_sched_state)
14958 starts_group = true;
14962 s390_sched_state++;
14964 case S390_SCHED_STATE_NORMAL:
14965 starts_group = true;
14966 s390_sched_state = 1;
14968 case S390_SCHED_STATE_CRACKED:
14969 s390_sched_state = S390_SCHED_STATE_NORMAL;
14974 if (s390_tune >= PROCESSOR_2964_Z13)
14977 unsigned unit_mask, m = 1;
14979 unit_mask = s390_get_unit_mask (insn, &units);
14980 gcc_assert (units <= MAX_SCHED_UNITS);
14982 for (i = 0; i < units; i++, m <<= 1)
14984 last_scheduled_unit_distance[i] = 0;
14985 else if (last_scheduled_unit_distance[i] < MAX_SCHED_MIX_DISTANCE)
14986 last_scheduled_unit_distance[i]++;
14989 /* If this insn started a new group, the side flipped. */
14991 current_side = current_side ? 0 : 1;
14993 for (int i = 0; i < 2; i++)
14995 if (fxu_longrunning[i] >= 1)
14996 fxu_longrunning[i] -= 1;
14997 if (vfu_longrunning[i] >= 1)
14998 vfu_longrunning[i] -= 1;
15001 unsigned latency = insn_default_latency (insn);
15002 if (latency > LONGRUNNING_THRESHOLD)
15004 if (get_attr_z13_unit_fxu (insn))
15005 fxu_longrunning[current_side] = latency * LATENCY_FACTOR;
15007 vfu_longrunning[current_side] = latency * LATENCY_FACTOR;
15012 unsigned int sched_mask;
15014 sched_mask = s390_get_sched_attrmask (insn);
15016 fprintf (file, ";;\t\tBACKEND: insn %d: ", INSN_UID (insn));
15017 #define PRINT_SCHED_ATTR(M, ATTR) fprintf (file, "%s ", ((M) & sched_mask) ? #ATTR : "");
15018 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_CRACKED, cracked);
15019 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_EXPANDED, expanded);
15020 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_ENDGROUP, endgroup);
15021 PRINT_SCHED_ATTR (S390_SCHED_ATTR_MASK_GROUPALONE, groupalone);
15022 #undef PRINT_SCHED_ATTR
15024 if (s390_tune >= PROCESSOR_2964_Z13)
15026 unsigned int unit_mask, m = 1;
15029 unit_mask = s390_get_unit_mask (insn, &units);
15030 fprintf (file, "(units:");
15031 for (j = 0; j < units; j++, m <<= 1)
15033 fprintf (file, " %d", j);
15034 fprintf (file, ")");
15036 fprintf (file, " sched state: %d\n", s390_sched_state);
15038 if (s390_tune >= PROCESSOR_2964_Z13)
15042 s390_get_unit_mask (insn, &units);
15044 fprintf (file, ";;\t\tBACKEND: units unused for: ");
15045 for (j = 0; j < units; j++)
15046 fprintf (file, "%d:%d ", j, last_scheduled_unit_distance[j]);
15047 fprintf (file, "\n");
15052 if (GET_CODE (PATTERN (insn)) != USE
15053 && GET_CODE (PATTERN (insn)) != CLOBBER)
15060 s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
15061 int verbose ATTRIBUTE_UNUSED,
15062 int max_ready ATTRIBUTE_UNUSED)
15064 last_scheduled_insn = NULL;
15065 memset (last_scheduled_unit_distance, 0, MAX_SCHED_UNITS * sizeof (int));
15067 /* If the next basic block is most likely entered via a fallthru edge
15068 we keep the last sched state. Otherwise we start a new group.
15069 The scheduler traverses basic blocks in "instruction stream" ordering
15070 so if we see a fallthru edge here, s390_sched_state will be of its
15073 current_sched_info->prev_head is the insn before the first insn of the
15074 block of insns to be scheduled.
15076 rtx_insn *insn = current_sched_info->prev_head
15077 ? NEXT_INSN (current_sched_info->prev_head) : NULL;
15078 basic_block bb = insn ? BLOCK_FOR_INSN (insn) : NULL;
15079 if (s390_tune < PROCESSOR_2964_Z13 || !s390_bb_fallthru_entry_likely (bb))
15080 s390_sched_state = 0;
15083 /* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST calculates
15084 the number of times struct loop *loop should be unrolled if tuned
15085 for CPUs with a built-in stride prefetcher.
15086 The loop is analyzed for memory accesses by calling check_dpu for
15087 each rtx of the loop. Depending on the loop_depth and the number of
15088 memory accesses, a new number <= nunroll is returned to improve the
15089 behavior of the hardware prefetch unit. */
15091 s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
15096 unsigned mem_count = 0;
15098 if (s390_tune < PROCESSOR_2097_Z10)
15101 /* Count the number of memory references within the loop body. */
15102 bbs = get_loop_body (loop);
15103 subrtx_iterator::array_type array;
15104 for (i = 0; i < loop->num_nodes; i++)
15105 FOR_BB_INSNS (bbs[i], insn)
15106 if (INSN_P (insn) && INSN_CODE (insn) != -1)
15107 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
15112 /* Prevent division by zero; nunroll need not be adjusted in this case. */
15113 if (mem_count == 0)
15116 switch (loop_depth(loop))
15119 return MIN (nunroll, 28 / mem_count);
15121 return MIN (nunroll, 22 / mem_count);
15123 return MIN (nunroll, 16 / mem_count);
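/* Worked example (assuming the first case above is loop depth 1): a
   depth-1 loop on z10 or newer containing 7 memory references is
   unrolled at most MIN (nunroll, 28 / 7) = at most 4 times.  */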
15127 /* Restore the current options. This is a hook function and also called
15131 s390_function_specific_restore (struct gcc_options *opts,
15132 struct cl_target_option *ptr ATTRIBUTE_UNUSED)
15134 opts->x_s390_cost_pointer = (long)processor_table[opts->x_s390_tune].cost;
15138 s390_option_override_internal (bool main_args_p,
15139 struct gcc_options *opts,
15140 const struct gcc_options *opts_set)
15142 const char *prefix;
15143 const char *suffix;
15145 /* Set up prefix/suffix so the error messages refer to either the command
15146 line argument, or the attribute(target). */
15154 prefix = "option(\"";
15159 /* Architecture mode defaults according to ABI. */
15160 if (!(opts_set->x_target_flags & MASK_ZARCH))
15163 opts->x_target_flags |= MASK_ZARCH;
15165 opts->x_target_flags &= ~MASK_ZARCH;
15168 /* Set the march default in case it hasn't been specified on cmdline. */
15169 if (!opts_set->x_s390_arch)
15170 opts->x_s390_arch = PROCESSOR_2064_Z900;
15171 else if (opts->x_s390_arch == PROCESSOR_9672_G5
15172 || opts->x_s390_arch == PROCESSOR_9672_G6)
15173 warning (OPT_Wdeprecated, "%sarch=%s%s is deprecated and will be removed "
15174 "in future releases; use at least %sarch=z900%s",
15175 prefix, opts->x_s390_arch == PROCESSOR_9672_G5 ? "g5" : "g6",
15176 suffix, prefix, suffix);
15178 opts->x_s390_arch_flags = processor_flags_table[(int) opts->x_s390_arch];
15180 /* Determine processor to tune for. */
15181 if (!opts_set->x_s390_tune)
15182 opts->x_s390_tune = opts->x_s390_arch;
15183 else if (opts->x_s390_tune == PROCESSOR_9672_G5
15184 || opts->x_s390_tune == PROCESSOR_9672_G6)
15185 warning (OPT_Wdeprecated, "%stune=%s%s is deprecated and will be removed "
15186 "in future releases; use at least %stune=z900%s",
15187 prefix, opts->x_s390_tune == PROCESSOR_9672_G5 ? "g5" : "g6",
15188 suffix, prefix, suffix);
15190 opts->x_s390_tune_flags = processor_flags_table[opts->x_s390_tune];
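/* Hence plain -march=z13 also tunes for z13, while an explicit
   -march=z13 -mtune=zEC12 keeps the zEC12 tuning flags.  */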
15192 /* Sanity checks. */
15193 if (opts->x_s390_arch == PROCESSOR_NATIVE
15194 || opts->x_s390_tune == PROCESSOR_NATIVE)
15195 gcc_unreachable ();
15196 if (TARGET_ZARCH_P (opts->x_target_flags) && !TARGET_CPU_ZARCH_P (opts))
15197 error ("z/Architecture mode not supported on %s",
15198 processor_table[(int)opts->x_s390_arch].name);
15199 if (TARGET_64BIT && !TARGET_ZARCH_P (opts->x_target_flags))
15200 error ("64-bit ABI not supported in ESA/390 mode");
15202 /* Enable hardware transactions if available and not explicitly
15203 disabled by user. E.g. with -m31 -march=zEC12 -mzarch */
15204 if (!TARGET_OPT_HTM_P (opts_set->x_target_flags))
15206 if (TARGET_CPU_HTM_P (opts) && TARGET_ZARCH_P (opts->x_target_flags))
15207 opts->x_target_flags |= MASK_OPT_HTM;
15209 opts->x_target_flags &= ~MASK_OPT_HTM;
15212 if (TARGET_OPT_VX_P (opts_set->x_target_flags))
15214 if (TARGET_OPT_VX_P (opts->x_target_flags))
15216 if (!TARGET_CPU_VX_P (opts))
15217 error ("hardware vector support not available on %s",
15218 processor_table[(int)opts->x_s390_arch].name);
15219 if (TARGET_SOFT_FLOAT_P (opts->x_target_flags))
15220 error ("hardware vector support not available with -msoft-float");
15225 if (TARGET_CPU_VX_P (opts))
15226 /* Enable vector support if available and not explicitly disabled
15227 by user. E.g. with -m31 -march=z13 -mzarch */
15228 opts->x_target_flags |= MASK_OPT_VX;
15230 opts->x_target_flags &= ~MASK_OPT_VX;
15233 /* Use hardware DFP if available and not explicitly disabled by
15234 user. E.g. with -m31 -march=z10 -mzarch */
15235 if (!TARGET_HARD_DFP_P (opts_set->x_target_flags))
15237 if (TARGET_DFP_P (opts))
15238 opts->x_target_flags |= MASK_HARD_DFP;
15240 opts->x_target_flags &= ~MASK_HARD_DFP;
15243 if (TARGET_HARD_DFP_P (opts->x_target_flags) && !TARGET_DFP_P (opts))
15245 if (TARGET_HARD_DFP_P (opts_set->x_target_flags))
15247 if (!TARGET_CPU_DFP_P (opts))
15248 error ("hardware decimal floating point instructions"
15249 " not available on %s",
15250 processor_table[(int)opts->x_s390_arch].name);
15251 if (!TARGET_ZARCH_P (opts->x_target_flags))
15252 error ("hardware decimal floating point instructions"
15253 " not available in ESA/390 mode");
15256 opts->x_target_flags &= ~MASK_HARD_DFP;
15259 if (TARGET_SOFT_FLOAT_P (opts_set->x_target_flags)
15260 && TARGET_SOFT_FLOAT_P (opts->x_target_flags))
15262 if (TARGET_HARD_DFP_P (opts_set->x_target_flags)
15263 && TARGET_HARD_DFP_P (opts->x_target_flags))
15264 error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");
15266 opts->x_target_flags &= ~MASK_HARD_DFP;
15269 if (TARGET_BACKCHAIN_P (opts->x_target_flags)
15270 && TARGET_PACKED_STACK_P (opts->x_target_flags)
15271 && TARGET_HARD_FLOAT_P (opts->x_target_flags))
15272 error ("-mbackchain -mpacked-stack -mhard-float are not supported "
15275 if (opts->x_s390_stack_size)
15277 if (opts->x_s390_stack_guard >= opts->x_s390_stack_size)
15278 error ("stack size must be greater than the stack guard value");
15279 else if (opts->x_s390_stack_size > 1 << 16)
15280 error ("stack size must not be greater than 64k");
15282 else if (opts->x_s390_stack_guard)
15283 error ("-mstack-guard implies use of -mstack-size");
15285 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
15286 if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
15287 opts->x_target_flags |= MASK_LONG_DOUBLE_128;
15290 if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
15292 maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
15293 opts->x_param_values,
15294 opts_set->x_param_values);
15295 maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
15296 opts->x_param_values,
15297 opts_set->x_param_values);
15298 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
15299 opts->x_param_values,
15300 opts_set->x_param_values);
15301 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
15302 opts->x_param_values,
15303 opts_set->x_param_values);
15306 maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
15307 opts->x_param_values,
15308 opts_set->x_param_values);
15309 /* values for loop prefetching */
15310 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
15311 opts->x_param_values,
15312 opts_set->x_param_values);
15313 maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
15314 opts->x_param_values,
15315 opts_set->x_param_values);
15316 /* s390 has more than 2 levels and the size is much larger. Since
15317 we are always running virtualized, assume that we only get a small
15318 part of the caches above L1. */
15319 maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
15320 opts->x_param_values,
15321 opts_set->x_param_values);
15322 maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
15323 opts->x_param_values,
15324 opts_set->x_param_values);
15325 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
15326 opts->x_param_values,
15327 opts_set->x_param_values);
15329 /* Use the alternative scheduling-pressure algorithm by default. */
15330 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
15331 opts->x_param_values,
15332 opts_set->x_param_values);
15334 maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
15335 opts->x_param_values,
15336 opts_set->x_param_values);
15338 /* Call target specific restore function to do post-init work. At the moment,
15339 this just sets opts->x_s390_cost_pointer. */
15340 s390_function_specific_restore (opts, NULL);
15344 s390_option_override (void)
15347 cl_deferred_option *opt;
15348 vec<cl_deferred_option> *v =
15349 (vec<cl_deferred_option> *) s390_deferred_options;
15352 FOR_EACH_VEC_ELT (*v, i, opt)
15354 switch (opt->opt_index)
15356 case OPT_mhotpatch_:
15360 char *s = strtok (ASTRDUP (opt->arg), ",");
15361 char *t = strtok (NULL, "\0");
15365 val1 = integral_argument (s);
15366 val2 = integral_argument (t);
15373 if (val1 == -1 || val2 == -1)
15375 /* argument is not a plain number */
15376 error ("arguments to %qs should be non-negative integers",
15380 else if (val1 > s390_hotpatch_hw_max
15381 || val2 > s390_hotpatch_hw_max)
15383 error ("argument to %qs is too large (max. %d)",
15384 "-mhotpatch=n,m", s390_hotpatch_hw_max);
15387 s390_hotpatch_hw_before_label = val1;
15388 s390_hotpatch_hw_after_label = val2;
15392 gcc_unreachable ();
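/* Example: -mhotpatch=1,2 records one halfword of padding before and
   two halfwords after the function label; s390_reorg above then
   materializes the "after" halfwords as 2-, 4- or 6-byte NOPs.  */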
15396 /* Set up function hooks. */
15397 init_machine_status = s390_init_machine_status;
15399 s390_option_override_internal (true, &global_options, &global_options_set);
15401 /* Save the initial options in case the user does function specific
15403 target_option_default_node = build_target_option_node (&global_options);
15404 target_option_current_node = target_option_default_node;
15406 /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
15407 requires the arch flags to be evaluated already. Since prefetching
15408 is beneficial on s390, we enable it if available. */
15409 if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
15410 flag_prefetch_loop_arrays = 1;
15412 if (!s390_pic_data_is_text_relative && !flag_pic)
15413 error ("-mno-pic-data-is-text-relative cannot be used without -fpic/-fPIC");
15417 /* Don't emit DWARF3/4 unless specifically selected. The TPF
15418 debuggers do not yet support DWARF 3/4. */
15419 if (!global_options_set.x_dwarf_strict)
15421 if (!global_options_set.x_dwarf_version)
15425 /* Register a target-specific optimization-and-lowering pass
15426 to run immediately before prologue and epilogue generation.
15428 Registering the pass must be done at start up. It's
15429 convenient to do it here. */
15430 opt_pass *new_pass = new pass_s390_early_mach (g);
15431 struct register_pass_info insert_pass_s390_early_mach =
15433 new_pass, /* pass */
15434 "pro_and_epilogue", /* reference_pass_name */
15435 1, /* ref_pass_instance_number */
15436 PASS_POS_INSERT_BEFORE /* po_op */
15438 register_pass (&insert_pass_s390_early_mach);
15441 #if S390_USE_TARGET_ATTRIBUTE
15442 /* Inner function to process the attribute((target(...))), take an argument and
15443 set the current options from the argument. If we have a list, recursively go
15447 s390_valid_target_attribute_inner_p (tree args,
15448 struct gcc_options *opts,
15449 struct gcc_options *new_opts_set,
15455 #define S390_ATTRIB(S,O,A) { S, sizeof (S)-1, O, A, 0 }
15456 #define S390_PRAGMA(S,O,A) { S, sizeof (S)-1, O, A, 1 }
15457 static const struct
15459 const char *string;
15463 int only_as_pragma;
15466 S390_ATTRIB ("arch=", OPT_march_, 1),
15467 S390_ATTRIB ("tune=", OPT_mtune_, 1),
15468 /* uinteger options */
15469 S390_ATTRIB ("stack-guard=", OPT_mstack_guard_, 1),
15470 S390_ATTRIB ("stack-size=", OPT_mstack_size_, 1),
15471 S390_ATTRIB ("branch-cost=", OPT_mbranch_cost_, 1),
15472 S390_ATTRIB ("warn-framesize=", OPT_mwarn_framesize_, 1),
15474 S390_ATTRIB ("backchain", OPT_mbackchain, 0),
15475 S390_ATTRIB ("hard-dfp", OPT_mhard_dfp, 0),
15476 S390_ATTRIB ("hard-float", OPT_mhard_float, 0),
15477 S390_ATTRIB ("htm", OPT_mhtm, 0),
15478 S390_ATTRIB ("vx", OPT_mvx, 0),
15479 S390_ATTRIB ("packed-stack", OPT_mpacked_stack, 0),
15480 S390_ATTRIB ("small-exec", OPT_msmall_exec, 0),
15481 S390_ATTRIB ("soft-float", OPT_msoft_float, 0),
15482 S390_ATTRIB ("mvcle", OPT_mmvcle, 0),
15483 S390_PRAGMA ("zvector", OPT_mzvector, 0),
15484 /* boolean options */
15485 S390_ATTRIB ("warn-dynamicstack", OPT_mwarn_dynamicstack, 0),
15490 /* If this is a list, recurse to get the options. */
15491 if (TREE_CODE (args) == TREE_LIST)
15494 int num_pragma_values;
15497 /* Note: attribs.c:decl_attributes prepends the values from
15498 current_target_pragma to the list of target attributes. To determine
15499 whether we're looking at a value of the attribute or the pragma we
15500 assume that the first [list_length (current_target_pragma)] values in
15501 the list are the values from the pragma. */
15502 num_pragma_values = (!force_pragma && current_target_pragma != NULL)
15503 ? list_length (current_target_pragma) : 0;
15504 for (i = 0; args; args = TREE_CHAIN (args), i++)
15508 is_pragma = (force_pragma || i < num_pragma_values);
15509 if (TREE_VALUE (args)
15510 && !s390_valid_target_attribute_inner_p (TREE_VALUE (args),
15511 opts, new_opts_set,
15520 else if (TREE_CODE (args) != STRING_CST)
15522 error ("attribute %<target%> argument not a string");
15526 /* Handle multiple arguments separated by commas. */
15527 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
15529 while (next_optstr && *next_optstr != '\0')
15531 char *p = next_optstr;
15533 char *comma = strchr (next_optstr, ',');
15534 size_t len, opt_len;
15540 enum cl_var_type var_type;
15546 len = comma - next_optstr;
15547 next_optstr = comma + 1;
15552 next_optstr = NULL;
15555 /* Recognize no-xxx. */
15556 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
15565 /* Find the option. */
15568 for (i = 0; i < ARRAY_SIZE (attrs); i++)
15570 opt_len = attrs[i].len;
15571 if (ch == attrs[i].string[0]
15572 && ((attrs[i].has_arg) ? len > opt_len : len == opt_len)
15573 && memcmp (p, attrs[i].string, opt_len) == 0)
15575 opt = attrs[i].opt;
15576 if (!opt_set_p && cl_options[opt].cl_reject_negative)
15578 mask = cl_options[opt].var_value;
15579 var_type = cl_options[opt].var_type;
15585 /* Process the option. */
15588 error ("attribute(target(\"%s\")) is unknown", orig_p);
15591 else if (attrs[i].only_as_pragma && !force_pragma)
15593 /* Value is not allowed for the target attribute. */
15594 error ("value %qs is not supported by attribute %<target%>",
15599 else if (var_type == CLVC_BIT_SET || var_type == CLVC_BIT_CLEAR)
15601 if (var_type == CLVC_BIT_CLEAR)
15602 opt_set_p = !opt_set_p;
15605 opts->x_target_flags |= mask;
15607 opts->x_target_flags &= ~mask;
15608 new_opts_set->x_target_flags |= mask;
15611 else if (cl_options[opt].var_type == CLVC_BOOLEAN)
15615 if (cl_options[opt].cl_uinteger)
15617 /* Unsigned integer argument. Code based on the function
15618 decode_cmdline_option () in opts-common.c. */
15619 value = integral_argument (p + opt_len);
15622 value = (opt_set_p) ? 1 : 0;
15626 struct cl_decoded_option decoded;
15628 /* Value range check; only implemented for numeric and boolean
15629 options at the moment. */
15630 generate_option (opt, NULL, value, CL_TARGET, &decoded);
15631 s390_handle_option (opts, new_opts_set, &decoded, input_location);
15632 set_option (opts, new_opts_set, opt, value,
15633 p + opt_len, DK_UNSPECIFIED, input_location,
15638 error ("attribute(target(\"%s\")) is unknown", orig_p);
15643 else if (cl_options[opt].var_type == CLVC_ENUM)
15648 arg_ok = opt_enum_arg_to_value (opt, p + opt_len, &value, CL_TARGET);
15650 set_option (opts, new_opts_set, opt, value,
15651 p + opt_len, DK_UNSPECIFIED, input_location,
15655 error ("attribute(target(\"%s\")) is unknown", orig_p);
15661 gcc_unreachable ();
15666 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
15669 s390_valid_target_attribute_tree (tree args,
15670 struct gcc_options *opts,
15671 const struct gcc_options *opts_set,
15674 tree t = NULL_TREE;
15675 struct gcc_options new_opts_set;
15677 memset (&new_opts_set, 0, sizeof (new_opts_set));
15679 /* Process each of the options on the chain. */
15680 if (! s390_valid_target_attribute_inner_p (args, opts, &new_opts_set,
15682 return error_mark_node;
15684 /* If some option was set (even if it has not changed), rerun
15685 s390_option_override_internal, and then save the options away. */
15686 if (new_opts_set.x_target_flags
15687 || new_opts_set.x_s390_arch
15688 || new_opts_set.x_s390_tune
15689 || new_opts_set.x_s390_stack_guard
15690 || new_opts_set.x_s390_stack_size
15691 || new_opts_set.x_s390_branch_cost
15692 || new_opts_set.x_s390_warn_framesize
15693 || new_opts_set.x_s390_warn_dynamicstack_p)
15695 const unsigned char *src = (const unsigned char *)opts_set;
15696 unsigned char *dest = (unsigned char *)&new_opts_set;
15699 /* Merge the original option flags into the new ones. */
15700 for (i = 0; i < sizeof(*opts_set); i++)
15703 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
15704 s390_option_override_internal (false, opts, &new_opts_set);
15705 /* Save the current options unless we are validating options for
15707 t = build_target_option_node (opts);

/* Hook to validate attribute((target("string"))).  */

static bool
s390_valid_target_attribute_p (tree fndecl,
			       tree ARG_UNUSED (name),
			       tree args,
			       int ARG_UNUSED (flags))
{
  struct gcc_options func_options;
  tree new_target, new_optimize;
  bool ret = true;

  /* attribute((target("default"))) does nothing, beyond
     affecting multi-versioning.  */
  if (TREE_VALUE (args)
      && TREE_CODE (TREE_VALUE (args)) == STRING_CST
      && TREE_CHAIN (args) == NULL_TREE
      && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
    return true;

  tree old_optimize = build_optimization_node (&global_options);

  /* Get the optimization options of the current function.  */
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  if (!func_optimize)
    func_optimize = old_optimize;

  /* Init func_options.  */
  memset (&func_options, 0, sizeof (func_options));
  init_options_struct (&func_options, NULL);
  lang_hooks.init_options_struct (&func_options);

  cl_optimization_restore (&func_options, TREE_OPTIMIZATION (func_optimize));

  /* Initialize func_options to the default before its target options can
     be set.  */
  cl_target_option_restore (&func_options,
			    TREE_TARGET_OPTION (target_option_default_node));

  new_target = s390_valid_target_attribute_tree (args, &func_options,
						 &global_options_set,
						 (args ==
						  current_target_pragma));
  new_optimize = build_optimization_node (&func_options);
  if (new_target == error_mark_node)
    ret = false;
  else if (fndecl && new_target)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
      if (old_optimize != new_optimize)
	DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }
  return ret;
}
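
/* Illustrative declarations this hook accepts (hypothetical functions):

     __attribute__ ((target ("htm"))) void f (void);     // as if -mhtm
     __attribute__ ((target ("default"))) void g (void); // multi-versioning only

   while an unknown string such as target("foo") is diagnosed by the
   inner parser as 'attribute(target("foo")) is unknown'.  */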

/* Hook to determine if one function can safely inline another.  */

static bool
s390_can_inline_p (tree caller, tree callee)
{
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  if (!callee_tree)
    callee_tree = target_option_default_node;
  if (!caller_tree)
    caller_tree = target_option_default_node;
  if (callee_tree == caller_tree)
    return true;

  struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
  struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
  bool ret = true;

  if ((caller_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP))
      != (callee_opts->x_target_flags & ~(MASK_SOFT_FLOAT | MASK_HARD_DFP)))
    ret = false;

  /* Don't inline functions to be compiled for a more recent arch into a
     function for an older arch.  */
  else if (caller_opts->x_s390_arch < callee_opts->x_s390_arch)
    ret = false;

  /* Inlining a hard float function into a soft float function is only
     allowed if the hard float function doesn't actually make use of
     floating point.

     We are called from FEs for multi-versioning call optimization, so
     beware of ipa_fn_summaries not available.  */
  else if (((TARGET_SOFT_FLOAT_P (caller_opts->x_target_flags)
	     && !TARGET_SOFT_FLOAT_P (callee_opts->x_target_flags))
	    || (!TARGET_HARD_DFP_P (caller_opts->x_target_flags)
		&& TARGET_HARD_DFP_P (callee_opts->x_target_flags)))
	   && (! ipa_fn_summaries
	       || ipa_fn_summaries->get
	       (cgraph_node::get (callee))->fp_expressions))
    ret = false;

  return ret;
}
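
/* An illustrative consequence of the soft/hard float rule above
   (hypothetical functions): a hard float callee that really evaluates
   floating point expressions must not be inlined into a soft float
   caller, since the caller's context promises that no FPRs are touched:

     __attribute__ ((target ("hard-float"))) static double
     scale (double x) { return x * 2.0; }    // fp_expressions is set

     __attribute__ ((target ("soft-float"))) double
     f (double x) { return scale (x); }      // scale is not inlined  */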

/* Restore targets globals from NEW_TREE and invalidate the
   s390_previous_fndecl cache.  */

void
s390_activate_target_options (tree new_tree)
{
  cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
  if (TREE_TARGET_GLOBALS (new_tree))
    restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
  else if (new_tree == target_option_default_node)
    restore_target_globals (&default_target_globals);
  else
    TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
  s390_previous_fndecl = NULL_TREE;
}

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
s390_set_current_function (tree fndecl)
{
  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl == s390_previous_fndecl)
    return;

  tree old_tree;
  if (s390_previous_fndecl == NULL_TREE)
    old_tree = target_option_current_node;
  else if (DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl))
    old_tree = DECL_FUNCTION_SPECIFIC_TARGET (s390_previous_fndecl);
  else
    old_tree = target_option_default_node;

  if (fndecl == NULL_TREE)
    {
      if (old_tree != target_option_current_node)
	s390_activate_target_options (target_option_current_node);
      return;
    }

  tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
  if (new_tree == NULL_TREE)
    new_tree = target_option_default_node;

  if (old_tree != new_tree)
    s390_activate_target_options (new_tree);
  s390_previous_fndecl = fndecl;
}
#endif

/* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P.  */

static bool
s390_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
				     unsigned int align ATTRIBUTE_UNUSED,
				     enum by_pieces_operation op ATTRIBUTE_UNUSED,
				     bool speed_p ATTRIBUTE_UNUSED)
{
  return (size == 1 || size == 2
	  || size == 4 || (TARGET_ZARCH && size == 8));
}
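
/* For illustration: with the hook above a fixed-size copy such as

     memcpy (dst, buf, 8);

   may be expanded piecewise (a single 8-byte load/store pair, size 8
   being accepted only under TARGET_ZARCH), while a size like 3 is
   refused here and left to the movmem expander or a library call.  */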

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
s390_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  tree sfpc = s390_builtin_decls[S390_BUILTIN_s390_sfpc];
  tree efpc = s390_builtin_decls[S390_BUILTIN_s390_efpc];
  tree call_efpc = build_call_expr (efpc, 0);
  tree fenv_var = create_tmp_var_raw (unsigned_type_node);

#define FPC_EXCEPTION_MASK	 HOST_WIDE_INT_UC (0xf8000000)
#define FPC_FLAGS_MASK		 HOST_WIDE_INT_UC (0x00f80000)
#define FPC_DXC_MASK		 HOST_WIDE_INT_UC (0x0000ff00)
#define FPC_EXCEPTION_MASK_SHIFT HOST_WIDE_INT_UC (24)
#define FPC_FLAGS_SHIFT		 HOST_WIDE_INT_UC (16)
#define FPC_DXC_SHIFT		 HOST_WIDE_INT_UC (8)

  /* Generates the equivalent of feholdexcept (&fenv_var):

       fenv_var = __builtin_s390_efpc ();
       __builtin_s390_sfpc (fenv_var & mask)  */
  tree old_fpc = build2 (MODIFY_EXPR, unsigned_type_node, fenv_var, call_efpc);
  tree new_fpc
    = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var,
	      build_int_cst (unsigned_type_node,
			     ~(FPC_DXC_MASK | FPC_FLAGS_MASK
			       | FPC_EXCEPTION_MASK)));
  tree set_new_fpc = build_call_expr (sfpc, 1, new_fpc);
  *hold = build2 (COMPOUND_EXPR, void_type_node, old_fpc, set_new_fpc);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

       __builtin_s390_sfpc (__builtin_s390_efpc () & mask)  */
  new_fpc = build2 (BIT_AND_EXPR, unsigned_type_node, call_efpc,
		    build_int_cst (unsigned_type_node,
				   ~(FPC_DXC_MASK | FPC_FLAGS_MASK)));
  *clear = build_call_expr (sfpc, 1, new_fpc);

  /* Generates the equivalent of feupdateenv (fenv_var):

       old_fpc = __builtin_s390_efpc ();
       __builtin_s390_sfpc (fenv_var);
       __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK) >> FPC_FLAGS_SHIFT);  */

  old_fpc = create_tmp_var_raw (unsigned_type_node);
  tree store_old_fpc = build2 (MODIFY_EXPR, void_type_node,
			       old_fpc, call_efpc);

  set_new_fpc = build_call_expr (sfpc, 1, fenv_var);

  tree raise_old_except = build2 (BIT_AND_EXPR, unsigned_type_node, old_fpc,
				  build_int_cst (unsigned_type_node,
						 FPC_FLAGS_MASK));
  raise_old_except = build2 (RSHIFT_EXPR, unsigned_type_node, raise_old_except,
			     build_int_cst (unsigned_type_node,
					    FPC_FLAGS_SHIFT));
  tree atomic_feraiseexcept
    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  raise_old_except = build_call_expr (atomic_feraiseexcept,
				      1, raise_old_except);

  *update = build2 (COMPOUND_EXPR, void_type_node,
		    build2 (COMPOUND_EXPR, void_type_node,
			    store_old_fpc, set_new_fpc),
		    raise_old_except);

#undef FPC_EXCEPTION_MASK
#undef FPC_FLAGS_MASK
#undef FPC_DXC_MASK
#undef FPC_EXCEPTION_MASK_SHIFT
#undef FPC_FLAGS_SHIFT
#undef FPC_DXC_SHIFT
}
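
/* Taken together, the three sequences built above correspond to this
   user-level sketch around an atomic floating point compound assignment
   (hold_mask and clear_mask abbreviate the inverted FPC field masks
   used above):

     fenv_var = __builtin_s390_efpc ();                         // *hold
     __builtin_s390_sfpc (fenv_var & hold_mask);
     ... perform the operation ...
     __builtin_s390_sfpc (__builtin_s390_efpc () & clear_mask); // *clear
     old_fpc = __builtin_s390_efpc ();                          // *update
     __builtin_s390_sfpc (fenv_var);
     __atomic_feraiseexcept ((old_fpc & FPC_FLAGS_MASK)
			     >> FPC_FLAGS_SHIFT);  */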

/* Return the vector mode to be used for inner mode MODE when doing
   vectorization.  */
static machine_mode
s390_preferred_simd_mode (scalar_mode mode)
{
  if (TARGET_VXE)
    switch (mode)
      {
      case E_SFmode:
	return V4SFmode;
      default:;
      }

  if (TARGET_VX)
    switch (mode)
      {
      case E_DFmode:
	return V2DFmode;
      case E_DImode:
	return V2DImode;
      case E_SImode:
	return V4SImode;
      case E_HImode:
	return V8HImode;
      case E_QImode:
	return V16QImode;
      default:;
      }
  return word_mode;
}
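
/* For example, with -march=z13 -mzvector a loop over 32-bit ints such as

     for (int i = 0; i < n; i++)
       c[i] = a[i] + b[i];

   is vectorized using the V4SImode returned above, i.e. four ints per
   128-bit vector register; SFmode only receives a vector mode once
   TARGET_VXE (z14 and later) is available.  */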

/* Our hardware does not require vectors to be strictly aligned.  */
static bool
s390_support_vector_misalignment (machine_mode mode ATTRIBUTE_UNUSED,
				  const_tree type ATTRIBUTE_UNUSED,
				  int misalignment ATTRIBUTE_UNUSED,
				  bool is_packed ATTRIBUTE_UNUSED)
{
  if (TARGET_VX)
    return true;

  return default_builtin_support_vector_misalignment (mode, type, misalignment,
						      is_packed);
}

/* The vector ABI requires vector types to be aligned on an 8 byte
   boundary (our stack alignment).  However, we allow this to be
   overridden by the user, even though doing so breaks the ABI.  */
static HOST_WIDE_INT
s390_vector_alignment (const_tree type)
{
  if (!TARGET_VX_ABI)
    return default_vector_alignment (type);

  if (TYPE_USER_ALIGN (type))
    return TYPE_ALIGN (type);

  return MIN (64, tree_to_shwi (TYPE_SIZE (type)));
}
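
/* Illustration of the cases above (hypothetical typedefs):

     typedef int v4si __attribute__ ((vector_size (16)));
     // VX ABI: MIN (64, 128) = 64 bits, i.e. 8 byte alignment

     typedef v4si v4si_a16 __attribute__ ((aligned (16)));
     // TYPE_USER_ALIGN: 16 byte alignment, breaking the vector ABI

   Without TARGET_VX_ABI the default natural alignment is used.  */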

/* Implement TARGET_CONSTANT_ALIGNMENT.  Alignment on even addresses for
   LARL instruction.  */

static HOST_WIDE_INT
s390_constant_alignment (const_tree, HOST_WIDE_INT align)
{
  return MAX (align, 16);
}
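
/* E.g. LARL addresses its operand in halfword steps, so every constant
   must start at an even address; keeping literals at least 2-byte
   (16-bit) aligned makes

     larl  %r2,.LC0

   valid for any constant pool label .LC0.  */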

#ifdef HAVE_AS_MACHINE_MACHINEMODE
/* Implement TARGET_ASM_FILE_START.  */
static void
s390_asm_file_start (void)
{
  default_file_start ();
  s390_asm_output_machine_for_arch (asm_out_file);
}
#endif

/* Implement TARGET_ASM_FILE_END.  */
static void
s390_asm_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  varpool_node *vnode;
  cgraph_node *cnode;

  FOR_EACH_VARIABLE (vnode)
    if (TREE_PUBLIC (vnode->decl))
      s390_check_type_for_vector_abi (TREE_TYPE (vnode->decl), false, false);

  FOR_EACH_FUNCTION (cnode)
    if (TREE_PUBLIC (cnode->decl))
      s390_check_type_for_vector_abi (TREE_TYPE (cnode->decl), false, false);

  if (s390_vector_abi != 0)
    fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
	     s390_vector_abi);
#endif

  file_end_indicate_exec_stack ();

  if (flag_split_stack)
    file_end_indicate_split_stack ();
}
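
/* For reference, the directive emitted above looks like

     .gnu_attribute 8, 2

   (tag 8 with the current s390_vector_abi value; 2 is just an example),
   which lets the linker warn about objects built with incompatible
   vector ABIs.  */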

/* Return true if TYPE is a vector bool type.  */
static inline bool
s390_vector_bool_type_p (const_tree type)
{
  return TYPE_VECTOR_OPAQUE (type);
}

/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */
static const char*
s390_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
{
  bool bool1_p, bool2_p;
  bool plusminus_p;
  bool muldiv_p;
  bool compare_p;
  machine_mode mode1, mode2;

  if (!TARGET_ZVECTOR)
    return NULL;

  if (!VECTOR_TYPE_P (type1) || !VECTOR_TYPE_P (type2))
    return NULL;

  bool1_p = s390_vector_bool_type_p (type1);
  bool2_p = s390_vector_bool_type_p (type2);

  /* Mixing signed and unsigned types is forbidden for all
     vector operations.  */
  if (!bool1_p && !bool2_p
      && TYPE_UNSIGNED (type1) != TYPE_UNSIGNED (type2))
    return N_("types differ in signedness");

  plusminus_p = (op == PLUS_EXPR || op == MINUS_EXPR);
  muldiv_p = (op == MULT_EXPR || op == RDIV_EXPR || op == TRUNC_DIV_EXPR
	      || op == CEIL_DIV_EXPR || op == FLOOR_DIV_EXPR
	      || op == ROUND_DIV_EXPR);
  compare_p = (op == LT_EXPR || op == LE_EXPR || op == GT_EXPR || op == GE_EXPR
	       || op == EQ_EXPR || op == NE_EXPR);

  if (bool1_p && bool2_p && (plusminus_p || muldiv_p))
    return N_("binary operator does not support two vector bool operands");

  if (bool1_p != bool2_p && (muldiv_p || compare_p))
    return N_("binary operator does not support vector bool operand");

  mode1 = TYPE_MODE (type1);
  mode2 = TYPE_MODE (type2);

  if (bool1_p != bool2_p && plusminus_p
      && (GET_MODE_CLASS (mode1) == MODE_VECTOR_FLOAT
	  || GET_MODE_CLASS (mode2) == MODE_VECTOR_FLOAT))
    return N_("binary operator does not support mixing vector "
	      "bool with floating point vector operands");

  return NULL;
}
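
/* Examples rejected by the checks above (zvector language extension;
   hypothetical operands):

     vector bool int b1, b2;
     vector signed int s;
     vector unsigned int u;

     s + u;    // types differ in signedness
     b1 * b2;  // ... does not support two vector bool operands
     b1 / s;   // ... does not support vector bool operand  */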

/* Implement TARGET_C_EXCESS_PRECISION.

   FIXME: For historical reasons, float_t and double_t are typedef'ed to
   double on s390, causing operations on float_t to operate in a higher
   precision than is necessary.  However, it is not the case that SFmode
   operations have implicit excess precision, and we generate more optimal
   code if we let the compiler know no implicit extra precision is added.

   That means when we are compiling with -fexcess-precision=fast, the value
   we set for FLT_EVAL_METHOD will be out of line with the actual precision of
   float_t (though they would be correct for -fexcess-precision=standard).

   A complete fix would modify glibc to remove the unnecessary typedef
   of float_t to double.  */

static enum flt_eval_method
s390_excess_precision (enum excess_precision_type type)
{
  switch (type)
    {
      case EXCESS_PRECISION_TYPE_IMPLICIT:
      case EXCESS_PRECISION_TYPE_FAST:
	/* The fastest type to promote to will always be the native type,
	   whether that occurs with implicit excess precision or
	   otherwise.  */
	return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
      case EXCESS_PRECISION_TYPE_STANDARD:
	/* Otherwise, when we are in a standards compliant mode, to
	   ensure consistency with the implementation in glibc, report that
	   float is evaluated to the range and precision of double.  */
	return FLT_EVAL_METHOD_PROMOTE_TO_DOUBLE;
      default:
	gcc_unreachable ();
    }
  return FLT_EVAL_METHOD_UNPREDICTABLE;
}
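
/* Concretely: since glibc typedefs float_t to double on s390, under
   -fexcess-precision=standard the code

     float a, b;
     float_t prod = a * b;   // multiplication carried out in double

   is evaluated in double precision (FLT_EVAL_METHOD == 1), while
   -fexcess-precision=fast keeps the multiplication in SFmode, as the
   FLT_EVAL_METHOD_PROMOTE_TO_FLOAT result above indicates.  */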

/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
s390_asan_shadow_offset (void)
{
  return TARGET_64BIT ? HOST_WIDE_INT_1U << 52 : HOST_WIDE_INT_UC (0x20000000);
}
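
/* AddressSanitizer combines this offset with the standard shadow
   mapping (scale 3), i.e. instrumentation computes

     shadow = (addr >> 3) + (TARGET_64BIT ? 1UL << 52 : 0x20000000);

   so one shadow byte describes eight application bytes.  */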

/* Initialize GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER s390_assemble_integer

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""

#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE s390_option_override

#ifdef TARGET_THREAD_SSP_OFFSET
#undef TARGET_STACK_PROTECT_GUARD
#define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
#endif

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL s390_builtin_decl

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_C_EXCESS_PRECISION
#define TARGET_C_EXCESS_PRECISION s390_excess_precision

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  s390_builtin_vectorization_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_ASAN_SHADOW_OFFSET
#define TARGET_ASAN_SHADOW_OFFSET s390_asan_shadow_offset
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING s390_function_arg_padding
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true

#undef TARGET_KEEP_LEAF_WHEN_PROFILED
#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif

#undef TARGET_DWARF_FRAME_REG_MODE
#define TARGET_DWARF_FRAME_REG_MODE s390_dwarf_frame_reg_mode

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P s390_vector_mode_supported_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED s390_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE s390_secondary_memory_needed_mode

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS s390_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK s390_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P s390_modes_tieable_p

#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
  s390_hard_regno_call_part_clobbered

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE s390_attribute_table

#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY s390_live_on_entry

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  s390_use_by_pieces_infrastructure_p

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV s390_atomic_assign_expand_fenv

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN s390_invalid_arg_for_unprototyped_fn

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE s390_preferred_simd_mode

#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT s390_support_vector_misalignment

#undef TARGET_VECTOR_ALIGNMENT
#define TARGET_VECTOR_ALIGNMENT s390_vector_alignment

#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP s390_invalid_binary_op

#ifdef HAVE_AS_MACHINE_MACHINEMODE
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START s390_asm_file_start
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END s390_asm_file_end

#if S390_USE_TARGET_ATTRIBUTE
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION s390_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P s390_valid_target_attribute_p

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P s390_can_inline_p
#endif

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE s390_function_specific_restore

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS s390_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT s390_constant_alignment

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"