/* Subroutines used for code generation on IBM S/390 and zSeries
   Copyright (C) 1999-2014 Free Software Foundation, Inc.
   Contributed by Hartmut Penner (hpenner@de.ibm.com) and
                  Ulrich Weigand (uweigand@de.ibm.com) and
                  Andreas Krebbel (Andreas.Krebbel@de.ibm.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "print-tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "basic-block.h"
#include "target-def.h"
#include "langhooks.h"
#include "hash-table.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "tree-pass.h"

/* Define the specific costs for a given cpu.  */

struct processor_costs
{
  /* multiplication */
  const int m;        /* cost of an M instruction.  */
  const int mghi;     /* cost of an MGHI instruction.  */
  const int mh;       /* cost of an MH instruction.  */
  const int mhi;      /* cost of an MHI instruction.  */
  const int ml;       /* cost of an ML instruction.  */
  const int mr;       /* cost of an MR instruction.  */
  const int ms;       /* cost of an MS instruction.  */
  const int msg;      /* cost of an MSG instruction.  */
  const int msgf;     /* cost of an MSGF instruction.  */
  const int msgfr;    /* cost of an MSGFR instruction.  */
  const int msgr;     /* cost of an MSGR instruction.  */
  const int msr;      /* cost of an MSR instruction.  */
  const int mult_df;  /* cost of multiplication in DFmode.  */
  const int mxbr;     /* cost of multiplication in TFmode.  */
  /* square root */
  const int sqxbr;    /* cost of square root in TFmode.  */
  const int sqdbr;    /* cost of square root in DFmode.  */
  const int sqebr;    /* cost of square root in SFmode.  */
  /* multiply and add */
  const int madbr;    /* cost of multiply and add in DFmode.  */
  const int maebr;    /* cost of multiply and add in SFmode.  */
  /* division */
  const int dxbr;
  const int ddbr;
  const int debr;
  const int dlgr;
  const int dlr;
  const int dr;
  const int dsgfr;
  const int dsgr;
};

const struct processor_costs *s390_cost;

static const
struct processor_costs z900_cost =
{
  COSTS_N_INSNS (5),     /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (4),     /* MHI   */
  COSTS_N_INSNS (5),     /* ML    */
  COSTS_N_INSNS (5),     /* MR    */
  COSTS_N_INSNS (4),     /* MS    */
  COSTS_N_INSNS (15),    /* MSG   */
  COSTS_N_INSNS (7),     /* MSGF  */
  COSTS_N_INSNS (7),     /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (7),     /* multiplication in DFmode */
  COSTS_N_INSNS (13),    /* MXBR */
  COSTS_N_INSNS (136),   /* SQXBR */
  COSTS_N_INSNS (44),    /* SQDBR */
  COSTS_N_INSNS (35),    /* SQEBR */
  COSTS_N_INSNS (18),    /* MADBR */
  COSTS_N_INSNS (13),    /* MAEBR */
  COSTS_N_INSNS (134),   /* DXBR */
  COSTS_N_INSNS (30),    /* DDBR */
  COSTS_N_INSNS (27),    /* DEBR */
  COSTS_N_INSNS (220),   /* DLGR */
  COSTS_N_INSNS (34),    /* DLR */
  COSTS_N_INSNS (34),    /* DR */
  COSTS_N_INSNS (32),    /* DSGFR */
  COSTS_N_INSNS (32),    /* DSGR */
};

static const
struct processor_costs z990_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (176),   /* DLGR */
  COSTS_N_INSNS (31),    /* DLR */
  COSTS_N_INSNS (31),    /* DR */
  COSTS_N_INSNS (31),    /* DSGFR */
  COSTS_N_INSNS (31),    /* DSGR */
};

static const
struct processor_costs z9_109_cost =
{
  COSTS_N_INSNS (4),     /* M     */
  COSTS_N_INSNS (2),     /* MGHI  */
  COSTS_N_INSNS (2),     /* MH    */
  COSTS_N_INSNS (2),     /* MHI   */
  COSTS_N_INSNS (4),     /* ML    */
  COSTS_N_INSNS (4),     /* MR    */
  COSTS_N_INSNS (5),     /* MS    */
  COSTS_N_INSNS (6),     /* MSG   */
  COSTS_N_INSNS (4),     /* MSGF  */
  COSTS_N_INSNS (4),     /* MSGFR */
  COSTS_N_INSNS (4),     /* MSGR  */
  COSTS_N_INSNS (4),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (28),    /* MXBR */
  COSTS_N_INSNS (130),   /* SQXBR */
  COSTS_N_INSNS (66),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (60),    /* DXBR */
  COSTS_N_INSNS (40),    /* DDBR */
  COSTS_N_INSNS (26),    /* DEBR */
  COSTS_N_INSNS (30),    /* DLGR */
  COSTS_N_INSNS (23),    /* DLR */
  COSTS_N_INSNS (23),    /* DR */
  COSTS_N_INSNS (24),    /* DSGFR */
  COSTS_N_INSNS (24),    /* DSGR */
};

static const
struct processor_costs z10_cost =
{
  COSTS_N_INSNS (10),    /* M     */
  COSTS_N_INSNS (10),    /* MGHI  */
  COSTS_N_INSNS (10),    /* MH    */
  COSTS_N_INSNS (10),    /* MHI   */
  COSTS_N_INSNS (10),    /* ML    */
  COSTS_N_INSNS (10),    /* MR    */
  COSTS_N_INSNS (10),    /* MS    */
  COSTS_N_INSNS (10),    /* MSG   */
  COSTS_N_INSNS (10),    /* MSGF  */
  COSTS_N_INSNS (10),    /* MSGFR */
  COSTS_N_INSNS (10),    /* MSGR  */
  COSTS_N_INSNS (10),    /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (50),    /* MXBR */
  COSTS_N_INSNS (120),   /* SQXBR */
  COSTS_N_INSNS (52),    /* SQDBR */
  COSTS_N_INSNS (38),    /* SQEBR */
  COSTS_N_INSNS (1),     /* MADBR */
  COSTS_N_INSNS (1),     /* MAEBR */
  COSTS_N_INSNS (111),   /* DXBR */
  COSTS_N_INSNS (39),    /* DDBR */
  COSTS_N_INSNS (32),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR */
  COSTS_N_INSNS (71),    /* DLR */
  COSTS_N_INSNS (71),    /* DR */
  COSTS_N_INSNS (71),    /* DSGFR */
  COSTS_N_INSNS (71),    /* DSGR */
};

static const
struct processor_costs z196_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (101),   /* DXBR B+101 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};

static const
struct processor_costs zEC12_cost =
{
  COSTS_N_INSNS (7),     /* M     */
  COSTS_N_INSNS (5),     /* MGHI  */
  COSTS_N_INSNS (5),     /* MH    */
  COSTS_N_INSNS (5),     /* MHI   */
  COSTS_N_INSNS (7),     /* ML    */
  COSTS_N_INSNS (7),     /* MR    */
  COSTS_N_INSNS (6),     /* MS    */
  COSTS_N_INSNS (8),     /* MSG   */
  COSTS_N_INSNS (6),     /* MSGF  */
  COSTS_N_INSNS (6),     /* MSGFR */
  COSTS_N_INSNS (8),     /* MSGR  */
  COSTS_N_INSNS (6),     /* MSR   */
  COSTS_N_INSNS (1),     /* multiplication in DFmode */
  COSTS_N_INSNS (40),    /* MXBR B+40 */
  COSTS_N_INSNS (100),   /* SQXBR B+100 */
  COSTS_N_INSNS (42),    /* SQDBR B+42 */
  COSTS_N_INSNS (28),    /* SQEBR B+28 */
  COSTS_N_INSNS (1),     /* MADBR B */
  COSTS_N_INSNS (1),     /* MAEBR B */
  COSTS_N_INSNS (131),   /* DXBR B+131 */
  COSTS_N_INSNS (29),    /* DDBR */
  COSTS_N_INSNS (22),    /* DEBR */
  COSTS_N_INSNS (160),   /* DLGR cracked */
  COSTS_N_INSNS (160),   /* DLR cracked */
  COSTS_N_INSNS (160),   /* DR expanded */
  COSTS_N_INSNS (160),   /* DSGFR cracked */
  COSTS_N_INSNS (160),   /* DSGR cracked */
};

extern int reload_completed;

/* Kept up to date using the SCHED_VARIABLE_ISSUE hook.  */
static rtx_insn *last_scheduled_insn;

/* Structure used to hold the components of a S/390 memory
   address.  A legitimate address on S/390 is of the general
   form
        base + index + displacement
   where any of the components is optional.

   base and index are registers of the class ADDR_REGS,
   displacement is an unsigned 12-bit immediate constant.  */

struct s390_address
{
  rtx base;
  rtx indx;
  rtx disp;
  bool pointer;
  bool literal_pool;
};

/* The following structure is embedded in the machine
   specific part of struct function.  */

struct GTY (()) s390_frame_layout
{
  /* Offset within stack frame.  */
  HOST_WIDE_INT gprs_offset;
  HOST_WIDE_INT f0_offset;
  HOST_WIDE_INT f4_offset;
  HOST_WIDE_INT f8_offset;
  HOST_WIDE_INT backchain_offset;

  /* Numbers of the first and last GPR for which slots in the
     register save area are reserved.  */
  int first_save_gpr_slot;
  int last_save_gpr_slot;

  /* Location (FP register number) where GPRs (r0-r15) should
     be saved to.
       0 - does not need to be saved at all
      -1 - stack slot  */
  signed char gpr_save_slots[16];

  /* Number of first and last gpr to be saved, restored.  */
  int first_save_gpr;
  int first_restore_gpr;
  int last_save_gpr;
  int last_restore_gpr;

  /* Bits standing for floating point registers.  Set, if the
     respective register has to be saved.  Starting with reg 16 (f0)
     at the rightmost bit.
     Bit 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
     fpr 15 13 11  9 14 12 10  8  7  5  3  1  6  4  2  0
     reg 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16  */
  unsigned int fpr_bitmap;
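
  /* For instance, per the table above and the cfun_set_fpr_save macro
     below, marking f8 (hard reg 24) for saving sets bit 8:
       fpr_bitmap |= 1 << (24 - FPR0_REGNUM);  */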

  /* Number of floating point registers f8-f15 which must be saved.  */
  int high_fprs;

  /* Set if return address needs to be saved.
     This flag is set by s390_return_addr_rtx if it could not use
     the initial value of r14 and therefore depends on r14 saved
     to the stack.  */
  bool save_return_addr_p;

  /* Size of stack frame.  */
  HOST_WIDE_INT frame_size;
};

/* Define the structure for the machine field in struct function.  */

struct GTY(()) machine_function
{
  struct s390_frame_layout frame_layout;

  /* Literal pool base register.  */
  rtx base_reg;

  /* True if we may need to perform branch splitting.  */
  bool split_branches_pending_p;

  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  bool has_landing_pad_p;

  /* True if the current function may contain a tbegin clobbering
     the FPRs.  */
  bool tbegin_p;
};

/* Few accessor macros for struct cfun->machine->s390_frame_layout.  */

#define cfun_frame_layout (cfun->machine->frame_layout)
#define cfun_save_high_fprs_p (!!cfun_frame_layout.high_fprs)
#define cfun_save_arg_fprs_p (!!(TARGET_64BIT                          \
                                 ? cfun_frame_layout.fpr_bitmap & 0x0f \
                                 : cfun_frame_layout.fpr_bitmap & 0x03))
#define cfun_gprs_save_area_size ((cfun_frame_layout.last_save_gpr_slot - \
  cfun_frame_layout.first_save_gpr_slot + 1) * UNITS_PER_LONG)
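
/* For example, with first_save_gpr_slot == 14 and last_save_gpr_slot == 15
   (r14 and r15 saved), cfun_gprs_save_area_size is 2 * UNITS_PER_LONG,
   i.e. 16 bytes on 64-bit and 8 bytes on 31-bit targets.  */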
#define cfun_set_fpr_save(REGNO) (cfun->machine->frame_layout.fpr_bitmap |= \
  (1 << (REGNO - FPR0_REGNUM)))
#define cfun_fpr_save_p(REGNO) (!!(cfun->machine->frame_layout.fpr_bitmap & \
  (1 << (REGNO - FPR0_REGNUM))))
#define cfun_gpr_save_slot(REGNO) \
  cfun->machine->frame_layout.gpr_save_slots[REGNO]

/* Number of GPRs and FPRs used for argument passing.  */
#define GP_ARG_NUM_REG 5
#define FP_ARG_NUM_REG (TARGET_64BIT ? 4 : 2)
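
/* These counts reflect the s390 ELF ABI: integer arguments are passed in
   r2-r6, floating point arguments in f0, f2, f4 and f6 on 64-bit targets
   and in f0 and f2 on 31-bit targets.  */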

/* A couple of shortcuts.  */
#define CONST_OK_FOR_J(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'J', "J")
#define CONST_OK_FOR_K(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'K', "K")
#define CONST_OK_FOR_Os(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Os")
#define CONST_OK_FOR_Op(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "Op")
#define CONST_OK_FOR_On(x) \
        CONST_OK_FOR_CONSTRAINT_P((x), 'O', "On")

#define REGNO_PAIR_OK(REGNO, MODE)                               \
  (HARD_REGNO_NREGS ((REGNO), (MODE)) == 1 || !((REGNO) & 1))
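
/* E.g. a mode needing a register pair occupies an even/odd couple, so
   REGNO_PAIR_OK holds for (r6, TImode) but not for (r7, TImode), while
   any REGNO is fine for modes fitting in a single register.  */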

/* That's the read ahead of the dynamic branch prediction unit in
   bytes on a z10 (or higher) CPU.  */
#define PREDICT_DISTANCE (TARGET_Z10 ? 384 : 2048)

static const int s390_hotpatch_trampoline_halfwords_default = 12;
static const int s390_hotpatch_trampoline_halfwords_max = 1000000;
static int s390_hotpatch_trampoline_halfwords = -1;

/* Return the argument of the given hotpatch attribute or the default value if
   no argument is present.  */

static inline int
get_hotpatch_attribute (tree hotpatch_attr)
{
  const tree args = TREE_VALUE (hotpatch_attr);

  return (args) ?
    TREE_INT_CST_LOW (TREE_VALUE (args)) :
    s390_hotpatch_trampoline_halfwords_default;
}

/* Check whether the hotpatch attribute is applied to a function and, if it has
   an argument, the argument is valid.  */

static tree
s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }
  else if (args)
    {
      tree expr = TREE_VALUE (args);

      if (TREE_CODE (expr) != INTEGER_CST
          || !INTEGRAL_TYPE_P (TREE_TYPE (expr))
          || wi::gtu_p (expr, s390_hotpatch_trampoline_halfwords_max))
        {
          error ("requested %qE attribute is not a non-negative integer"
                 " constant or too large (max. %d)", name,
                 s390_hotpatch_trampoline_halfwords_max);
          *no_add_attrs = true;
        }
    }

  return NULL_TREE;
}

static const struct attribute_spec s390_attribute_table[] = {
  { "hotpatch", 0, 1, true, false, false,
    s390_handle_hotpatch_attribute, false },
  /* End element.  */
  { NULL, 0, 0, false, false, false, NULL, false }
};

/* Return the alignment for LABEL.  We default to the -falign-labels
   value except for the literal pool base label.  */
int
s390_label_align (rtx label)
{
  rtx prev_insn = prev_active_insn (label);

  if (prev_insn == NULL_RTX)
    goto old;

  prev_insn = single_set (prev_insn);

  if (prev_insn == NULL_RTX)
    goto old;

  prev_insn = SET_SRC (prev_insn);

  /* Don't align literal pool base labels.  */
  if (GET_CODE (prev_insn) == UNSPEC
      && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
    return 0;

 old:
  return align_labels_log;
}

static enum machine_mode
s390_libgcc_cmp_return_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static enum machine_mode
s390_libgcc_shift_count_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

static enum machine_mode
s390_unwind_word_mode (void)
{
  return TARGET_64BIT ? DImode : SImode;
}

/* Return true if the back end supports mode MODE.  */
static bool
s390_scalar_mode_supported_p (enum machine_mode mode)
{
  /* In contrast to the default implementation reject TImode constants on 31bit
     TARGET_ZARCH for ABI compliance.  */
  if (!TARGET_64BIT && TARGET_ZARCH && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();

  return default_scalar_mode_supported_p (mode);
}

/* Set the has_landing_pad_p flag in struct machine_function to VALUE.  */

void
s390_set_has_landing_pad_p (bool value)
{
  cfun->machine->has_landing_pad_p = value;
}

/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static enum machine_mode
s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
{
  if (m1 == m2)
    return m1;

  switch (m1)
    {
    case CCZmode:
      if (m2 == CCUmode || m2 == CCTmode || m2 == CCZ1mode
          || m2 == CCSmode || m2 == CCSRmode || m2 == CCURmode)
        return m2;
      return VOIDmode;

    default:
      return VOIDmode;
    }
}

/* Return true if SET either doesn't set the CC register, or else
   the source and destination have matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.  */

static bool
s390_match_ccmode_set (rtx set, enum machine_mode req_mode)
{
  enum machine_mode set_mode;

  gcc_assert (GET_CODE (set) == SET);

  if (GET_CODE (SET_DEST (set)) != REG || !CC_REGNO_P (REGNO (SET_DEST (set))))
    return 1;

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCSmode: case CCSRmode: case CCUmode: case CCURmode:
    case CCLmode: case CCL1mode: case CCL2mode: case CCL3mode:
    case CCT1mode: case CCT2mode: case CCT3mode:
      if (req_mode != set_mode)
        return 0;
      break;

    case CCZmode:
      if (req_mode != CCSmode && req_mode != CCUmode && req_mode != CCTmode
          && req_mode != CCSRmode && req_mode != CCURmode)
        return 0;
      break;

    case CCAPmode:
    case CCANmode:
      if (req_mode != CCAmode)
        return 0;
      break;

    default:
      gcc_unreachable ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}

/* Return true if every SET in INSN that sets the CC register
   has source and destination with matching CC modes and that
   CC mode is at least as constrained as REQ_MODE.
   If REQ_MODE is VOIDmode, always return false.  */

static bool
s390_match_ccmode (rtx_insn *insn, enum machine_mode req_mode)
{
  int i;

  /* s390_tm_ccmode returns VOIDmode to indicate failure.  */
  if (req_mode == VOIDmode)
    return false;

  if (GET_CODE (PATTERN (insn)) == SET)
    return s390_match_ccmode_set (PATTERN (insn), req_mode);

  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
      {
        rtx set = XVECEXP (PATTERN (insn), 0, i);
        if (GET_CODE (set) == SET)
          if (!s390_match_ccmode_set (set, req_mode))
            return false;
      }

  return true;
}

/* If a test-under-mask instruction can be used to implement
   (compare (and ... OP1) OP2), return the CC mode required
   to do that.  Otherwise, return VOIDmode.
   MIXED is true if the instruction can distinguish between
   CC1 and CC2 for mixed selected bits (TMxx); it is false
   if the instruction cannot (TM).  */

enum machine_mode
s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
{
  int bit0, bit1;

  /* ??? Fixme: should work on CONST_DOUBLE as well.  */
  if (GET_CODE (op1) != CONST_INT || GET_CODE (op2) != CONST_INT)
    return VOIDmode;

  /* Selected bits all zero: CC0.
     e.g.: int a; if ((a & (16 + 128)) == 0) */
  if (INTVAL (op2) == 0)
    return CCTmode;

  /* Selected bits all one: CC3.
     e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
  if (INTVAL (op2) == INTVAL (op1))
    return CCT3mode;

  /* Exactly two bits selected, mixed zeroes and ones: CC1 or CC2. e.g.:
     int a;
     if ((a & (16 + 128)) == 16)  -> CCT1
     if ((a & (16 + 128)) == 128) -> CCT2  */
  if (mixed)
    {
      bit1 = exact_log2 (INTVAL (op2));
      bit0 = exact_log2 (INTVAL (op1) ^ INTVAL (op2));
      if (bit0 != -1 && bit1 != -1)
        return bit0 > bit1 ? CCT1mode : CCT2mode;
    }

  return VOIDmode;
}
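
/* Worked example for the mixed case: with op1 = 144 (16 + 128) and
   op2 = 16, bit1 = exact_log2 (16) = 4 and bit0 = exact_log2 (144 ^ 16)
   = exact_log2 (128) = 7; since bit0 > bit1, the result is CCT1mode,
   matching the "== 16" pattern in the comment above.  */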

/* Given a comparison code OP (EQ, NE, etc.) and the operands
   OP0 and OP1 of a COMPARE, return the mode to be used for the
   comparison.  */

enum machine_mode
s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
{
  switch (code)
    {
    case EQ:
    case NE:
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCAPmode;
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
        return CCAPmode;
      if ((GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
           || GET_CODE (op1) == NEG)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCLmode;

      if (GET_CODE (op0) == AND)
        {
          /* Check whether we can potentially do it via TM.  */
          enum machine_mode ccmode;
          ccmode = s390_tm_ccmode (XEXP (op0, 1), op1, 1);
          if (ccmode != VOIDmode)
            {
              /* Relax CCTmode to CCZmode to allow fall-back to AND
                 if that turns out to be beneficial.  */
              return ccmode == CCTmode ? CCZmode : ccmode;
            }
        }

      if (register_operand (op0, HImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 65535))
        return CCT3mode;
      if (register_operand (op0, QImode)
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (op1) == -1 || INTVAL (op1) == 255))
        return CCT3mode;

      return CCZmode;

    case LE: case LT: case GE: case GT:
      /* The only overflow condition of NEG and ABS happens when
         -INT_MAX is used as parameter, which stays negative. So
         we have an overflow from a positive value to a negative.
         Using CCAP mode the resulting cc can be used for comparisons.  */
      if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCAPmode;

      /* If constants are involved in an add instruction it is possible to use
         the resulting cc for comparisons with zero. Knowing the sign of the
         constant the overflow behavior gets predictable. e.g.:
           int a, b; if ((b = a + c) > 0)
         with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP  */
      if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && (CONST_OK_FOR_K (INTVAL (XEXP (op0, 1)))
              || (CONST_OK_FOR_CONSTRAINT_P (INTVAL (XEXP (op0, 1)), 'O', "Os")
                  /* Avoid INT32_MIN on 32 bit.  */
                  && (!TARGET_ZARCH || INTVAL (XEXP (op0, 1)) != -0x7fffffff - 1))))
        {
          if (INTVAL (XEXP (op0, 1)) < 0)
            return CCANmode;
          else
            return CCAPmode;
        }
      /* Fall through.  */
    case UNORDERED: case ORDERED: case UNEQ: case UNLE:
    case UNLT: case UNGE: case UNGT: case LTGT:
      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCSRmode;
      return CCSmode;

    case LTU:
    case GEU:
      if (GET_CODE (op0) == PLUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCL1mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCURmode;
      return CCUmode;

    case LEU:
    case GTU:
      if (GET_CODE (op0) == MINUS
          && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
        return CCL2mode;

      if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (op1) != CONST_INT)
        return CCURmode;
      return CCUmode;

    default:
      gcc_unreachable ();
    }
}

/* Replace the comparison OP0 CODE OP1 by a semantically equivalent one
   that we can implement more efficiently.  */

static void
s390_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
                              bool op0_preserve_value)
{
  if (op0_preserve_value)
    return;

  /* Convert ZERO_EXTRACT back to AND to enable TM patterns.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == ZERO_EXTRACT
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && GET_CODE (XEXP (*op0, 2)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      HOST_WIDE_INT modesize = GET_MODE_BITSIZE (GET_MODE (inner));
      HOST_WIDE_INT len = INTVAL (XEXP (*op0, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (*op0, 2));

      if (len > 0 && len < modesize
          && pos >= 0 && pos + len <= modesize
          && modesize <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT block;
          block = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
          block <<= modesize - pos - len;

          *op0 = gen_rtx_AND (GET_MODE (inner), inner,
                              gen_int_mode (block, GET_MODE (inner)));
        }
    }

  /* Narrow AND of memory against immediate to enable TM.  */
  if ((*code == EQ || *code == NE)
      && *op1 == const0_rtx
      && GET_CODE (*op0) == AND
      && GET_CODE (XEXP (*op0, 1)) == CONST_INT
      && SCALAR_INT_MODE_P (GET_MODE (XEXP (*op0, 0))))
    {
      rtx inner = XEXP (*op0, 0);
      rtx mask = XEXP (*op0, 1);

      /* Ignore paradoxical SUBREGs if all extra bits are masked out.  */
      if (GET_CODE (inner) == SUBREG
          && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (inner)))
          && (GET_MODE_SIZE (GET_MODE (inner))
              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
          && ((INTVAL (mask)
               & GET_MODE_MASK (GET_MODE (inner))
               & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (inner))))
              == 0))
        inner = SUBREG_REG (inner);

      /* Do not change volatile MEMs.  */
      if (MEM_P (inner) && !MEM_VOLATILE_P (inner))
        {
          int part = s390_single_part (XEXP (*op0, 1),
                                       GET_MODE (inner), QImode, 0);
          if (part >= 0)
            {
              mask = gen_int_mode (s390_extract_part (mask, QImode, 0), QImode);
              inner = adjust_address_nv (inner, QImode, part);
              *op0 = gen_rtx_AND (QImode, inner, mask);
            }
        }
    }

  /* Narrow comparisons against 0xffff to HImode if possible.  */
  if ((*code == EQ || *code == NE)
      && GET_CODE (*op1) == CONST_INT
      && INTVAL (*op1) == 0xffff
      && SCALAR_INT_MODE_P (GET_MODE (*op0))
      && (nonzero_bits (*op0, GET_MODE (*op0))
          & ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
    {
      *op0 = gen_lowpart (HImode, *op0);
      *op1 = constm1_rtx;
    }

  /* Remove redundant UNSPEC_STRCMPCC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_STRCMPCC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_MODE (XVECEXP (*op0, 0, 0)) == CCUmode
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && *op1 == const0_rtx)
    {
      enum rtx_code new_code = UNKNOWN;
      switch (*code)
        {
        case EQ: new_code = EQ;  break;
        case NE: new_code = NE;  break;
        case LT: new_code = GTU; break;
        case GT: new_code = LTU; break;
        case LE: new_code = GEU; break;
        case GE: new_code = LEU; break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          *op0 = XVECEXP (*op0, 0, 0);
          *code = (int) new_code;
        }
    }

  /* Remove redundant UNSPEC_CC_TO_INT conversions if possible.  */
  if (GET_CODE (*op0) == UNSPEC
      && XINT (*op0, 1) == UNSPEC_CC_TO_INT
      && XVECLEN (*op0, 0) == 1
      && GET_CODE (XVECEXP (*op0, 0, 0)) == REG
      && REGNO (XVECEXP (*op0, 0, 0)) == CC_REGNUM
      && CONST_INT_P (*op1))
    {
      enum rtx_code new_code = UNKNOWN;
      switch (GET_MODE (XVECEXP (*op0, 0, 0)))
        {
        case CCZmode:
        case CCRAWmode:
          switch (*code)
            {
            case EQ: new_code = EQ;  break;
            case NE: new_code = NE;  break;
            default: break;
            }
          break;
        default: break;
        }

      if (new_code != UNKNOWN)
        {
          /* For CCRAWmode put the required cc mask into the second
             operand.  */
          if (GET_MODE (XVECEXP (*op0, 0, 0)) == CCRAWmode
              && INTVAL (*op1) >= 0 && INTVAL (*op1) <= 3)
            *op1 = gen_rtx_CONST_INT (VOIDmode, 1 << (3 - INTVAL (*op1)));
          *op0 = XVECEXP (*op0, 0, 0);
          *code = (int) new_code;
        }
    }

  /* Simplify cascaded EQ, NE with const0_rtx.  */
  if ((*code == NE || *code == EQ)
      && (GET_CODE (*op0) == EQ || GET_CODE (*op0) == NE)
      && GET_MODE (*op0) == SImode
      && GET_MODE (XEXP (*op0, 0)) == CCZ1mode
      && REG_P (XEXP (*op0, 0))
      && XEXP (*op0, 1) == const0_rtx
      && *op1 == const0_rtx)
    {
      if ((*code == EQ && GET_CODE (*op0) == NE)
          || (*code == NE && GET_CODE (*op0) == EQ))
        *code = EQ;
      else
        *code = NE;
      *op0 = XEXP (*op0, 0);
    }

  /* Prefer register over memory as first operand.  */
  if (MEM_P (*op0) && REG_P (*op1))
    {
      rtx tem = *op0; *op0 = *op1; *op1 = tem;
      *code = (int) swap_condition ((enum rtx_code) *code);
    }
}

/* Emit a compare instruction suitable to implement the comparison
   OP0 CODE OP1.  Return the correct condition RTL to be placed in
   the IF_THEN_ELSE of the conditional branch testing the result.  */

rtx
s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = s390_select_ccmode (code, op0, op1);
  rtx cc;

  /* Do not output a redundant compare instruction if a compare_and_swap
     pattern already computed the result and the machine modes are compatible.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    {
      gcc_assert (s390_cc_modes_compatible (GET_MODE (op0), mode)
                  == GET_MODE (op0));
      cc = op0;
    }
  else
    {
      cc = gen_rtx_REG (mode, CC_REGNUM);
      emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
    }

  return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}

/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
   matches CMP.
   Return the correct condition RTL to be placed in the IF_THEN_ELSE of the
   conditional branch testing the result.  */

static rtx
s390_emit_compare_and_swap (enum rtx_code code, rtx old, rtx mem,
                            rtx cmp, rtx new_rtx)
{
  emit_insn (gen_atomic_compare_and_swapsi_internal (old, mem, cmp, new_rtx));
  return s390_emit_compare (code, gen_rtx_REG (CCZ1mode, CC_REGNUM),
                            const0_rtx);
}

/* Emit a jump instruction to TARGET and return it.  If COND is
   NULL_RTX, emit an unconditional jump, else a conditional jump under
   condition COND.  */

rtx_insn *
s390_emit_jump (rtx target, rtx cond)
{
  rtx insn;

  target = gen_rtx_LABEL_REF (VOIDmode, target);
  if (cond)
    target = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, target, pc_rtx);

  insn = gen_rtx_SET (VOIDmode, pc_rtx, target);
  return emit_jump_insn (insn);
}

/* Return branch condition mask to implement a branch
   specified by CODE.  Return -1 for invalid comparisons.  */

static int
s390_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;
  const int CC3 = 1 << 0;

  gcc_assert (GET_CODE (XEXP (code, 0)) == REG);
  gcc_assert (REGNO (XEXP (code, 0)) == CC_REGNUM);
  gcc_assert (XEXP (code, 1) == const0_rtx
              || (GET_MODE (XEXP (code, 0)) == CCRAWmode
                  && CONST_INT_P (XEXP (code, 1))));

  switch (GET_MODE (XEXP (code, 0)))
    {
    case CCZmode:
    case CCZ1mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCT1mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC1;
        case NE: return CC0 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCT2mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC2;
        case NE: return CC0 | CC1 | CC3;
        default: return -1;
        }
      break;

    case CCT3mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC3;
        case NE: return CC0 | CC1 | CC2;
        default: return -1;
        }
      break;

    case CCLmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0 | CC2;
        case NE: return CC1 | CC3;
        default: return -1;
        }
      break;

    case CCL1mode:
      switch (GET_CODE (code))
        {
        case LTU: return CC2 | CC3;  /* carry */
        case GEU: return CC0 | CC1;  /* no carry */
        default: return -1;
        }
      break;

    case CCL2mode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0 | CC1;  /* borrow */
        case LEU: return CC2 | CC3;  /* no borrow */
        default: return -1;
        }
      break;

    case CCL3mode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0 | CC2;
        case NE: return CC1 | CC3;
        case LTU: return CC1;
        case GTU: return CC3;
        case LEU: return CC1 | CC2;
        case GEU: return CC2 | CC3;
        default: return -1;
        }

    case CCUmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LTU: return CC1;
        case GTU: return CC2;
        case LEU: return CC0 | CC1;
        case GEU: return CC0 | CC2;
        default: return -1;
        }
      break;

    case CCURmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC2 | CC1 | CC3;
        case LTU: return CC2;
        case GTU: return CC1;
        case LEU: return CC0 | CC2;
        case GEU: return CC0 | CC1;
        default: return -1;
        }
      break;

    case CCAPmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1 | CC3;
        case GT: return CC2;
        case LE: return CC0 | CC1 | CC3;
        case GE: return CC0 | CC2;
        default: return -1;
        }
      break;

    case CCANmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1;
        case GT: return CC2 | CC3;
        case LE: return CC0 | CC1;
        case GE: return CC0 | CC2 | CC3;
        default: return -1;
        }
      break;

    case CCSmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC1 | CC2 | CC3;
        case LT: return CC1;
        case GT: return CC2;
        case LE: return CC0 | CC1;
        case GE: return CC0 | CC2;
        case UNORDERED: return CC3;
        case ORDERED: return CC0 | CC1 | CC2;
        case UNEQ: return CC0 | CC3;
        case UNLT: return CC1 | CC3;
        case UNGT: return CC2 | CC3;
        case UNLE: return CC0 | CC1 | CC3;
        case UNGE: return CC0 | CC2 | CC3;
        case LTGT: return CC1 | CC2;
        default: return -1;
        }
      break;

    case CCSRmode:
      switch (GET_CODE (code))
        {
        case EQ: return CC0;
        case NE: return CC2 | CC1 | CC3;
        case LT: return CC2;
        case GT: return CC1;
        case LE: return CC0 | CC2;
        case GE: return CC0 | CC1;
        case UNORDERED: return CC3;
        case ORDERED: return CC0 | CC2 | CC1;
        case UNEQ: return CC0 | CC3;
        case UNLT: return CC2 | CC3;
        case UNGT: return CC1 | CC3;
        case UNLE: return CC0 | CC2 | CC3;
        case UNGE: return CC0 | CC1 | CC3;
        case LTGT: return CC2 | CC1;
        default: return -1;
        }
      break;

    case CCRAWmode:
      switch (GET_CODE (code))
        {
        case EQ:
          return INTVAL (XEXP (code, 1));
        case NE:
          return (INTVAL (XEXP (code, 1))) ^ 0xf;
        default:
          return -1;
        }

    default:
      return -1;
    }
}

/* Return branch condition mask to implement a compare and branch
   specified by CODE.  Return -1 for invalid comparisons.  */

int
s390_compare_and_branch_condition_mask (rtx code)
{
  const int CC0 = 1 << 3;
  const int CC1 = 1 << 2;
  const int CC2 = 1 << 1;

  switch (GET_CODE (code))
    {
    case EQ:  return CC0;
    case NE:  return CC1 | CC2;
    case LT:
    case LTU: return CC1;
    case GT:
    case GTU: return CC2;
    case LE:
    case LEU: return CC0 | CC1;
    case GE:
    case GEU: return CC0 | CC2;
    default:  gcc_unreachable ();
    }
  return -1;
}

/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  int mask;

  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  if (GET_CODE (XEXP (code, 0)) == REG
      && REGNO (XEXP (code, 0)) == CC_REGNUM
      && (XEXP (code, 1) == const0_rtx
          || (GET_MODE (XEXP (code, 0)) == CCRAWmode
              && CONST_INT_P (XEXP (code, 1)))))
    mask = s390_branch_condition_mask (code);
  else
    mask = s390_compare_and_branch_condition_mask (code);

  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}
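
/* For example, GE in CCSmode yields mask CC0 | CC2 = 10, so the "he"
   (branch on high or equal) mnemonic is returned; with INV set the mask
   becomes 5 and the inverted mnemonic is "nhe".  */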

/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, enum machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
        return value & part_mask;
    }

  gcc_unreachable ();
}
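
/* E.g. for op = 0x12340000, mode = HImode and def = 0 the loop skips the
   all-zero low part and returns 0x1234 from the second 16-bit part.  */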

/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

int
s390_single_part (rtx op,
                  enum machine_mode mode,
                  enum machine_mode part_mode,
                  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
        {
          if (part != -1)
            return -1;
          else
            part = i;
        }
    }
  return part == -1 ? -1 : n_parts - 1 - part;
}
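
/* E.g. for op = 0x0000ffff, mode = DImode, part_mode = HImode and def = 0
   only the lowest 16-bit part is nonzero; since parts are numbered from
   the most significant part, the function returns 3.  */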

/* Return true if IN contains a contiguous bitfield in the lower SIZE
   bits and no other bits are set in IN.  POS and LENGTH can be used
   to obtain the start position and the length of the bitfield.

   POS gives the position of the first bit of the bitfield counting
   from the lowest order bit starting with zero.  In order to use this
   value for S/390 instructions this has to be converted to "bits big
   endian" style.  */

bool
s390_contiguous_bitmask_p (unsigned HOST_WIDE_INT in, int size,
                           int *pos, int *length)
{
  int tmp_pos = 0;
  int tmp_length = 0;
  int i;
  unsigned HOST_WIDE_INT mask = 1ULL;
  bool contiguous = false;

  for (i = 0; i < size; mask <<= 1, i++)
    {
      if (contiguous)
        {
          if (mask & in)
            tmp_length++;
          else
            break;
        }
      else if (mask & in)
        {
          contiguous = true;
          tmp_length++;
        }
      else
        tmp_pos++;
    }

  if (!tmp_length)
    return false;

  /* Calculate a mask for all bits beyond the contiguous bits.  */
  mask = (-1LL & ~(((1ULL << (tmp_length + tmp_pos - 1)) << 1) - 1));

  if (mask & in)
    return false;

  if (tmp_length + tmp_pos - 1 > size)
    return false;

  if (length)
    *length = tmp_length;

  if (pos)
    *pos = tmp_pos;

  return true;
}
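
/* E.g. s390_contiguous_bitmask_p (0x0ff0, 16, &pos, &len) returns true
   with pos = 4 and len = 8, while 0x0f0f fails because the set bits do
   not form a single contiguous run.  */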

/* Check whether a rotate of ROTL followed by an AND of CONTIG is
   equivalent to a shift followed by the AND.  In particular, CONTIG
   should not overlap the (rotated) bit 0/bit 63 gap.  Negative values
   for ROTL indicate a rotate to the right.  */

bool
s390_extzv_shift_ok (int bitsize, int rotl, unsigned HOST_WIDE_INT contig)
{
  int pos, len;
  bool ok;

  ok = s390_contiguous_bitmask_p (contig, bitsize, &pos, &len);
  gcc_assert (ok);

  return ((rotl >= 0 && rotl <= pos)
          || (rotl < 0 && -rotl <= bitsize - len - pos));
}
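
/* E.g. with bitsize = 32 and contig = 0xff00 (pos = 8, len = 8), a left
   rotate of up to 8 bits keeps the field clear of the bit 0/bit 63 gap,
   so rotl = 4 is accepted while rotl = 12 is not.  */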

/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
{
  /* Floating point registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst))
    return false;

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
        return false;
    }

  return true;
}

/* Return true if it can be proven that [MEM1, MEM1 + SIZE]
   and [MEM2, MEM2 + SIZE] do overlap and false
   otherwise.  */

bool
s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
{
  rtx addr1, addr2, addr_delta;
  HOST_WIDE_INT delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return true;

  if (size == 0)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);

  /* This overlapping check is used by peepholes merging memory block operations.
     Overlapping operations would otherwise be recognized by the S/390 hardware
     and would fall back to a slower implementation. Allowing overlapping
     operations would lead to slow code but not to wrong code. Therefore we are
     somewhat optimistic if we cannot prove that the memory blocks are
     overlapping.
     That's why we return false here although this may accept operations on
     overlapping memory areas.  */
  if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
    return false;

  delta = INTVAL (addr_delta);

  if (delta == 0
      || (delta > 0 && delta < size)
      || (delta < 0 && -delta < size))
    return true;

  return false;
}
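
/* E.g. two 16-byte blocks whose addresses provably differ by 8 overlap,
   so the function returns true; with a difference of 16 or more it
   returns false, as it also does whenever the delta is unknown.  */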

/* Check whether the address of memory reference MEM2 equals exactly
   the address of memory reference MEM1 plus DELTA.  Return true if
   we can prove this to be the case, false otherwise.  */

bool
s390_offset_p (rtx mem1, rtx mem2, rtx delta)
{
  rtx addr1, addr2, addr_delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
  if (!addr_delta || !rtx_equal_p (addr_delta, delta))
    return false;

  return true;
}

/* Expand logical operator CODE in mode MODE with operands OPERANDS.  */

void
s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
                              rtx *operands)
{
  enum machine_mode wmode = mode;
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];
  rtx op, clob, tem;

  /* If we cannot handle the operation directly, use a temp register.  */
  if (!s390_logical_operator_ok_p (operands))
    dst = gen_reg_rtx (mode);

  /* QImode and HImode patterns make sense only if we have a destination
     in memory.  Otherwise perform the operation in SImode.  */
  if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
    wmode = SImode;

  /* Widen operands if required.  */
  if (wmode != mode)
    {
      if (GET_CODE (dst) == SUBREG
          && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
        dst = tem;
      else if (REG_P (dst))
        dst = gen_rtx_SUBREG (wmode, dst, 0);
      else
        dst = gen_reg_rtx (wmode);

      if (GET_CODE (src1) == SUBREG
          && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
        src1 = tem;
      else if (GET_MODE (src1) != VOIDmode)
        src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);

      if (GET_CODE (src2) == SUBREG
          && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
        src2 = tem;
      else if (GET_MODE (src2) != VOIDmode)
        src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
    }

  /* Emit the instruction.  */
  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], gen_lowpart (mode, dst));
}

/* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR).  */

bool
s390_logical_operator_ok_p (rtx *operands)
{
  /* If the destination operand is in memory, it needs to coincide
     with one of the source operands.  After reload, it has to be
     the first source operand.  */
  if (GET_CODE (operands[0]) == MEM)
    return rtx_equal_p (operands[0], operands[1])
           || (!reload_completed && rtx_equal_p (operands[0], operands[2]));

  return true;
}

/* Narrow logical operation CODE of memory operand MEMOP with immediate
   operand IMMOP to switch from SS to SI type instructions.  */

void
s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
{
  int def = code == AND ? -1 : 0;
  HOST_WIDE_INT mask;
  int part;

  gcc_assert (GET_CODE (*memop) == MEM);
  gcc_assert (!MEM_VOLATILE_P (*memop));

  mask = s390_extract_part (*immop, QImode, def);
  part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
  gcc_assert (part >= 0);

  *memop = adjust_address (*memop, QImode, part);
  *immop = gen_int_mode (mask, QImode);
}
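
/* E.g. an IOR of an SImode MEM with 0x00ff0000 touches only the byte at
   offset 1 (big-endian), so the operation is rewritten as a QImode IOR
   of that byte with 0xff, allowing a single OI instruction.  */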

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
s390_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* Map for smallest class containing reg regno.  */

const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
{ GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  ADDR_REGS,    CC_REGS,   ADDR_REGS, ADDR_REGS,
  ACCESS_REGS,  ACCESS_REGS
};

/* Return attribute type of insn.  */

static enum attr_type
s390_safe_attr_type (rtx insn)
{
  if (recog_memoized (insn) >= 0)
    return get_attr_type (insn);
  else
    return TYPE_NONE;
}

/* Return true if DISP is a valid short displacement.  */

static bool
s390_short_displacement (rtx disp)
{
  /* No displacement is OK.  */
  if (!disp)
    return true;

  /* Without the long displacement facility we don't need to
     distinguish between long and short displacement.  */
  if (!TARGET_LONG_DISPLACEMENT)
    return true;

  /* Integer displacement in range.  */
  if (GET_CODE (disp) == CONST_INT)
    return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;

  /* GOT offset is not OK, the GOT can be large.  */
  if (GET_CODE (disp) == CONST
      && GET_CODE (XEXP (disp, 0)) == UNSPEC
      && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
          || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
    return false;

  /* All other symbolic constants are literal pool references,
     which are OK as the literal pool must be small.  */
  if (GET_CODE (disp) == CONST)
    return true;

  return false;
}
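
/* E.g. GEN_INT (4000) is a valid short displacement while GEN_INT (4096)
   or any negative value requires the long displacement facility.  */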

/* Decompose a RTL expression ADDR for a memory address into
   its components, returned in OUT.

   Returns false if ADDR is not a valid memory address, true
   otherwise.  If OUT is NULL, don't return the components,
   but check for validity only.

   Note: Only addresses in canonical form are recognized.
   LEGITIMIZE_ADDRESS should convert non-canonical forms to the
   canonical form so that they will be recognized.  */

static int
s390_decompose_address (rtx addr, struct s390_address *out)
{
  HOST_WIDE_INT offset = 0;
  rtx base = NULL_RTX;
  rtx indx = NULL_RTX;
  rtx disp = NULL_RTX;
  rtx orig_disp;
  bool pointer = false;
  bool base_ptr = false;
  bool indx_ptr = false;
  bool literal_pool = false;

  /* We may need to substitute the literal pool base register into the address
     below.  However, at this point we do not know which register is going to
     be used as base, so we substitute the arg pointer register.  This is going
     to be treated as holding a pointer below -- it shouldn't be used for any
     other purpose.  */
  rtx fake_pool_base = gen_rtx_REG (Pmode, ARG_POINTER_REGNUM);

  /* Decompose address into base + index + displacement.  */

  if (GET_CODE (addr) == REG || GET_CODE (addr) == UNSPEC)
    base = addr;

  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      enum rtx_code code0 = GET_CODE (op0);
      enum rtx_code code1 = GET_CODE (op1);

      if (code0 == REG || code0 == UNSPEC)
        {
          if (code1 == REG || code1 == UNSPEC)
            {
              indx = op0;       /* index + base */
              base = op1;
            }
          else
            {
              base = op0;       /* base + displacement */
              disp = op1;
            }
        }
      else if (code0 == PLUS)
        {
          indx = XEXP (op0, 0); /* index + base + disp */
          base = XEXP (op0, 1);
          disp = op1;
        }
      else
        return false;
    }

  else
    disp = addr;                /* displacement */

  /* Extract integer part of displacement.  */
  orig_disp = disp;
  if (disp)
    {
      if (GET_CODE (disp) == CONST_INT)
        {
          offset = INTVAL (disp);
          disp = NULL_RTX;
        }
      else if (GET_CODE (disp) == CONST
               && GET_CODE (XEXP (disp, 0)) == PLUS
               && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
        {
          offset = INTVAL (XEXP (XEXP (disp, 0), 1));
          disp = XEXP (XEXP (disp, 0), 0);
        }
    }

  /* Strip off CONST here to avoid special case tests later.  */
  if (disp && GET_CODE (disp) == CONST)
    disp = XEXP (disp, 0);

  /* We can convert literal pool addresses to
     displacements by basing them off the base register.  */
  if (disp && GET_CODE (disp) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (disp))
    {
      /* Either base or index must be free to hold the base register.  */
      if (!base)
        base = fake_pool_base, literal_pool = true;
      else if (!indx)
        indx = fake_pool_base, literal_pool = true;
      else
        return false;

      /* Mark up the displacement.  */
      disp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, disp),
                             UNSPEC_LTREL_OFFSET);
    }

  /* Validate base register.  */
  if (base)
    {
      if (GET_CODE (base) == UNSPEC)
        switch (XINT (base, 1))
          {
          case UNSPEC_LTREF:
            if (!disp)
              disp = gen_rtx_UNSPEC (Pmode,
                                     gen_rtvec (1, XVECEXP (base, 0, 0)),
                                     UNSPEC_LTREL_OFFSET);
            else
              return false;

            base = XVECEXP (base, 0, 1);
            break;

          case UNSPEC_LTREL_BASE:
            if (XVECLEN (base, 0) == 1)
              base = fake_pool_base, literal_pool = true;
            else
              base = XVECEXP (base, 0, 1);
            break;

          default:
            return false;
          }

      if (!REG_P (base)
          || (GET_MODE (base) != SImode
              && GET_MODE (base) != Pmode))
        return false;

      if (REGNO (base) == STACK_POINTER_REGNUM
          || REGNO (base) == FRAME_POINTER_REGNUM
          || ((reload_completed || reload_in_progress)
              && frame_pointer_needed
              && REGNO (base) == HARD_FRAME_POINTER_REGNUM)
          || REGNO (base) == ARG_POINTER_REGNUM
          || (flag_pic
              && REGNO (base) == PIC_OFFSET_TABLE_REGNUM))
        pointer = base_ptr = true;

      if ((reload_completed || reload_in_progress)
          && base == cfun->machine->base_reg)
        pointer = base_ptr = literal_pool = true;
    }

  /* Validate index register.  */
  if (indx)
    {
      if (GET_CODE (indx) == UNSPEC)
        switch (XINT (indx, 1))
          {
          case UNSPEC_LTREF:
            if (!disp)
              disp = gen_rtx_UNSPEC (Pmode,
                                     gen_rtvec (1, XVECEXP (indx, 0, 0)),
                                     UNSPEC_LTREL_OFFSET);
            else
              return false;

            indx = XVECEXP (indx, 0, 1);
            break;

          case UNSPEC_LTREL_BASE:
            if (XVECLEN (indx, 0) == 1)
              indx = fake_pool_base, literal_pool = true;
            else
              indx = XVECEXP (indx, 0, 1);
            break;

          default:
            return false;
          }

      if (!REG_P (indx)
          || (GET_MODE (indx) != SImode
              && GET_MODE (indx) != Pmode))
        return false;

      if (REGNO (indx) == STACK_POINTER_REGNUM
          || REGNO (indx) == FRAME_POINTER_REGNUM
          || ((reload_completed || reload_in_progress)
              && frame_pointer_needed
              && REGNO (indx) == HARD_FRAME_POINTER_REGNUM)
          || REGNO (indx) == ARG_POINTER_REGNUM
          || (flag_pic
              && REGNO (indx) == PIC_OFFSET_TABLE_REGNUM))
        pointer = indx_ptr = true;

      if ((reload_completed || reload_in_progress)
          && indx == cfun->machine->base_reg)
        pointer = indx_ptr = literal_pool = true;
    }

  /* Prefer to use pointer as base, not index.  */
  if (base && indx && !base_ptr
      && (indx_ptr || (!REG_POINTER (base) && REG_POINTER (indx))))
    {
      rtx tmp = base;
      base = indx;
      indx = tmp;
    }

  /* Validate displacement.  */
  if (!disp)
    {
      /* If virtual registers are involved, the displacement will change later
         anyway as the virtual registers get eliminated.  This could make a
         valid displacement invalid, but it is more likely to make an invalid
         displacement valid, because we sometimes access the register save area
         via negative offsets to one of those registers.
         Thus we don't check the displacement for validity here.  If after
         elimination the displacement turns out to be invalid after all,
         this is fixed up by reload in any case.  */
      /* LRA maintains always displacements up to date and we need to
         know the displacement is right during all LRA not only at the
         final elimination.  */
      if (lra_in_progress
          || (base != arg_pointer_rtx
              && indx != arg_pointer_rtx
              && base != return_address_pointer_rtx
              && indx != return_address_pointer_rtx
              && base != frame_pointer_rtx
              && indx != frame_pointer_rtx
              && base != virtual_stack_vars_rtx
              && indx != virtual_stack_vars_rtx))
        if (!DISP_IN_RANGE (offset))
          return false;
    }
  else
    {
      /* All the special cases are pointers.  */
      pointer = true;

      /* In the small-PIC case, the linker converts @GOT
         and @GOTNTPOFF offsets to possible displacements.  */
      if (GET_CODE (disp) == UNSPEC
          && (XINT (disp, 1) == UNSPEC_GOT
              || XINT (disp, 1) == UNSPEC_GOTNTPOFF)
          && flag_pic == 1)
        {
          ;
        }

      /* Accept pool label offsets.  */
      else if (GET_CODE (disp) == UNSPEC
               && XINT (disp, 1) == UNSPEC_POOL_OFFSET)
        ;

      /* Accept literal pool references.  */
      else if (GET_CODE (disp) == UNSPEC
               && XINT (disp, 1) == UNSPEC_LTREL_OFFSET)
        {
          /* In case CSE pulled a non literal pool reference out of
             the pool we have to reject the address.  This is
             especially important when loading the GOT pointer on non
             zarch CPUs.  In this case the literal pool contains an lt
             relative offset to the _GLOBAL_OFFSET_TABLE_ label which
             will most likely exceed the displacement.  */
          if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
              || !CONSTANT_POOL_ADDRESS_P (XVECEXP (disp, 0, 0)))
            return false;

          orig_disp = gen_rtx_CONST (Pmode, disp);
          if (offset)
            {
              /* If we have an offset, make sure it does not
                 exceed the size of the constant pool entry.  */
              rtx sym = XVECEXP (disp, 0, 0);
              if (offset >= GET_MODE_SIZE (get_pool_mode (sym)))
                return false;

              orig_disp = plus_constant (Pmode, orig_disp, offset);
            }
        }

      else
        return false;
    }

  if (!base && !indx)
    pointer = true;

  if (out)
    {
      out->base = base;
      out->indx = indx;
      out->disp = orig_disp;
      out->pointer = pointer;
      out->literal_pool = literal_pool;
    }

  return true;
}
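
/* E.g. (plus (reg r2) (const_int 100)) decomposes into base = r2,
   indx = NULL and offset 100, while (plus (plus (reg r1) (reg r2))
   (const_int 4)) yields index r1, base r2 and offset 4.  */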

/* Decompose a RTL expression OP for a shift count into its components,
   and return the base register in BASE and the offset in OFFSET.

   Return true if OP is a valid shift count, false if not.  */

bool
s390_decompose_shift_count (rtx op, rtx *base, HOST_WIDE_INT *offset)
{
  HOST_WIDE_INT off = 0;

  /* We can have an integer constant, an address register,
     or a sum of the two.  */
  if (GET_CODE (op) == CONST_INT)
    {
      off = INTVAL (op);
      op = NULL_RTX;
    }
  if (op && GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT)
    {
      off = INTVAL (XEXP (op, 1));
      op = XEXP (op, 0);
    }
  while (op && GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (op && GET_CODE (op) != REG)
    return false;

  if (offset)
    *offset = off;

  if (base)
    *base = op;

  return true;
}
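
/* E.g. a shift count of (plus (reg r1) (const_int 7)) yields base r1 and
   offset 7; a plain const_int yields a NULL base.  */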

/* Return true if OP is a valid address without index.  */

bool
s390_legitimate_address_without_index_p (rtx op)
{
  struct s390_address addr;

  if (!s390_decompose_address (XEXP (op, 0), &addr))
    return false;
  if (addr.indx)
    return false;

  return true;
}

/* Return TRUE if ADDR is an operand valid for a load/store relative
   instruction.  Be aware that the alignment of the operand needs to
   be checked separately.
   Valid addresses are single references or a sum of a reference and a
   constant integer.  Return these parts in SYMREF and ADDEND.  You can
   pass NULL in SYMREF and/or ADDEND if you are not interested in these
   values.  Literal pool references are *not* considered symbol
   references.  */

static bool
s390_loadrelative_operand_p (rtx addr, rtx *symref, HOST_WIDE_INT *addend)
{
  HOST_WIDE_INT tmpaddend = 0;

  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS)
    {
      if (!CONST_INT_P (XEXP (addr, 1)))
        return false;

      tmpaddend = INTVAL (XEXP (addr, 1));
      addr = XEXP (addr, 0);
    }

  if ((GET_CODE (addr) == SYMBOL_REF && !CONSTANT_POOL_ADDRESS_P (addr))
      || (GET_CODE (addr) == UNSPEC
          && (XINT (addr, 1) == UNSPEC_GOTENT
              || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
    {
      if (symref)
        *symref = addr;
      if (addend)
        *addend = tmpaddend;

      return true;
    }
  return false;
}
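
/* E.g. (const (plus (symbol_ref "sym") (const_int 12))) is accepted with
   *symref = the symbol and *addend = 12, whereas any literal pool
   reference is rejected.  */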

/* Return true if the address in OP is valid for constraint letter C
   if wrapped in a MEM rtx.  Set LIT_POOL_OK to true if literal
   pool MEMs should be accepted.  Only the Q, R, S, T constraint
   letters are allowed for C.  */

static int
s390_check_qrst_address (char c, rtx op, bool lit_pool_ok)
{
  struct s390_address addr;
  bool decomposed = false;

  /* This check makes sure that no symbolic address (except literal
     pool references) are accepted by the R or T constraints.  */
  if (s390_loadrelative_operand_p (op, NULL, NULL))
    return 0;

  /* Ensure literal pool references are only accepted if LIT_POOL_OK.  */
  if (!lit_pool_ok)
    {
      if (!s390_decompose_address (op, &addr))
        return 0;
      if (addr.literal_pool)
        return 0;
      decomposed = true;
    }

  switch (c)
    {
    case 'Q': /* no index short displacement */
      if (!decomposed && !s390_decompose_address (op, &addr))
        return 0;
      if (addr.indx)
        return 0;
      if (!s390_short_displacement (addr.disp))
        return 0;
      break;

    case 'R': /* with index short displacement */
      if (TARGET_LONG_DISPLACEMENT)
        {
          if (!decomposed && !s390_decompose_address (op, &addr))
            return 0;
          if (!s390_short_displacement (addr.disp))
            return 0;
        }
      /* Any invalid address here will be fixed up by reload,
         so accept it for the most generic constraint.  */
      break;

    case 'S': /* no index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)
        return 0;
      if (!decomposed && !s390_decompose_address (op, &addr))
        return 0;
      if (addr.indx)
        return 0;
      if (s390_short_displacement (addr.disp))
        return 0;
      break;

    case 'T': /* with index long displacement */
      if (!TARGET_LONG_DISPLACEMENT)
        return 0;
      /* Any invalid address here will be fixed up by reload,
         so accept it for the most generic constraint.  */
      if ((decomposed || s390_decompose_address (op, &addr))
          && s390_short_displacement (addr.disp))
        return 0;
      break;

    default:
      return 0;
    }
  return 1;
}

/* Evaluates constraint strings described by the regular expression
   ([A|B|Z](Q|R|S|T))|U|W|Y and returns 1 if OP is a valid operand for
   the constraint given in STR, or 0 else.  */

int
s390_mem_constraint (const char *str, rtx op)
{
  char c = str[0];

  switch (c)
    {
    case 'A':
      /* Check for offsettable variants of memory constraints.  */
      if (!MEM_P (op) || MEM_VOLATILE_P (op))
        return 0;
      if ((reload_completed || reload_in_progress)
          ? !offsettable_memref_p (op) : !offsettable_nonstrict_memref_p (op))
        return 0;
      return s390_check_qrst_address (str[1], XEXP (op, 0), true);
    case 'B':
      /* Check for non-literal-pool variants of memory constraints.  */
      if (!MEM_P (op))
        return 0;
      return s390_check_qrst_address (str[1], XEXP (op, 0), false);
    case 'Q':
    case 'R':
    case 'S':
    case 'T':
      if (GET_CODE (op) != MEM)
        return 0;
      return s390_check_qrst_address (c, XEXP (op, 0), true);
    case 'U':
      return (s390_check_qrst_address ('Q', op, true)
              || s390_check_qrst_address ('R', op, true));
    case 'W':
      return (s390_check_qrst_address ('S', op, true)
              || s390_check_qrst_address ('T', op, true));
    case 'Y':
      /* Simply check for the basic form of a shift count.  Reload will
         take care of making sure we have a proper base register.  */
      if (!s390_decompose_shift_count (op, NULL, NULL))
        return 0;
      break;
    case 'Z':
      return s390_check_qrst_address (str[1], op, true);
    default:
      return 0;
    }
  return 1;
}

/* Evaluates constraint strings starting with letter O.  Input
   parameter C is the second letter following the "O" in the constraint
   string.  Returns 1 if VALUE meets the respective constraint and 0
   otherwise.  */

int
s390_O_constraint_str (const char c, HOST_WIDE_INT value)
{
  if (!TARGET_EXTIMM)
    return 0;

  switch (c)
    {
    case 's':
      return trunc_int_for_mode (value, SImode) == value;

    case 'p':
      return value == 0
        || s390_single_part (GEN_INT (value), DImode, SImode, 0) == 1;

    case 'n':
      return s390_single_part (GEN_INT (value - 1), DImode, SImode, -1) == 1;

    default:
      gcc_unreachable ();
    }
}

/* Evaluates constraint strings starting with letter N.  Parameter STR
   contains the letters following letter "N" in the constraint string.
   Returns true if VALUE matches the constraint.  */

int
s390_N_constraint_str (const char *str, HOST_WIDE_INT value)
{
  enum machine_mode mode, part_mode;
  int def;
  int part, part_goal;

  if (str[0] == 'x')
    part_goal = -1;
  else
    part_goal = str[0] - '0';

  switch (str[1])
    {
    case 'Q': part_mode = QImode; break;
    case 'H': part_mode = HImode; break;
    case 'S': part_mode = SImode; break;
    default:  return 0;
    }

  switch (str[2])
    {
    case 'H': mode = HImode; break;
    case 'S': mode = SImode; break;
    case 'D': mode = DImode; break;
    default:  return 0;
    }

  switch (str[3])
    {
    case '0': def = 0;  break;
    case 'F': def = -1; break;
    default:  return 0;
    }

  if (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (part_mode))
    return 0;

  part = s390_single_part (GEN_INT (value), mode, part_mode, def);
  if (part < 0)
    return 0;
  if (part_goal != -1 && part_goal != part)
    return 0;

  return 1;
}

/* Returns true if the input parameter VALUE is a float zero.  */

int
s390_float_const_zero_p (rtx value)
{
  return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
          && value == CONST0_RTX (GET_MODE (value)));
}

/* Implement TARGET_REGISTER_MOVE_COST.  */

static int
s390_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                         reg_class_t from, reg_class_t to)
{
  /* On s390, copy between fprs and gprs is expensive as long as no
     ldgr/lgdr can be used.  */
  if ((!TARGET_Z10 || GET_MODE_SIZE (mode) != 8)
      && ((reg_classes_intersect_p (from, GENERAL_REGS)
           && reg_classes_intersect_p (to, FP_REGS))
          || (reg_classes_intersect_p (from, FP_REGS)
              && reg_classes_intersect_p (to, GENERAL_REGS))))
    return 10;

  return 1;
}

/* Implement TARGET_MEMORY_MOVE_COST.  */

static int
s390_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                       reg_class_t rclass ATTRIBUTE_UNUSED,
                       bool in ATTRIBUTE_UNUSED)
{
  return 1;
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.
   CODE contains GET_CODE (x), OUTER_CODE contains the code
   of the superexpression of x.  */

static bool
s390_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST:
    case CONST_INT:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
    case MEM:
      *total = 0;
      return true;

    case ASHIFT: case ASHIFTRT: case LSHIFTRT:
    case ROTATE: case ROTATERT:
    case AND: case IOR: case XOR:
    case NEG: case NOT:
      *total = COSTS_N_INSNS (1);
      return false;

    case PLUS:
    case MINUS:
      *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      switch (GET_MODE (x))
        {
        case SImode:
          {
            rtx left = XEXP (x, 0);
            rtx right = XEXP (x, 1);
            if (GET_CODE (right) == CONST_INT
                && CONST_OK_FOR_K (INTVAL (right)))
              *total = s390_cost->mhi;
            else if (GET_CODE (left) == SIGN_EXTEND)
              *total = s390_cost->mh;
            else
              *total = s390_cost->ms;  /* msr, ms, msy */
            break;
          }
        case DImode:
          {
            rtx left = XEXP (x, 0);
            rtx right = XEXP (x, 1);
            if (TARGET_ZARCH)
              {
                if (GET_CODE (right) == CONST_INT
                    && CONST_OK_FOR_K (INTVAL (right)))
                  *total = s390_cost->mghi;
                else if (GET_CODE (left) == SIGN_EXTEND)
                  *total = s390_cost->msgf;
                else
                  *total = s390_cost->msg;  /* msgr, msg */
              }
            else /* TARGET_31BIT */
              {
                if (GET_CODE (left) == SIGN_EXTEND
                    && GET_CODE (right) == SIGN_EXTEND)
                  /* mulsidi case: mr, m */
                  *total = s390_cost->m;
                else if (GET_CODE (left) == ZERO_EXTEND
                         && GET_CODE (right) == ZERO_EXTEND
                         && TARGET_CPU_ZARCH)
                  /* umulsidi case: ml, mlr */
                  *total = s390_cost->ml;
                else
                  /* Complex calculation is required.  */
                  *total = COSTS_N_INSNS (40);
              }
            break;
          }
        case SFmode:
        case DFmode:
          *total = s390_cost->mult_df;
          break;
        case TFmode:
          *total = s390_cost->mxbr;
          break;
        default:
          return false;
        }
      return false;

    case FMA:
      switch (GET_MODE (x))
        {
        case DFmode:
          *total = s390_cost->madbr;
          break;
        case SFmode:
          *total = s390_cost->maebr;
          break;
        default:
          return false;
        }
      /* Negate in the third argument is free: FMSUB.  */
      if (GET_CODE (XEXP (x, 2)) == NEG)
        {
          *total += (rtx_cost (XEXP (x, 0), FMA, 0, speed)
                     + rtx_cost (XEXP (x, 1), FMA, 1, speed)
                     + rtx_cost (XEXP (XEXP (x, 2), 0), FMA, 2, speed));
          return true;
        }
      return false;

    case UDIV:
    case UMOD:
      if (GET_MODE (x) == TImode)              /* 128 bit division */
        *total = s390_cost->dlgr;
      else if (GET_MODE (x) == DImode)
        {
          rtx right = XEXP (x, 1);
          if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
            *total = s390_cost->dlr;
          else                                 /* 64 by 64 bit division */
            *total = s390_cost->dlgr;
        }
      else if (GET_MODE (x) == SImode)         /* 32 bit division */
        *total = s390_cost->dlr;
      return false;

    case DIV:
    case MOD:
      if (GET_MODE (x) == DImode)
        {
          rtx right = XEXP (x, 1);
          if (GET_CODE (right) == ZERO_EXTEND) /* 64 by 32 bit division */
            {
              if (TARGET_ZARCH)
                *total = s390_cost->dsgfr;
              else
                *total = s390_cost->dr;
            }
          else                                 /* 64 by 64 bit division */
            *total = s390_cost->dsgr;
        }
      else if (GET_MODE (x) == SImode)         /* 32 bit division */
        *total = s390_cost->dlr;
      else if (GET_MODE (x) == SFmode)
        {
          *total = s390_cost->debr;
        }
      else if (GET_MODE (x) == DFmode)
        {
          *total = s390_cost->ddbr;
        }
      else if (GET_MODE (x) == TFmode)
        {
          *total = s390_cost->dxbr;
        }
      return false;

    case SQRT:
      if (GET_MODE (x) == SFmode)
        *total = s390_cost->sqebr;
      else if (GET_MODE (x) == DFmode)
        *total = s390_cost->sqdbr;
      else /* TFmode */
        *total = s390_cost->sqxbr;
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (outer_code == MULT || outer_code == DIV || outer_code == MOD
          || outer_code == PLUS || outer_code == MINUS
          || outer_code == COMPARE)
        *total = 0;
      return false;

    case COMPARE:
      *total = COSTS_N_INSNS (1);
      if (GET_CODE (XEXP (x, 0)) == AND
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
        {
          rtx op0 = XEXP (XEXP (x, 0), 0);
          rtx op1 = XEXP (XEXP (x, 0), 1);
          rtx op2 = XEXP (x, 1);

          if (memory_operand (op0, GET_MODE (op0))
              && s390_tm_ccmode (op1, op2, 0) != VOIDmode)
            return true;
          if (register_operand (op0, GET_MODE (op0))
              && s390_tm_ccmode (op1, op2, 1) != VOIDmode)
            return true;
        }
      return false;

    default:
      return false;
    }
}
2616 /* Return the cost of an address rtx ADDR. */
2619 s390_address_cost (rtx addr, enum machine_mode mode ATTRIBUTE_UNUSED,
2620 addr_space_t as ATTRIBUTE_UNUSED,
2621 bool speed ATTRIBUTE_UNUSED)
2623 struct s390_address ad;
2624 if (!s390_decompose_address (addr, &ad))
2627 return ad.indx ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (1);
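/* Illustration (not part of the original code): the address cost model
   only distinguishes indexed from non-indexed forms.  Assuming valid
   address registers %r2 and %r3:

     8(%r2)       costs COSTS_N_INSNS (1)
     8(%r2,%r3)   costs COSTS_N_INSNS (1) + 1

   so base + displacement is marginally preferred over
   base + index + displacement.  */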
2630 /* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
2631 otherwise return 0. */
2634 tls_symbolic_operand (rtx op)
2636 if (GET_CODE (op) != SYMBOL_REF)
2638 return SYMBOL_REF_TLS_MODEL (op);
2641 /* Split DImode access register reference REG (on 64-bit) into its constituent
2642 low and high parts, and store them into LO and HI. Note that gen_lowpart/
2643 gen_highpart cannot be used as they assume all registers are word-sized,
2644 while our access registers have only half that size. */
2647 s390_split_access_reg (rtx reg, rtx *lo, rtx *hi)
2649 gcc_assert (TARGET_64BIT);
2650 gcc_assert (ACCESS_REG_P (reg));
2651 gcc_assert (GET_MODE (reg) == DImode);
2652 gcc_assert (!(REGNO (reg) & 1));
2654 *lo = gen_rtx_REG (SImode, REGNO (reg) + 1);
2655 *hi = gen_rtx_REG (SImode, REGNO (reg));
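/* Illustration (not part of the original code): for a DImode value held
   in the access-register pair starting at an even REGNO, the halves are

     *hi = SImode register REGNO      (most significant 32 bits)
     *lo = SImode register REGNO + 1  (least significant 32 bits)

   matching the big-endian register-pair layout.  */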
2658 /* Return true if OP contains a symbol reference. */
2661 symbolic_reference_mentioned_p (rtx op)
2666 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
2669 fmt = GET_RTX_FORMAT (GET_CODE (op));
2670 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2676 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2677 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2681 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
2688 /* Return true if OP contains a reference to a thread-local symbol. */
2691 tls_symbolic_reference_mentioned_p (rtx op)
2696 if (GET_CODE (op) == SYMBOL_REF)
2697 return tls_symbolic_operand (op);
2699 fmt = GET_RTX_FORMAT (GET_CODE (op));
2700 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
2706 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
2707 if (tls_symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
2711 else if (fmt[i] == 'e' && tls_symbolic_reference_mentioned_p (XEXP (op, i)))
2719 /* Return true if OP is a legitimate general operand when
2720 generating PIC code. It is given that flag_pic is on
2721 and that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2724 legitimate_pic_operand_p (rtx op)
2726 /* Accept all non-symbolic constants. */
2727 if (!SYMBOLIC_CONST (op))
2730 /* Reject everything else; must be handled
2731 via emit_symbolic_move. */
2735 /* Returns true if the constant value OP is a legitimate general operand.
2736 It is given that OP satisfies CONSTANT_P or is a CONST_DOUBLE. */
2739 s390_legitimate_constant_p (enum machine_mode mode, rtx op)
2741 /* Accept all non-symbolic constants. */
2742 if (!SYMBOLIC_CONST (op))
2745 /* Accept immediate LARL operands. */
2746 if (TARGET_CPU_ZARCH && larl_operand (op, mode))
2749 /* Thread-local symbols are never legal constants. This is
2750 so that emit_call knows that computing such addresses
2751 might require a function call. */
2752 if (TLS_SYMBOLIC_CONST (op))
2755 /* In the PIC case, symbolic constants must *not* be
2756 forced into the literal pool. We accept them here,
2757 so that they will be handled by emit_symbolic_move. */
2761 /* All remaining non-PIC symbolic constants are
2762 forced into the literal pool. */
2766 /* Determine if it's legal to put X into the constant pool. This
2767 is not possible if X contains the address of a symbol that is
2768 not constant (TLS) or not known at final link time (PIC). */
2771 s390_cannot_force_const_mem (enum machine_mode mode, rtx x)
2773 switch (GET_CODE (x))
2777 /* Accept all non-symbolic constants. */
2781 /* Labels are OK iff we are non-PIC. */
2782 return flag_pic != 0;
2785 /* 'Naked' TLS symbol references are never OK,
2786 non-TLS symbols are OK iff we are non-PIC. */
2787 if (tls_symbolic_operand (x))
2790 return flag_pic != 0;
2793 return s390_cannot_force_const_mem (mode, XEXP (x, 0));
2796 return s390_cannot_force_const_mem (mode, XEXP (x, 0))
2797 || s390_cannot_force_const_mem (mode, XEXP (x, 1));
2800 switch (XINT (x, 1))
2802 /* Only lt-relative or GOT-relative UNSPECs are OK. */
2803 case UNSPEC_LTREL_OFFSET:
2811 case UNSPEC_GOTNTPOFF:
2812 case UNSPEC_INDNTPOFF:
2815 /* If the literal pool shares the code section, execute template
2816 placeholders may be put into the pool as well. */
2818 return TARGET_CPU_ZARCH;
2830 /* Returns true if the constant value OP is a legitimate general
2831 operand during and after reload. The difference to
2832 legitimate_constant_p is that this function will not accept
2833 a constant that would need to be forced to the literal pool
2834 before it can be used as operand.
2835 This function accepts all constants which can be loaded directly
into a GPR. */
2839 legitimate_reload_constant_p (rtx op)
2841 /* Accept la(y) operands. */
2842 if (GET_CODE (op) == CONST_INT
2843 && DISP_IN_RANGE (INTVAL (op)))
2846 /* Accept l(g)hi/l(g)fi operands. */
2847 if (GET_CODE (op) == CONST_INT
2848 && (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_Os (INTVAL (op))))
2851 /* Accept lliXX operands. */
2853 && GET_CODE (op) == CONST_INT
2854 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2855 && s390_single_part (op, word_mode, HImode, 0) >= 0)
2859 && GET_CODE (op) == CONST_INT
2860 && trunc_int_for_mode (INTVAL (op), word_mode) == INTVAL (op)
2861 && s390_single_part (op, word_mode, SImode, 0) >= 0)
2864 /* Accept larl operands. */
2865 if (TARGET_CPU_ZARCH
2866 && larl_operand (op, VOIDmode))
2869 /* Accept floating-point zero operands that fit into a single GPR. */
2870 if (GET_CODE (op) == CONST_DOUBLE
2871 && s390_float_const_zero_p (op)
2872 && GET_MODE_SIZE (GET_MODE (op)) <= UNITS_PER_WORD)
2875 /* Accept double-word operands that can be split. */
2876 if (GET_CODE (op) == CONST_INT
2877 && trunc_int_for_mode (INTVAL (op), word_mode) != INTVAL (op))
2879 enum machine_mode dword_mode = word_mode == SImode ? DImode : TImode;
2880 rtx hi = operand_subword (op, 0, 0, dword_mode);
2881 rtx lo = operand_subword (op, 1, 0, dword_mode);
2882 return legitimate_reload_constant_p (hi)
2883 && legitimate_reload_constant_p (lo);
2886 /* Everything else cannot be handled without reload. */
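/* Illustration (hypothetical constant, not part of the original code):
   on a 31-bit target (word_mode == SImode) the DImode constant
   0x0000000100000002 fails trunc_int_for_mode and is therefore checked
   as its two word-sized halves,

     hi = 0x00000001   lo = 0x00000002,

   both of which satisfy CONST_OK_FOR_K, so the constant is accepted.  */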
2890 /* Returns true if the constant value OP is a legitimate fp operand
2891 during and after reload.
2892 This function accepts all constants which can be loaded directly
into an FPR. */
2896 legitimate_reload_fp_constant_p (rtx op)
2898 /* Accept floating-point zero operands if the load zero instruction
2899 can be used. Prior to z196 the load fp zero instruction caused a
2900 performance penalty if the result is used as BFP number. */
2902 && GET_CODE (op) == CONST_DOUBLE
2903 && s390_float_const_zero_p (op))
2909 /* Given an rtx OP being reloaded into a reg required to be in class RCLASS,
2910 return the class of reg to actually use. */
2913 s390_preferred_reload_class (rtx op, reg_class_t rclass)
2915 switch (GET_CODE (op))
2917 /* Constants we cannot reload into general registers
2918 must be forced into the literal pool. */
2921 if (reg_class_subset_p (GENERAL_REGS, rclass)
2922 && legitimate_reload_constant_p (op))
2923 return GENERAL_REGS;
2924 else if (reg_class_subset_p (ADDR_REGS, rclass)
2925 && legitimate_reload_constant_p (op))
2927 else if (reg_class_subset_p (FP_REGS, rclass)
2928 && legitimate_reload_fp_constant_p (op))
2932 /* If a symbolic constant or a PLUS is reloaded,
2933 it is most likely being used as an address, so
2934 prefer ADDR_REGS. If 'class' is not a superset
2935 of ADDR_REGS, e.g. FP_REGS, reject this reload. */
2937 /* Symrefs cannot be pushed into the literal pool with -fPIC
2938 so we *MUST NOT* return NO_REGS for these cases
2939 (s390_cannot_force_const_mem will return true).
2941 On the other hand we MUST return NO_REGS for symrefs with
2942 invalid addend which might have been pushed to the literal
2943 pool (no -fPIC). Usually we would expect them to be
2944 handled via secondary reload but this does not happen if
2945 they are used as literal pool slot replacement in reload
2946 inheritance (see emit_input_reload_insns). */
2947 if (TARGET_CPU_ZARCH
2948 && GET_CODE (XEXP (op, 0)) == PLUS
2949 && GET_CODE (XEXP (XEXP(op, 0), 0)) == SYMBOL_REF
2950 && GET_CODE (XEXP (XEXP(op, 0), 1)) == CONST_INT)
2952 if (flag_pic && reg_class_subset_p (ADDR_REGS, rclass))
2960 if (!legitimate_reload_constant_p (op))
2964 /* load address will be used. */
2965 if (reg_class_subset_p (ADDR_REGS, rclass))
2977 /* Return true if ADDR is SYMBOL_REF + addend with addend being a
2978 multiple of ALIGNMENT and the SYMBOL_REF being naturally aligned. */
2982 s390_check_symref_alignment (rtx addr, HOST_WIDE_INT alignment)
2984 HOST_WIDE_INT addend;
2987 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
2990 if (addend & (alignment - 1))
2993 if (GET_CODE (symref) == SYMBOL_REF
2994 && !SYMBOL_REF_NOT_NATURALLY_ALIGNED_P (symref))
2997 if (GET_CODE (symref) == UNSPEC
2998 && alignment <= UNITS_PER_LONG)
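/* Illustration (not part of the original code): the addend test assumes
   ALIGNMENT is a power of two.  For alignment 4,

     addend = 6:  6 & 3 == 2  -> fails the addend test
     addend = 8:  8 & 3 == 0  -> passes the addend test

   i.e. any addend with low bits set is misaligned.  */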
3004 /* ADDR is moved into REG using larl. If ADDR isn't a valid larl
3005 operand SCRATCH is used to reload the even part of the address and
3009 s390_reload_larl_operand (rtx reg, rtx addr, rtx scratch)
3011 HOST_WIDE_INT addend;
3014 if (!s390_loadrelative_operand_p (addr, &symref, &addend))
3018 /* Easy case. The addend is even so larl will do fine. */
3019 emit_move_insn (reg, addr);
3022 /* We can leave the scratch register untouched if the target
3023 register is a valid base register. */
3024 if (REGNO (reg) < FIRST_PSEUDO_REGISTER
3025 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS)
3028 gcc_assert (REGNO (scratch) < FIRST_PSEUDO_REGISTER);
3029 gcc_assert (REGNO_REG_CLASS (REGNO (scratch)) == ADDR_REGS);
3032 emit_move_insn (scratch,
3033 gen_rtx_CONST (Pmode,
3034 gen_rtx_PLUS (Pmode, symref,
3035 GEN_INT (addend - 1))));
3037 emit_move_insn (scratch, symref);
3039 /* Increment the address using la in order to avoid clobbering cc. */
3040 s390_load_address (reg, gen_rtx_PLUS (Pmode, scratch, const1_rtx));
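/* Illustration (assumed register choices, not part of the original
   code): reloading sym + 3, whose odd addend is invalid for larl, with
   target %r2 and scratch %r1 would emit roughly

     larl  %r1, sym+2      # even part, a valid larl operand
     la    %r2, 1(%r1)     # add the remaining 1; la leaves cc intact
*/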
3044 /* Generate what is necessary to move between REG and MEM using
3045 SCRATCH. The direction is given by TOMEM. */
3048 s390_reload_symref_address (rtx reg, rtx mem, rtx scratch, bool tomem)
3050 /* Reload might have pulled a constant out of the literal pool.
3051 Force it back in. */
3052 if (CONST_INT_P (mem) || GET_CODE (mem) == CONST_DOUBLE
3053 || GET_CODE (mem) == CONST)
3054 mem = force_const_mem (GET_MODE (reg), mem);
3056 gcc_assert (MEM_P (mem));
3058 /* For a load from memory we can leave the scratch register
3059 untouched if the target register is a valid base register. */
3061 && REGNO (reg) < FIRST_PSEUDO_REGISTER
3062 && REGNO_REG_CLASS (REGNO (reg)) == ADDR_REGS
3063 && GET_MODE (reg) == GET_MODE (scratch))
3066 /* Load address into scratch register. Since we can't have a
3067 secondary reload for a secondary reload we have to cover the case
3068 where larl would need a secondary reload here as well. */
3069 s390_reload_larl_operand (scratch, XEXP (mem, 0), scratch);
3071 /* Now we can use a standard load/store to do the move. */
3073 emit_move_insn (replace_equiv_address (mem, scratch), reg);
3075 emit_move_insn (reg, replace_equiv_address (mem, scratch));
3078 /* Inform reload about cases where moving X with a mode MODE to a register in
3079 RCLASS requires an extra scratch or immediate register. Return the class
3080 needed for the immediate register. */
3083 s390_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
3084 enum machine_mode mode, secondary_reload_info *sri)
3086 enum reg_class rclass = (enum reg_class) rclass_i;
3088 /* Intermediate register needed. */
3089 if (reg_classes_intersect_p (CC_REGS, rclass))
3090 return GENERAL_REGS;
3094 HOST_WIDE_INT offset;
3097 /* On z10 several optimizer steps may generate larl operands with
an odd addend. */
3100 && s390_loadrelative_operand_p (x, &symref, &offset)
3102 && !SYMBOL_REF_ALIGN1_P (symref)
3103 && (offset & 1) == 1)
3104 sri->icode = ((mode == DImode) ? CODE_FOR_reloaddi_larl_odd_addend_z10
3105 : CODE_FOR_reloadsi_larl_odd_addend_z10);
3107 /* On z10 we need a scratch register when moving QI, TI or floating
3108 point mode values from or to a memory location with a SYMBOL_REF
3109 or if the symref addend of a SI or DI move is not aligned to the
3110 width of the access. */
3112 && s390_loadrelative_operand_p (XEXP (x, 0), NULL, NULL)
3113 && (mode == QImode || mode == TImode || FLOAT_MODE_P (mode)
3114 || (!TARGET_ZARCH && mode == DImode)
3115 || ((mode == HImode || mode == SImode || mode == DImode)
3116 && (!s390_check_symref_alignment (XEXP (x, 0),
3117 GET_MODE_SIZE (mode))))))
3119 #define __SECONDARY_RELOAD_CASE(M,m) \
3122 sri->icode = in_p ? CODE_FOR_reload##m##di_toreg_z10 : \
3123 CODE_FOR_reload##m##di_tomem_z10; \
3125 sri->icode = in_p ? CODE_FOR_reload##m##si_toreg_z10 : \
3126 CODE_FOR_reload##m##si_tomem_z10; \
3129 switch (GET_MODE (x))
3131 __SECONDARY_RELOAD_CASE (QI, qi);
3132 __SECONDARY_RELOAD_CASE (HI, hi);
3133 __SECONDARY_RELOAD_CASE (SI, si);
3134 __SECONDARY_RELOAD_CASE (DI, di);
3135 __SECONDARY_RELOAD_CASE (TI, ti);
3136 __SECONDARY_RELOAD_CASE (SF, sf);
3137 __SECONDARY_RELOAD_CASE (DF, df);
3138 __SECONDARY_RELOAD_CASE (TF, tf);
3139 __SECONDARY_RELOAD_CASE (SD, sd);
3140 __SECONDARY_RELOAD_CASE (DD, dd);
3141 __SECONDARY_RELOAD_CASE (TD, td);
3146 #undef __SECONDARY_RELOAD_CASE
3150 /* We need a scratch register when loading a PLUS expression which
3151 is not a legitimate operand of the LOAD ADDRESS instruction. */
3152 /* LRA can deal with transformation of plus op very well -- so we
3153 don't need to prompt LRA in this case. */
3154 if (! lra_in_progress && in_p && s390_plus_operand (x, mode))
3155 sri->icode = (TARGET_64BIT ?
3156 CODE_FOR_reloaddi_plus : CODE_FOR_reloadsi_plus);
3158 /* When performing a multiword move from or to memory, we have to make sure the
3159 second chunk in memory is addressable without causing a displacement
3160 overflow. If that would be the case we calculate the address in
3161 a scratch register. */
3163 && GET_CODE (XEXP (x, 0)) == PLUS
3164 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3165 && !DISP_IN_RANGE (INTVAL (XEXP (XEXP (x, 0), 1))
3166 + GET_MODE_SIZE (mode) - 1))
3168 /* For GENERAL_REGS a displacement overflow is no problem if it occurs
3169 in an s_operand address, since we may fall back to lm/stm. So we only
3170 have to care about overflows in the b+i+d case.
3171 if ((reg_classes_intersect_p (GENERAL_REGS, rclass)
3172 && s390_class_max_nregs (GENERAL_REGS, mode) > 1
3173 && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3174 /* For FP_REGS no lm/stm is available so this check is triggered
3175 for displacement overflows in b+i+d and b+d like addresses. */
3176 || (reg_classes_intersect_p (FP_REGS, rclass)
3177 && s390_class_max_nregs (FP_REGS, mode) > 1))
3180 sri->icode = (TARGET_64BIT ?
3181 CODE_FOR_reloaddi_nonoffmem_in :
3182 CODE_FOR_reloadsi_nonoffmem_in);
3184 sri->icode = (TARGET_64BIT ?
3185 CODE_FOR_reloaddi_nonoffmem_out :
3186 CODE_FOR_reloadsi_nonoffmem_out);
3190 /* A scratch address register is needed when a symbolic constant is
3191 copied to r0 compiling with -fPIC. In other cases the target
3192 register might be used as temporary (see legitimize_pic_address). */
3193 if (in_p && SYMBOLIC_CONST (x) && flag_pic == 2 && rclass != ADDR_REGS)
3194 sri->icode = (TARGET_64BIT ?
3195 CODE_FOR_reloaddi_PIC_addr :
3196 CODE_FOR_reloadsi_PIC_addr);
3198 /* Either scratch or no register needed. */
3202 /* Generate code to load SRC, which is PLUS that is not a
3203 legitimate operand for the LA instruction, into TARGET.
3204 SCRATCH may be used as scratch register. */
3207 s390_expand_plus_operand (rtx target, rtx src,
3211 struct s390_address ad;
3213 /* src must be a PLUS; get its two operands. */
3214 gcc_assert (GET_CODE (src) == PLUS);
3215 gcc_assert (GET_MODE (src) == Pmode);
3217 /* Check if any of the two operands is already scheduled
3218 for replacement by reload. This can happen e.g. when
3219 float registers occur in an address. */
3220 sum1 = find_replacement (&XEXP (src, 0));
3221 sum2 = find_replacement (&XEXP (src, 1));
3222 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3224 /* If the address is already strictly valid, there's nothing to do. */
3225 if (!s390_decompose_address (src, &ad)
3226 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3227 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
3229 /* Otherwise, one of the operands cannot be an address register;
3230 we reload its value into the scratch register. */
3231 if (true_regnum (sum1) < 1 || true_regnum (sum1) > 15)
3233 emit_move_insn (scratch, sum1);
3236 if (true_regnum (sum2) < 1 || true_regnum (sum2) > 15)
3238 emit_move_insn (scratch, sum2);
3242 /* According to the way these invalid addresses are generated
3243 in reload.c, it should never happen (at least on s390) that
3244 *neither* of the PLUS components, after find_replacements
3245 was applied, is an address register. */
3246 if (sum1 == scratch && sum2 == scratch)
3252 src = gen_rtx_PLUS (Pmode, sum1, sum2);
3255 /* Emit the LOAD ADDRESS pattern. Note that reload of PLUS
3256 is only ever performed on addresses, so we can mark the
3257 sum as legitimate for LA in any case. */
3258 s390_load_address (target, src);
3262 /* Return true if ADDR is a valid memory address.
3263 STRICT specifies whether strict register checking applies. */
3266 s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3268 struct s390_address ad;
3271 && larl_operand (addr, VOIDmode)
3272 && (mode == VOIDmode
3273 || s390_check_symref_alignment (addr, GET_MODE_SIZE (mode))))
3276 if (!s390_decompose_address (addr, &ad))
3281 if (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
3284 if (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx)))
3290 && !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
3291 || REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
3295 && !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
3296 || REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
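/* Illustration (not part of the original code): addresses that pass the
   strict check, assuming %r3 and %r4 are ADDR_REGS:

     100(%r3)         base + displacement
     100(%r4,%r3)     base + index + displacement

   A pseudo base or index is only acceptable when STRICT is false.  */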
3302 /* Return true if OP is a valid operand for the LA instruction.
3303 In 31-bit, we need to prove that the result is used as an
3304 address, as LA performs only a 31-bit addition. */
3307 legitimate_la_operand_p (rtx op)
3309 struct s390_address addr;
3310 if (!s390_decompose_address (op, &addr))
3313 return (TARGET_64BIT || addr.pointer);
3316 /* Return true if it is valid *and* preferable to use LA to
3317 compute the sum of OP1 and OP2. */
3320 preferred_la_operand_p (rtx op1, rtx op2)
3322 struct s390_address addr;
3324 if (op2 != const0_rtx)
3325 op1 = gen_rtx_PLUS (Pmode, op1, op2);
3327 if (!s390_decompose_address (op1, &addr))
3329 if (addr.base && !REGNO_OK_FOR_BASE_P (REGNO (addr.base)))
3331 if (addr.indx && !REGNO_OK_FOR_INDEX_P (REGNO (addr.indx)))
3334 /* Avoid LA instructions with index register on z196; it is
3335 preferable to use regular add instructions when possible.
3336 Starting with zEC12 the la with index register is "uncracked" again. */
3338 if (addr.indx && s390_tune == PROCESSOR_2817_Z196)
3341 if (!TARGET_64BIT && !addr.pointer)
3347 if ((addr.base && REG_P (addr.base) && REG_POINTER (addr.base))
3348 || (addr.indx && REG_P (addr.indx) && REG_POINTER (addr.indx)))
3354 /* Emit a forced load-address operation to load SRC into DST.
3355 This will use the LOAD ADDRESS instruction even in situations
3356 where legitimate_la_operand_p (SRC) returns false. */
3359 s390_load_address (rtx dst, rtx src)
3362 emit_move_insn (dst, src);
3364 emit_insn (gen_force_la_31 (dst, src));
3367 /* Return a legitimate reference for ORIG (an address) using the
3368 register REG. If REG is 0, a new pseudo is generated.
3370 There are two types of references that must be handled:
3372 1. Global data references must load the address from the GOT, via
3373 the PIC reg. An insn is emitted to do this load, and the reg is
3376 2. Static data references, constant pool addresses, and code labels
3377 compute the address as an offset from the GOT, whose base is in
3378 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
3379 differentiate them from global data objects. The returned
3380 address is the PIC reg + an unspec constant.
3382 TARGET_LEGITIMIZE_ADDRESS_P rejects symbolic references unless the PIC
3383 reg also appears in the address. */
3386 legitimize_pic_address (rtx orig, rtx reg)
3389 rtx addend = const0_rtx;
3392 gcc_assert (!TLS_SYMBOLIC_CONST (addr));
3394 if (GET_CODE (addr) == CONST)
3395 addr = XEXP (addr, 0);
3397 if (GET_CODE (addr) == PLUS)
3399 addend = XEXP (addr, 1);
3400 addr = XEXP (addr, 0);
3403 if ((GET_CODE (addr) == LABEL_REF
3404 || (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (addr))
3405 || (GET_CODE (addr) == UNSPEC &&
3406 (XINT (addr, 1) == UNSPEC_GOTENT
3407 || (TARGET_CPU_ZARCH && XINT (addr, 1) == UNSPEC_PLT))))
3408 && GET_CODE (addend) == CONST_INT)
3410 /* This can be locally addressed. */
3412 /* larl_operand requires UNSPECs to be wrapped in a const rtx. */
3413 rtx const_addr = (GET_CODE (addr) == UNSPEC ?
3414 gen_rtx_CONST (Pmode, addr) : addr);
3416 if (TARGET_CPU_ZARCH
3417 && larl_operand (const_addr, VOIDmode)
3418 && INTVAL (addend) < (HOST_WIDE_INT)1 << 31
3419 && INTVAL (addend) >= -((HOST_WIDE_INT)1 << 31))
3421 if (INTVAL (addend) & 1)
3423 /* LARL can't handle odd offsets, so emit a pair of LARL and LA. */
3425 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3427 if (!DISP_IN_RANGE (INTVAL (addend)))
3429 HOST_WIDE_INT even = INTVAL (addend) - 1;
3430 addr = gen_rtx_PLUS (Pmode, addr, GEN_INT (even));
3431 addr = gen_rtx_CONST (Pmode, addr);
3432 addend = const1_rtx;
3435 emit_move_insn (temp, addr);
3436 new_rtx = gen_rtx_PLUS (Pmode, temp, addend);
3440 s390_load_address (reg, new_rtx);
3446 /* If the offset is even, we can just use LARL. This
3447 will happen automatically. */
3452 /* No larl - Access local symbols relative to the GOT. */
3454 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3456 if (reload_in_progress || reload_completed)
3457 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3459 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
3460 if (addend != const0_rtx)
3461 addr = gen_rtx_PLUS (Pmode, addr, addend);
3462 addr = gen_rtx_CONST (Pmode, addr);
3463 addr = force_const_mem (Pmode, addr);
3464 emit_move_insn (temp, addr);
3466 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3469 s390_load_address (reg, new_rtx);
3474 else if (GET_CODE (addr) == SYMBOL_REF && addend == const0_rtx)
3476 /* A non-local symbol reference without addend.
3478 The symbol ref is wrapped into an UNSPEC to make sure the
3479 proper operand modifier (@GOT or @GOTENT) will be emitted.
3480 This will tell the linker to put the symbol into the GOT.
3482 Additionally the code dereferencing the GOT slot is emitted here.
3484 An addend to the symref needs to be added afterwards.
3485 legitimize_pic_address calls itself recursively to handle
3486 that case. So no need to do it here. */
3489 reg = gen_reg_rtx (Pmode);
3493 /* Use load relative if possible.
3494 lgrl <target>, sym@GOTENT */
3495 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3496 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3497 new_rtx = gen_const_mem (GET_MODE (reg), new_rtx);
3499 emit_move_insn (reg, new_rtx);
3502 else if (flag_pic == 1)
3504 /* Assume GOT offset is a valid displacement operand (< 4k
3505 or < 512k with z990). This is handled the same way in
3506 both 31- and 64-bit code (@GOT).
3507 lg <target>, sym@GOT(r12) */
3509 if (reload_in_progress || reload_completed)
3510 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3512 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3513 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3514 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3515 new_rtx = gen_const_mem (Pmode, new_rtx);
3516 emit_move_insn (reg, new_rtx);
3519 else if (TARGET_CPU_ZARCH)
3521 /* If the GOT offset might be >= 4k, we determine the position
3522 of the GOT entry via a PC-relative LARL (@GOTENT).
3523 larl temp, sym@GOTENT
3524 lg <target>, 0(temp) */
3526 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3528 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3529 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3531 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTENT);
3532 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3533 emit_move_insn (temp, new_rtx);
3535 new_rtx = gen_const_mem (Pmode, temp);
3536 emit_move_insn (reg, new_rtx);
3542 /* If the GOT offset might be >= 4k, we have to load it
3543 from the literal pool (@GOT).
3545 lg temp, lit-litbase(r13)
3546 lg <target>, 0(temp)
3547 lit: .long sym@GOT */
3549 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3551 gcc_assert (REGNO (temp) >= FIRST_PSEUDO_REGISTER
3552 || REGNO_REG_CLASS (REGNO (temp)) == ADDR_REGS);
3554 if (reload_in_progress || reload_completed)
3555 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3557 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
3558 addr = gen_rtx_CONST (Pmode, addr);
3559 addr = force_const_mem (Pmode, addr);
3560 emit_move_insn (temp, addr);
3562 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3563 new_rtx = gen_const_mem (Pmode, new_rtx);
3564 emit_move_insn (reg, new_rtx);
3568 else if (GET_CODE (addr) == UNSPEC && GET_CODE (addend) == CONST_INT)
3570 gcc_assert (XVECLEN (addr, 0) == 1);
3571 switch (XINT (addr, 1))
3573 /* These address symbols (or PLT slots) are addressed relative to
3574 the GOT (not GOT slots!). In general this will exceed the
3575 displacement range, so these values belong in the literal pool. */
3579 new_rtx = force_const_mem (Pmode, orig);
3582 /* For -fPIC the GOT size might exceed the displacement
3583 range so make sure the value is in the literal pool. */
3586 new_rtx = force_const_mem (Pmode, orig);
3589 /* For @GOTENT larl is used. This is handled like local symbol refs. */
3595 /* @PLT is OK as is on 64-bit, must be converted to
3596 GOT-relative @PLTOFF on 31-bit. */
3598 if (!TARGET_CPU_ZARCH)
3600 rtx temp = reg ? reg : gen_reg_rtx (Pmode);
3602 if (reload_in_progress || reload_completed)
3603 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3605 addr = XVECEXP (addr, 0, 0);
3606 addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
3608 if (addend != const0_rtx)
3609 addr = gen_rtx_PLUS (Pmode, addr, addend);
3610 addr = gen_rtx_CONST (Pmode, addr);
3611 addr = force_const_mem (Pmode, addr);
3612 emit_move_insn (temp, addr);
3614 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3617 s390_load_address (reg, new_rtx);
3622 /* On 64 bit larl can be used. This case is handled like
3623 local symbol refs. */
3627 /* Everything else cannot happen. */
3632 else if (addend != const0_rtx)
3634 /* Otherwise, compute the sum. */
3636 rtx base = legitimize_pic_address (addr, reg);
3637 new_rtx = legitimize_pic_address (addend,
3638 base == reg ? NULL_RTX : reg);
3639 if (GET_CODE (new_rtx) == CONST_INT)
3640 new_rtx = plus_constant (Pmode, base, INTVAL (new_rtx));
3643 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
3645 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
3646 new_rtx = XEXP (new_rtx, 1);
3648 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
3651 if (GET_CODE (new_rtx) == CONST)
3652 new_rtx = XEXP (new_rtx, 0);
3653 new_rtx = force_operand (new_rtx, 0);
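/* Illustration (not part of the original code): under -fpic the cases
   above reduce, for z10 and later, to something like

     larl  %r1, local_sym             # local: PC-relative, no GOT
     lgrl  %r1, global_sym@GOTENT     # global: load-relative GOT slot

   with older CPUs reaching the GOT slot via @GOT(r12) or an @GOTENT
   larl/load pair instead.  */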
3659 /* Load the thread pointer into a register. */
3662 s390_get_thread_pointer (void)
3664 rtx tp = gen_reg_rtx (Pmode);
3666 emit_move_insn (tp, gen_rtx_REG (Pmode, TP_REGNUM));
3667 mark_reg_pointer (tp, BITS_PER_WORD);
3672 /* Emit a tls call insn. The call target is the SYMBOL_REF stored
3673 in s390_tls_symbol which always refers to __tls_get_offset.
3674 The returned offset is written to RESULT_REG and a USE rtx is
3675 generated for TLS_CALL. */
3677 static GTY(()) rtx s390_tls_symbol;
3680 s390_emit_tls_call_insn (rtx result_reg, rtx tls_call)
3685 emit_insn (s390_load_got ());
3687 if (!s390_tls_symbol)
3688 s390_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_offset");
3690 insn = s390_emit_call (s390_tls_symbol, tls_call, result_reg,
3691 gen_rtx_REG (Pmode, RETURN_REGNUM));
3693 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), result_reg);
3694 RTL_CONST_CALL_P (insn) = 1;
3697 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3698 this (thread-local) address. REG may be used as temporary. */
3701 legitimize_tls_address (rtx addr, rtx reg)
3703 rtx new_rtx, tls_call, temp, base, r2, insn;
3705 if (GET_CODE (addr) == SYMBOL_REF)
3706 switch (tls_symbolic_operand (addr))
3708 case TLS_MODEL_GLOBAL_DYNAMIC:
3710 r2 = gen_rtx_REG (Pmode, 2);
3711 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_TLSGD);
3712 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3713 new_rtx = force_const_mem (Pmode, new_rtx);
3714 emit_move_insn (r2, new_rtx);
3715 s390_emit_tls_call_insn (r2, tls_call);
3716 insn = get_insns ();
3719 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3720 temp = gen_reg_rtx (Pmode);
3721 emit_libcall_block (insn, temp, r2, new_rtx);
3723 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3726 s390_load_address (reg, new_rtx);
3731 case TLS_MODEL_LOCAL_DYNAMIC:
3733 r2 = gen_rtx_REG (Pmode, 2);
3734 tls_call = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM);
3735 new_rtx = gen_rtx_CONST (Pmode, tls_call);
3736 new_rtx = force_const_mem (Pmode, new_rtx);
3737 emit_move_insn (r2, new_rtx);
3738 s390_emit_tls_call_insn (r2, tls_call);
3739 insn = get_insns ();
3742 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TLSLDM_NTPOFF);
3743 temp = gen_reg_rtx (Pmode);
3744 emit_libcall_block (insn, temp, r2, new_rtx);
3746 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3747 base = gen_reg_rtx (Pmode);
3748 s390_load_address (base, new_rtx);
3750 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_DTPOFF);
3751 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3752 new_rtx = force_const_mem (Pmode, new_rtx);
3753 temp = gen_reg_rtx (Pmode);
3754 emit_move_insn (temp, new_rtx);
3756 new_rtx = gen_rtx_PLUS (Pmode, base, temp);
3759 s390_load_address (reg, new_rtx);
3764 case TLS_MODEL_INITIAL_EXEC:
3767 /* Assume GOT offset < 4k. This is handled the same way
3768 in both 31- and 64-bit code. */
3770 if (reload_in_progress || reload_completed)
3771 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3773 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3774 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3775 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
3776 new_rtx = gen_const_mem (Pmode, new_rtx);
3777 temp = gen_reg_rtx (Pmode);
3778 emit_move_insn (temp, new_rtx);
3780 else if (TARGET_CPU_ZARCH)
3782 /* If the GOT offset might be >= 4k, we determine the position
3783 of the GOT entry via a PC-relative LARL. */
3785 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3786 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3787 temp = gen_reg_rtx (Pmode);
3788 emit_move_insn (temp, new_rtx);
3790 new_rtx = gen_const_mem (Pmode, temp);
3791 temp = gen_reg_rtx (Pmode);
3792 emit_move_insn (temp, new_rtx);
3796 /* If the GOT offset might be >= 4k, we have to load it
3797 from the literal pool. */
3799 if (reload_in_progress || reload_completed)
3800 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
3802 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTNTPOFF);
3803 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3804 new_rtx = force_const_mem (Pmode, new_rtx);
3805 temp = gen_reg_rtx (Pmode);
3806 emit_move_insn (temp, new_rtx);
3808 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, temp);
3809 new_rtx = gen_const_mem (Pmode, new_rtx);
3811 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3812 temp = gen_reg_rtx (Pmode);
3813 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3817 /* In position-dependent code, load the absolute address of
3818 the GOT entry from the literal pool. */
3820 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_INDNTPOFF);
3821 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3822 new_rtx = force_const_mem (Pmode, new_rtx);
3823 temp = gen_reg_rtx (Pmode);
3824 emit_move_insn (temp, new_rtx);
3827 new_rtx = gen_const_mem (Pmode, new_rtx);
3828 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, new_rtx, addr), UNSPEC_TLS_LOAD);
3829 temp = gen_reg_rtx (Pmode);
3830 emit_insn (gen_rtx_SET (Pmode, temp, new_rtx));
3833 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3836 s390_load_address (reg, new_rtx);
3841 case TLS_MODEL_LOCAL_EXEC:
3842 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_NTPOFF);
3843 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3844 new_rtx = force_const_mem (Pmode, new_rtx);
3845 temp = gen_reg_rtx (Pmode);
3846 emit_move_insn (temp, new_rtx);
3848 new_rtx = gen_rtx_PLUS (Pmode, s390_get_thread_pointer (), temp);
3851 s390_load_address (reg, new_rtx);
3860 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == UNSPEC)
3862 switch (XINT (XEXP (addr, 0), 1))
3864 case UNSPEC_INDNTPOFF:
3865 gcc_assert (TARGET_CPU_ZARCH);
3874 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
3875 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
3877 new_rtx = XEXP (XEXP (addr, 0), 0);
3878 if (GET_CODE (new_rtx) != SYMBOL_REF)
3879 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
3881 new_rtx = legitimize_tls_address (new_rtx, reg);
3882 new_rtx = plus_constant (Pmode, new_rtx,
3883 INTVAL (XEXP (XEXP (addr, 0), 1)));
3884 new_rtx = force_operand (new_rtx, 0);
3888 gcc_unreachable (); /* for now ... */
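/* Illustration (not part of the original code): summary of the four
   TLS models handled above.

     global-dynamic: call __tls_get_offset on sym@TLSGD
     local-dynamic:  call __tls_get_offset on @TLSLDM, add sym@DTPOFF
     initial-exec:   load the sym@GOTNTPOFF / @INDNTPOFF GOT entry
     local-exec:     load the literal sym@NTPOFF directly

   Each yields an offset that is added to the thread pointer.  */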
3893 /* Emit insns making the address in operands[1] valid for a standard
3894 move to operands[0]. operands[1] is replaced by an address which
3895 should be used instead of the former RTX to emit the move pattern. */
3899 emit_symbolic_move (rtx *operands)
3901 rtx temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
3903 if (GET_CODE (operands[0]) == MEM)
3904 operands[1] = force_reg (Pmode, operands[1]);
3905 else if (TLS_SYMBOLIC_CONST (operands[1]))
3906 operands[1] = legitimize_tls_address (operands[1], temp);
3908 operands[1] = legitimize_pic_address (operands[1], temp);
3911 /* Try machine-dependent ways of modifying an illegitimate address X
3912 to be legitimate. If we find one, return the new, valid address.
3914 OLDX is the address as it was before break_out_memory_refs was called.
3915 In some cases it is useful to look at this to decide what needs to be done.
3917 MODE is the mode of the operand pointed to by X.
3919 When -fpic is used, special handling is needed for symbolic references.
3920 See comments by legitimize_pic_address for details. */
3923 s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3924 enum machine_mode mode ATTRIBUTE_UNUSED)
3926 rtx constant_term = const0_rtx;
3928 if (TLS_SYMBOLIC_CONST (x))
3930 x = legitimize_tls_address (x, 0);
3932 if (s390_legitimate_address_p (mode, x, FALSE))
3935 else if (GET_CODE (x) == PLUS
3936 && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
3937 || TLS_SYMBOLIC_CONST (XEXP (x, 1))))
3943 if (SYMBOLIC_CONST (x)
3944 || (GET_CODE (x) == PLUS
3945 && (SYMBOLIC_CONST (XEXP (x, 0))
3946 || SYMBOLIC_CONST (XEXP (x, 1)))))
3947 x = legitimize_pic_address (x, 0);
3949 if (s390_legitimate_address_p (mode, x, FALSE))
3953 x = eliminate_constant_term (x, &constant_term);
3955 /* Optimize loading of large displacements by splitting them
3956 into the multiple of 4K and the rest; this allows the
3957 former to be CSE'd if possible.
3959 Don't do this if the displacement is added to a register
3960 pointing into the stack frame, as the offsets will
3961 change later anyway. */
3963 if (GET_CODE (constant_term) == CONST_INT
3964 && !TARGET_LONG_DISPLACEMENT
3965 && !DISP_IN_RANGE (INTVAL (constant_term))
3966 && !(REG_P (x) && REGNO_PTR_FRAME_P (REGNO (x))))
3968 HOST_WIDE_INT lower = INTVAL (constant_term) & 0xfff;
3969 HOST_WIDE_INT upper = INTVAL (constant_term) ^ lower;
3971 rtx temp = gen_reg_rtx (Pmode);
3972 rtx val = force_operand (GEN_INT (upper), temp);
3974 emit_move_insn (temp, val);
3976 x = gen_rtx_PLUS (Pmode, x, temp);
3977 constant_term = GEN_INT (lower);
3980 if (GET_CODE (x) == PLUS)
3982 if (GET_CODE (XEXP (x, 0)) == REG)
3984 rtx temp = gen_reg_rtx (Pmode);
3985 rtx val = force_operand (XEXP (x, 1), temp);
3987 emit_move_insn (temp, val);
3989 x = gen_rtx_PLUS (Pmode, XEXP (x, 0), temp);
3992 else if (GET_CODE (XEXP (x, 1)) == REG)
3994 rtx temp = gen_reg_rtx (Pmode);
3995 rtx val = force_operand (XEXP (x, 0), temp);
3997 emit_move_insn (temp, val);
3999 x = gen_rtx_PLUS (Pmode, temp, XEXP (x, 1));
4003 if (constant_term != const0_rtx)
4004 x = gen_rtx_PLUS (Pmode, x, constant_term);
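/* Illustration (hypothetical displacement, not part of the original
   code): without TARGET_LONG_DISPLACEMENT a displacement of 0x12345 is
   split as

     lower = 0x12345 & 0xfff  = 0x345
     upper = 0x12345 ^ 0x345  = 0x12000

   so 0x12000 is materialized in a register (and can be CSE'd across
   references) while 0x345 remains an in-range displacement.  */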
4009 /* Try a machine-dependent way of reloading an illegitimate address AD
4010 operand. If we find one, push the reload and return the new address.
4012 MODE is the mode of the enclosing MEM. OPNUM is the operand number
4013 and TYPE is the reload type of the current reload. */
4016 legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
4017 int opnum, int type)
4019 if (!optimize || TARGET_LONG_DISPLACEMENT)
4022 if (GET_CODE (ad) == PLUS)
4024 rtx tem = simplify_binary_operation (PLUS, Pmode,
4025 XEXP (ad, 0), XEXP (ad, 1));
4030 if (GET_CODE (ad) == PLUS
4031 && GET_CODE (XEXP (ad, 0)) == REG
4032 && GET_CODE (XEXP (ad, 1)) == CONST_INT
4033 && !DISP_IN_RANGE (INTVAL (XEXP (ad, 1))))
4035 HOST_WIDE_INT lower = INTVAL (XEXP (ad, 1)) & 0xfff;
4036 HOST_WIDE_INT upper = INTVAL (XEXP (ad, 1)) ^ lower;
4037 rtx cst, tem, new_rtx;
4039 cst = GEN_INT (upper);
4040 if (!legitimate_reload_constant_p (cst))
4041 cst = force_const_mem (Pmode, cst);
4043 tem = gen_rtx_PLUS (Pmode, XEXP (ad, 0), cst);
4044 new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
4046 push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
4047 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
4048 opnum, (enum reload_type) type);
4055 /* Emit code to move LEN bytes from SRC to DST. */
4058 s390_expand_movmem (rtx dst, rtx src, rtx len)
4060 /* When tuning for z10 or higher we rely on the Glibc functions to
4061 do the right thing. Only for constant lengths below 64k do we
4062 generate inline code. */
4063 if (s390_tune >= PROCESSOR_2097_Z10
4064 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4067 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4069 if (INTVAL (len) > 0)
4070 emit_insn (gen_movmem_short (dst, src, GEN_INT (INTVAL (len) - 1)));
4073 else if (TARGET_MVCLE)
4075 emit_insn (gen_movmem_long (dst, src, convert_to_mode (Pmode, len, 1)));
4080 rtx dst_addr, src_addr, count, blocks, temp;
4081 rtx_code_label *loop_start_label = gen_label_rtx ();
4082 rtx_code_label *loop_end_label = gen_label_rtx ();
4083 rtx_code_label *end_label = gen_label_rtx ();
4084 enum machine_mode mode;
4086 mode = GET_MODE (len);
4087 if (mode == VOIDmode)
4090 dst_addr = gen_reg_rtx (Pmode);
4091 src_addr = gen_reg_rtx (Pmode);
4092 count = gen_reg_rtx (mode);
4093 blocks = gen_reg_rtx (mode);
4095 convert_move (count, len, 1);
4096 emit_cmp_and_jump_insns (count, const0_rtx,
4097 EQ, NULL_RTX, mode, 1, end_label);
4099 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4100 emit_move_insn (src_addr, force_operand (XEXP (src, 0), NULL_RTX));
4101 dst = change_address (dst, VOIDmode, dst_addr);
4102 src = change_address (src, VOIDmode, src_addr);
4104 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4107 emit_move_insn (count, temp);
4109 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4112 emit_move_insn (blocks, temp);
4114 emit_cmp_and_jump_insns (blocks, const0_rtx,
4115 EQ, NULL_RTX, mode, 1, loop_end_label);
4117 emit_label (loop_start_label);
4120 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 768))
4124 /* Issue a read prefetch for the +3 cache line. */
4125 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, src_addr, GEN_INT (768)),
4126 const0_rtx, const0_rtx);
4127 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4128 emit_insn (prefetch);
4130 /* Issue a write prefetch for the +3 cache line. */
4131 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (768)),
4132 const1_rtx, const0_rtx);
4133 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4134 emit_insn (prefetch);
4137 emit_insn (gen_movmem_short (dst, src, GEN_INT (255)));
4138 s390_load_address (dst_addr,
4139 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4140 s390_load_address (src_addr,
4141 gen_rtx_PLUS (Pmode, src_addr, GEN_INT (256)));
4143 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4146 emit_move_insn (blocks, temp);
4148 emit_cmp_and_jump_insns (blocks, const0_rtx,
4149 EQ, NULL_RTX, mode, 1, loop_end_label);
4151 emit_jump (loop_start_label);
4152 emit_label (loop_end_label);
4154 emit_insn (gen_movmem_short (dst, src,
4155 convert_to_mode (Pmode, count, 1)));
4156 emit_label (end_label);
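/* Illustration (not part of the original code): for a variable length
   the expansion above amounts to the sketch below, where mvc (d, s, n)
   stands for the hardware insn copying n + 1 bytes:

     count = len - 1;
     for (blocks = count >> 8; blocks != 0; blocks--)
       {
         mvc (dst, src, 255);
         dst += 256;  src += 256;
       }
     mvc (dst, src, count & 0xff);
*/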
4161 /* Emit code to set LEN bytes at DST to VAL.
4162 Make use of clrmem if VAL is zero. */
4165 s390_expand_setmem (rtx dst, rtx len, rtx val)
4167 if (GET_CODE (len) == CONST_INT && INTVAL (len) == 0)
4170 gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
4172 if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
4174 if (val == const0_rtx && INTVAL (len) <= 256)
4175 emit_insn (gen_clrmem_short (dst, GEN_INT (INTVAL (len) - 1)));
4178 /* Initialize memory by storing the first byte. */
4179 emit_move_insn (adjust_address (dst, QImode, 0), val);
4181 if (INTVAL (len) > 1)
4183 /* Initiate 1 byte overlap move.
4184 The first byte of DST is propagated through DSTP1.
4185 Prepare a movmem for: DST+1 = DST (length = LEN - 1).
4186 DST is set to size 1 so the rest of the memory location
4187 does not count as source operand. */
4188 rtx dstp1 = adjust_address (dst, VOIDmode, 1);
4189 set_mem_size (dst, 1);
4191 emit_insn (gen_movmem_short (dstp1, dst,
4192 GEN_INT (INTVAL (len) - 2)));
4197 else if (TARGET_MVCLE)
4199 val = force_not_mem (convert_modes (Pmode, QImode, val, 1));
4200 emit_insn (gen_setmem_long (dst, convert_to_mode (Pmode, len, 1), val));
4205 rtx dst_addr, count, blocks, temp, dstp1 = NULL_RTX;
4206 rtx_code_label *loop_start_label = gen_label_rtx ();
4207 rtx_code_label *loop_end_label = gen_label_rtx ();
4208 rtx_code_label *end_label = gen_label_rtx ();
4209 enum machine_mode mode;
4211 mode = GET_MODE (len);
4212 if (mode == VOIDmode)
4215 dst_addr = gen_reg_rtx (Pmode);
4216 count = gen_reg_rtx (mode);
4217 blocks = gen_reg_rtx (mode);
4219 convert_move (count, len, 1);
4220 emit_cmp_and_jump_insns (count, const0_rtx,
4221 EQ, NULL_RTX, mode, 1, end_label);
4223 emit_move_insn (dst_addr, force_operand (XEXP (dst, 0), NULL_RTX));
4224 dst = change_address (dst, VOIDmode, dst_addr);
4226 if (val == const0_rtx)
4227 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4231 dstp1 = adjust_address (dst, VOIDmode, 1);
4232 set_mem_size (dst, 1);
4234 /* Initialize memory by storing the first byte. */
4235 emit_move_insn (adjust_address (dst, QImode, 0), val);
4237 /* If count is 1 we are done. */
4238 emit_cmp_and_jump_insns (count, const1_rtx,
4239 EQ, NULL_RTX, mode, 1, end_label);
4241 temp = expand_binop (mode, add_optab, count, GEN_INT (-2), count, 1,
4245 emit_move_insn (count, temp);
4247 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4250 emit_move_insn (blocks, temp);
4252 emit_cmp_and_jump_insns (blocks, const0_rtx,
4253 EQ, NULL_RTX, mode, 1, loop_end_label);
4255 emit_label (loop_start_label);
4258 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 1024))
4260 /* Issue a write prefetch for the +4 cache line. */
4261 rtx prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, dst_addr,
4263 const1_rtx, const0_rtx);
4264 emit_insn (prefetch);
4265 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4268 if (val == const0_rtx)
4269 emit_insn (gen_clrmem_short (dst, GEN_INT (255)));
4271 emit_insn (gen_movmem_short (dstp1, dst, GEN_INT (255)));
4272 s390_load_address (dst_addr,
4273 gen_rtx_PLUS (Pmode, dst_addr, GEN_INT (256)));
4275 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4278 emit_move_insn (blocks, temp);
4280 emit_cmp_and_jump_insns (blocks, const0_rtx,
4281 EQ, NULL_RTX, mode, 1, loop_end_label);
4283 emit_jump (loop_start_label);
4284 emit_label (loop_end_label);
4286 if (val == const0_rtx)
4287 emit_insn (gen_clrmem_short (dst, convert_to_mode (Pmode, count, 1)));
4289 emit_insn (gen_movmem_short (dstp1, dst, convert_to_mode (Pmode, count, 1)));
4290 emit_label (end_label);
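/* Illustration (not part of the original code): for a nonzero VAL the
   expansion relies on the destructive overlap of MVC.  Setting 5 bytes
   at dst to VAL becomes

     mvi  0(%dst), val         # store the first byte
     mvc  1(4,%dst), 0(%dst)   # each byte copies its predecessor

   which propagates VAL through the whole block byte by byte.  */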
4294 /* Emit code to compare LEN bytes at OP0 with those at OP1,
4295 and return the result in TARGET. */
4298 s390_expand_cmpmem (rtx target, rtx op0, rtx op1, rtx len)
4300 rtx ccreg = gen_rtx_REG (CCUmode, CC_REGNUM);
4303 /* When tuning for z10 or higher we rely on the Glibc functions to
4304 do the right thing. Only for constant lengths below 64k do we
4305 generate inline code. */
4306 if (s390_tune >= PROCESSOR_2097_Z10
4307 && (GET_CODE (len) != CONST_INT || INTVAL (len) > (1<<16)))
4310 /* As the result of CMPINT is inverted compared to what we need,
4311 we have to swap the operands. */
4312 tmp = op0; op0 = op1; op1 = tmp;
4314 if (GET_CODE (len) == CONST_INT && INTVAL (len) >= 0 && INTVAL (len) <= 256)
4316 if (INTVAL (len) > 0)
4318 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (INTVAL (len) - 1)));
4319 emit_insn (gen_cmpint (target, ccreg));
4322 emit_move_insn (target, const0_rtx);
4324 else if (TARGET_MVCLE)
4326 emit_insn (gen_cmpmem_long (op0, op1, convert_to_mode (Pmode, len, 1)));
4327 emit_insn (gen_cmpint (target, ccreg));
4331 rtx addr0, addr1, count, blocks, temp;
4332 rtx_code_label *loop_start_label = gen_label_rtx ();
4333 rtx_code_label *loop_end_label = gen_label_rtx ();
4334 rtx_code_label *end_label = gen_label_rtx ();
4335 enum machine_mode mode;
4337 mode = GET_MODE (len);
4338 if (mode == VOIDmode)
4341 addr0 = gen_reg_rtx (Pmode);
4342 addr1 = gen_reg_rtx (Pmode);
4343 count = gen_reg_rtx (mode);
4344 blocks = gen_reg_rtx (mode);
4346 convert_move (count, len, 1);
4347 emit_cmp_and_jump_insns (count, const0_rtx,
4348 EQ, NULL_RTX, mode, 1, end_label);
4350 emit_move_insn (addr0, force_operand (XEXP (op0, 0), NULL_RTX));
4351 emit_move_insn (addr1, force_operand (XEXP (op1, 0), NULL_RTX));
4352 op0 = change_address (op0, VOIDmode, addr0);
4353 op1 = change_address (op1, VOIDmode, addr1);
4355 temp = expand_binop (mode, add_optab, count, constm1_rtx, count, 1,
4358 emit_move_insn (count, temp);
4360 temp = expand_binop (mode, lshr_optab, count, GEN_INT (8), blocks, 1,
4363 emit_move_insn (blocks, temp);
4365 emit_cmp_and_jump_insns (blocks, const0_rtx,
4366 EQ, NULL_RTX, mode, 1, loop_end_label);
4368 emit_label (loop_start_label);
4371 && (GET_CODE (len) != CONST_INT || INTVAL (len) > 512))
4375 /* Issue a read prefetch for the +2 cache line of operand 1. */
4376 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr0, GEN_INT (512)),
4377 const0_rtx, const0_rtx);
4378 emit_insn (prefetch);
4379 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4381 /* Issue a read prefetch for the +2 cache line of operand 2. */
4382 prefetch = gen_prefetch (gen_rtx_PLUS (Pmode, addr1, GEN_INT (512)),
4383 const0_rtx, const0_rtx);
4384 emit_insn (prefetch);
4385 PREFETCH_SCHEDULE_BARRIER_P (prefetch) = true;
4388 emit_insn (gen_cmpmem_short (op0, op1, GEN_INT (255)));
4389 temp = gen_rtx_NE (VOIDmode, ccreg, const0_rtx);
4390 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
4391 gen_rtx_LABEL_REF (VOIDmode, end_label), pc_rtx);
4392 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
4393 emit_jump_insn (temp);
4395 s390_load_address (addr0,
4396 gen_rtx_PLUS (Pmode, addr0, GEN_INT (256)));
4397 s390_load_address (addr1,
4398 gen_rtx_PLUS (Pmode, addr1, GEN_INT (256)));
4400 temp = expand_binop (mode, add_optab, blocks, constm1_rtx, blocks, 1,
4403 emit_move_insn (blocks, temp);
4405 emit_cmp_and_jump_insns (blocks, const0_rtx,
4406 EQ, NULL_RTX, mode, 1, loop_end_label);
4408 emit_jump (loop_start_label);
4409 emit_label (loop_end_label);
4411 emit_insn (gen_cmpmem_short (op0, op1,
4412 convert_to_mode (Pmode, count, 1)));
4413 emit_label (end_label);
4415 emit_insn (gen_cmpint (target, ccreg));
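/* Illustration (not part of the original code): for a short constant
   length the expansion is essentially

     clc   0(len,%op1), 0(%op0)   # operands swapped, see above
     ...                          # gen_cmpint turns CC into -1/0/1

   so that TARGET ends up ordered like memcmp (OP0, OP1, LEN).  */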
4421 /* Expand conditional increment or decrement using alc/slb instructions.
4422 Should generate code setting DST to either SRC or SRC + INCREMENT,
4423 depending on the result of the comparison CMP_OP0 CMP_CODE CMP_OP1.
4424 Returns true if successful, false otherwise.
4426 That makes it possible to implement some if-constructs without jumps e.g.:
4427 (borrow = CC0 | CC1 and carry = CC2 | CC3)
4428 unsigned int a, b, c;
4429 if (a < b) c++; -> CCU b > a -> CC2; c += carry;
4430 if (a < b) c--; -> CCL3 a - b -> borrow; c -= borrow;
4431 if (a <= b) c++; -> CCL3 b - a -> borrow; c += carry;
4432 if (a <= b) c--; -> CCU a <= b -> borrow; c -= borrow;
4434 Checks for EQ and NE with a nonzero value need an additional xor e.g.:
4435 if (a == b) c++; -> CCL3 a ^= b; 0 - a -> borrow; c += carry;
4436 if (a == b) c--; -> CCU a ^= b; a <= 0 -> CC0 | CC1; c -= borrow;
4437 if (a != b) c++; -> CCU a ^= b; a > 0 -> CC2; c += carry;
4438 if (a != b) c--; -> CCL3 a ^= b; 0 - a -> borrow; c -= borrow; */
4441 s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
4442 rtx dst, rtx src, rtx increment)
4444 enum machine_mode cmp_mode;
4445 enum machine_mode cc_mode;
4451 if ((GET_MODE (cmp_op0) == SImode || GET_MODE (cmp_op0) == VOIDmode)
4452 && (GET_MODE (cmp_op1) == SImode || GET_MODE (cmp_op1) == VOIDmode))
4454 else if ((GET_MODE (cmp_op0) == DImode || GET_MODE (cmp_op0) == VOIDmode)
4455 && (GET_MODE (cmp_op1) == DImode || GET_MODE (cmp_op1) == VOIDmode))
4460 /* Try ADD LOGICAL WITH CARRY. */
4461 if (increment == const1_rtx)
4463 /* Determine CC mode to use. */
4464 if (cmp_code == EQ || cmp_code == NE)
4466 if (cmp_op1 != const0_rtx)
4468 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4469 NULL_RTX, 0, OPTAB_WIDEN);
4470 cmp_op1 = const0_rtx;
4473 cmp_code = cmp_code == EQ ? LEU : GTU;
4476 if (cmp_code == LTU || cmp_code == LEU)
4481 cmp_code = swap_condition (cmp_code);
4498 /* Emit comparison instruction pattern. */
4499 if (!register_operand (cmp_op0, cmp_mode))
4500 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4502 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4503 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4504 /* We use insn_invalid_p here to add clobbers if required. */
4505 ret = insn_invalid_p (emit_insn (insn), false);
4508 /* Emit ALC instruction pattern. */
4509 op_res = gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4510 gen_rtx_REG (cc_mode, CC_REGNUM),
4513 if (src != const0_rtx)
4515 if (!register_operand (src, GET_MODE (dst)))
4516 src = force_reg (GET_MODE (dst), src);
4518 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, src);
4519 op_res = gen_rtx_PLUS (GET_MODE (dst), op_res, const0_rtx);
4522 p = rtvec_alloc (2);
4524 gen_rtx_SET (VOIDmode, dst, op_res);
4526 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4527 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
4532 /* Try SUBTRACT LOGICAL WITH BORROW. */
4533 if (increment == constm1_rtx)
4535 /* Determine CC mode to use. */
4536 if (cmp_code == EQ || cmp_code == NE)
4538 if (cmp_op1 != const0_rtx)
4540 cmp_op0 = expand_simple_binop (cmp_mode, XOR, cmp_op0, cmp_op1,
4541 NULL_RTX, 0, OPTAB_WIDEN);
4542 cmp_op1 = const0_rtx;
4545 cmp_code = cmp_code == EQ ? LEU : GTU;
4548 if (cmp_code == GTU || cmp_code == GEU)
4553 cmp_code = swap_condition (cmp_code);
4570 /* Emit comparison instruction pattern. */
4571 if (!register_operand (cmp_op0, cmp_mode))
4572 cmp_op0 = force_reg (cmp_mode, cmp_op0);
4574 insn = gen_rtx_SET (VOIDmode, gen_rtx_REG (cc_mode, CC_REGNUM),
4575 gen_rtx_COMPARE (cc_mode, cmp_op0, cmp_op1));
4576 /* We use insn_invalid_p here to add clobbers if required. */
4577 ret = insn_invalid_p (emit_insn (insn), false);
4580 /* Emit SLB instruction pattern. */
4581 if (!register_operand (src, GET_MODE (dst)))
4582 src = force_reg (GET_MODE (dst), src);
4584 op_res = gen_rtx_MINUS (GET_MODE (dst),
4585 gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
4586 gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
4587 gen_rtx_REG (cc_mode, CC_REGNUM),
4589 p = rtvec_alloc (2);
4591 gen_rtx_SET (VOIDmode, dst, op_res);
4593 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4594 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
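/* Illustration (not part of the original code): the ALC path turns

     unsigned int a, b, c;
     if (a < b)
       c++;

   into a CCUmode compare that sets the carry exactly when a < b,
   followed by an add-logical-with-carry of 0, i.e. c += (a < b)
   without any branch.  The SLB path handles decrements analogously
   via the borrow.  */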
4602 /* Expand code for the insv template. Return true if successful. */
4605 s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
4607 int bitsize = INTVAL (op1);
4608 int bitpos = INTVAL (op2);
4609 enum machine_mode mode = GET_MODE (dest);
4610 enum machine_mode smode;
4611 int smode_bsize, mode_bsize;
4614 if (bitsize + bitpos > GET_MODE_BITSIZE (mode))
4617 /* Generate INSERT IMMEDIATE (IILL et al). */
4618 /* (set (ze (reg)) (const_int)). */
4620 && register_operand (dest, word_mode)
4621 && (bitpos % 16) == 0
4622 && (bitsize % 16) == 0
4623 && const_int_operand (src, VOIDmode))
4625 HOST_WIDE_INT val = INTVAL (src);
4626 int regpos = bitpos + bitsize;
4628 while (regpos > bitpos)
4630 enum machine_mode putmode;
4633 if (TARGET_EXTIMM && (regpos % 32 == 0) && (regpos >= bitpos + 32))
4638 putsize = GET_MODE_BITSIZE (putmode);
4640 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4643 gen_int_mode (val, putmode));
4646 gcc_assert (regpos == bitpos);
4650 smode = smallest_mode_for_size (bitsize, MODE_INT);
4651 smode_bsize = GET_MODE_BITSIZE (smode);
4652 mode_bsize = GET_MODE_BITSIZE (mode);
4654 /* Generate STORE CHARACTERS UNDER MASK (STCM et al). */
4656 && (bitsize % BITS_PER_UNIT) == 0
4658 && (register_operand (src, word_mode)
4659 || const_int_operand (src, VOIDmode)))
4661 /* Emit standard pattern if possible. */
4662 if (smode_bsize == bitsize)
4664 emit_move_insn (adjust_address (dest, smode, 0),
4665 gen_lowpart (smode, src));
4669 /* (set (ze (mem)) (const_int)). */
4670 else if (const_int_operand (src, VOIDmode))
4672 int size = bitsize / BITS_PER_UNIT;
4673 rtx src_mem = adjust_address (force_const_mem (word_mode, src),
4675 UNITS_PER_WORD - size);
4677 dest = adjust_address (dest, BLKmode, 0);
4678 set_mem_size (dest, size);
4679 s390_expand_movmem (dest, src_mem, GEN_INT (size));
4683 /* (set (ze (mem)) (reg)). */
4684 else if (register_operand (src, word_mode))
4687 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, op1,
4691 /* Emit st,stcmh sequence. */
4692 int stcmh_width = bitsize - 32;
4693 int size = stcmh_width / BITS_PER_UNIT;
4695 emit_move_insn (adjust_address (dest, SImode, size),
4696 gen_lowpart (SImode, src));
4697 set_mem_size (dest, size);
4698 emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
4699 GEN_INT (stcmh_width),
4701 gen_rtx_LSHIFTRT (word_mode, src, GEN_INT (32)));
4707 /* Generate INSERT CHARACTERS UNDER MASK (IC, ICM et al). */
4708 if ((bitpos % BITS_PER_UNIT) == 0
4709 && (bitsize % BITS_PER_UNIT) == 0
4710 && (bitpos & 32) == ((bitpos + bitsize - 1) & 32)
4712 && (mode == DImode || mode == SImode)
4713 && register_operand (dest, mode))
4715 /* Emit a strict_low_part pattern if possible. */
4716 if (smode_bsize == bitsize && bitpos == mode_bsize - smode_bsize)
4718 op = gen_rtx_STRICT_LOW_PART (VOIDmode, gen_lowpart (smode, dest));
4719 op = gen_rtx_SET (VOIDmode, op, gen_lowpart (smode, src));
4720 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4721 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber)));
4725 /* ??? There are more powerful versions of ICM that are not
4726 completely represented in the md file. */
4729 /* For z10, generate ROTATE THEN INSERT SELECTED BITS (RISBG et al). */
4730 if (TARGET_Z10 && (mode == DImode || mode == SImode))
4732 enum machine_mode mode_s = GET_MODE (src);
4734 if (mode_s == VOIDmode)
4736 /* Assume const_int etc already in the proper mode. */
4737 src = force_reg (mode, src);
4739 else if (mode_s != mode)
4741 gcc_assert (GET_MODE_BITSIZE (mode_s) >= bitsize);
4742 src = force_reg (mode_s, src);
4743 src = gen_lowpart (mode, src);
4746 op = gen_rtx_ZERO_EXTRACT (mode, dest, op1, op2);
4747 op = gen_rtx_SET (VOIDmode, op, src);
4751 clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
4752 op = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clobber));
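/* Editorial sketch (not part of the original source) of what the
   ZERO_EXTRACT set built above denotes for the DImode case: replace the
   BITSIZE-bit field of DEST starting BITPOS bits from the most
   significant end (bit positions count from the MSB on this target)
   with the low BITSIZE bits of SRC, the operation RISBG performs in one
   instruction.  */

#include <stdint.h>

static uint64_t
sketch_insv_di (uint64_t dest, uint64_t src, int bitpos, int bitsize)
{
  int shift = 64 - bitpos - bitsize;	/* distance of the field from the LSB */
  uint64_t mask = (bitsize >= 64 ? ~UINT64_C (0)
		   : (UINT64_C (1) << bitsize) - 1) << shift;

  return (dest & ~mask) | ((src << shift) & mask);
}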
4762 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic which returns a
4763 register that holds VAL of mode MODE shifted by COUNT bits. */
4766 s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
4768 val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
4769 NULL_RTX, 1, OPTAB_DIRECT);
4770 return expand_simple_binop (SImode, ASHIFT, val, count,
4771 NULL_RTX, 1, OPTAB_DIRECT);
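/* Editorial sketch (not part of the original source): the two
   expand_simple_binop calls above compute nothing more than
   (VAL & GET_MODE_MASK (MODE)) << COUNT in SImode, e.g. with a mask of
   0xff for QImode or 0xffff for HImode:  */

#include <stdint.h>

static uint32_t
sketch_mask_and_shift (uint32_t val, uint32_t mode_mask, int count)
{
  return (val & mode_mask) << count;
}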
4774 /* Structure to hold the initial parameters for a compare_and_swap operation
4775 in HImode and QImode. */
4777 struct alignment_context
4779 rtx memsi; /* SI aligned memory location. */
4780 rtx shift; /* Bit offset with regard to lsb. */
4781 rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
4782 rtx modemaski; /* ~modemask */
4783 bool aligned; /* True if memory is aligned, false otherwise. */
4786 /* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize
4787 the structure AC for transparent simplification, if the memory alignment
4788 is known to be at least 32 bits. MEM is the memory location for the actual operation
4789 and MODE its mode. */
4792 init_alignment_context (struct alignment_context *ac, rtx mem,
4793 enum machine_mode mode)
4795 ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
4796 ac->aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
4799 ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned. */
4802 /* Alignment is unknown. */
4803 rtx byteoffset, addr, align;
4805 /* Force the address into a register. */
4806 addr = force_reg (Pmode, XEXP (mem, 0));
4808 /* Align it to SImode. */
4809 align = expand_simple_binop (Pmode, AND, addr,
4810 GEN_INT (-GET_MODE_SIZE (SImode)),
4811 NULL_RTX, 1, OPTAB_DIRECT);
4813 ac->memsi = gen_rtx_MEM (SImode, align);
4814 MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
4815 set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
4816 set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
4818 /* Calculate shiftcount. */
4819 byteoffset = expand_simple_binop (Pmode, AND, addr,
4820 GEN_INT (GET_MODE_SIZE (SImode) - 1),
4821 NULL_RTX, 1, OPTAB_DIRECT);
4822 /* As we already have some offset, evaluate the remaining distance. */
4823 ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
4824 NULL_RTX, 1, OPTAB_DIRECT);
4827 /* Shift is the byte count, but we need the bitcount. */
4828 ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift, GEN_INT (3),
4829 NULL_RTX, 1, OPTAB_DIRECT);
4831 /* Calculate masks. */
4832 ac->modemask = expand_simple_binop (SImode, ASHIFT,
4833 GEN_INT (GET_MODE_MASK (mode)),
4834 ac->shift, NULL_RTX, 1, OPTAB_DIRECT);
4835 ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask,
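/* Editorial sketch (not part of the original source) of the unaligned
   case above, for this big-endian target: ADDR is rounded down to an
   SImode boundary, and SHIFT ends up as the distance, in bits, of the
   HImode/QImode field from the least significant bit of that word.
   All names are invented for illustration.  */

#include <stdint.h>

struct sketch_ac
{
  uintptr_t word_addr;	/* address of ac->memsi			*/
  int shift;		/* bit offset with regard to the lsb	*/
  uint32_t modemask;	/* mode mask shifted by SHIFT bits	*/
  uint32_t modemaski;	/* ~modemask				*/
};

static void
sketch_init_alignment_context (struct sketch_ac *ac, uintptr_t addr,
			       int mode_size /* 1 (QI) or 2 (HI) bytes */)
{
  int byteoffset = addr & 3;		   /* offset within the SImode word */

  ac->word_addr = addr & ~(uintptr_t) 3;   /* align the address to SImode */
  /* (size of SImode - size of mode) - byteoffset bytes, scaled to bits.  */
  ac->shift = ((4 - mode_size) - byteoffset) * 8;
  ac->modemask = ((1u << (mode_size * 8)) - 1) << ac->shift;
  ac->modemaski = ~ac->modemask;
}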
4839 /* A subroutine of s390_expand_cs_hqi. Insert INS into VAL. If possible,
4840 use a single insv insn into SEQ2. Otherwise, put prep insns in SEQ1 and
4841 perform the merge in SEQ2. */
4844 s390_two_part_insv (struct alignment_context *ac, rtx *seq1, rtx *seq2,
4845 enum machine_mode mode, rtx val, rtx ins)
4852 tmp = copy_to_mode_reg (SImode, val);
4853 if (s390_expand_insv (tmp, GEN_INT (GET_MODE_BITSIZE (mode)),
4857 *seq2 = get_insns ();
4864 /* Failed to use insv. Generate a two part shift and mask. */
4866 tmp = s390_expand_mask_and_shift (ins, mode, ac->shift);
4867 *seq1 = get_insns ();
4871 tmp = expand_simple_binop (SImode, IOR, tmp, val, NULL_RTX, 1, OPTAB_DIRECT);
4872 *seq2 = get_insns ();
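/* Editorial sketch (not part of the original source) of the fallback
   path above: the inserted value is masked and shifted into position
   once, outside the CS loop (SEQ1), and merged into the masked
   background word inside it (SEQ2).  */

#include <stdint.h>

static uint32_t
sketch_two_part_merge (uint32_t val /* word with the field cleared */,
		       uint32_t ins, uint32_t mode_mask, int shift)
{
  uint32_t tmp = (ins & mode_mask) << shift;	/* SEQ1: mask and shift */
  return tmp | val;				/* SEQ2: merge		*/
}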
4878 /* Expand an atomic compare and swap operation for HImode and QImode. MEM is
4879 the memory location, CMP the old value to compare MEM with and NEW_RTX the
4880 value to set if CMP == MEM. */
4883 s390_expand_cs_hqi (enum machine_mode mode, rtx btarget, rtx vtarget, rtx mem,
4884 rtx cmp, rtx new_rtx, bool is_weak)
4886 struct alignment_context ac;
4887 rtx cmpv, newv, val, cc, seq0, seq1, seq2, seq3;
4888 rtx res = gen_reg_rtx (SImode);
4889 rtx_code_label *csloop = NULL, *csend = NULL;
4891 gcc_assert (MEM_P (mem));
4893 init_alignment_context (&ac, mem, mode);
4895 /* Load full word. Subsequent loads are performed by CS. */
4896 val = expand_simple_binop (SImode, AND, ac.memsi, ac.modemaski,
4897 NULL_RTX, 1, OPTAB_DIRECT);
4899 /* Prepare insertions of cmp and new_rtx into the loaded value. When
4900 possible, we try to use insv to make this happen efficiently. If
4901 that fails we'll generate code both inside and outside the loop. */
4902 cmpv = s390_two_part_insv (&ac, &seq0, &seq2, mode, val, cmp);
4903 newv = s390_two_part_insv (&ac, &seq1, &seq3, mode, val, new_rtx);
4910 /* Start CS loop. */
4913 /* Begin assuming success. */
4914 emit_move_insn (btarget, const1_rtx);
4916 csloop = gen_label_rtx ();
4917 csend = gen_label_rtx ();
4918 emit_label (csloop);
4921 /* val = "<mem>00..0<mem>"
4922 * cmp = "00..0<cmp>00..0"
4923 * new = "00..0<new>00..0"
4929 cc = s390_emit_compare_and_swap (EQ, res, ac.memsi, cmpv, newv);
4931 emit_insn (gen_cstorecc4 (btarget, cc, XEXP (cc, 0), XEXP (cc, 1)));
4936 /* Jump to end if we're done (likely?). */
4937 s390_emit_jump (csend, cc);
4939 /* Check for changes outside mode, and loop again if so.
4940 Arrange the moves so that the compare is adjacent to the
4941 branch so that we can generate CRJ. */
4942 tmp = copy_to_reg (val);
4943 force_expand_binop (SImode, and_optab, res, ac.modemaski, val,
4945 cc = s390_emit_compare (NE, val, tmp);
4946 s390_emit_jump (csloop, cc);
4949 emit_move_insn (btarget, const0_rtx);
4953 /* Return the correct part of the bitfield. */
4954 convert_move (vtarget, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
4955 NULL_RTX, 1, OPTAB_DIRECT), 1);
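/* Editorial sketch (not part of the original source) of the loop
   expanded above: a HImode/QImode compare-and-swap is emulated with a
   full-word COMPARE AND SWAP on the containing aligned word.  The
   `sketch_cas32' primitive stands in for the CS instruction and is
   assumed, not a real API; it returns the value the word held before
   the attempt.  HImode case shown; the final truncation plays the role
   of convert_move.  */

#include <stdint.h>
#include <stdbool.h>

extern uint32_t sketch_cas32 (uint32_t *addr, uint32_t old_word,
			      uint32_t new_word);

static bool
sketch_cs_hqi (uint32_t *word, uint32_t val /* background, field cleared */,
	       uint32_t cmp_field, uint32_t new_field /* shifted into place */,
	       uint32_t modemaski, int shift, uint16_t *prev)
{
  for (;;)
    {
      uint32_t res = sketch_cas32 (word, cmp_field | val, new_field | val);

      *prev = (uint16_t) (res >> shift);  /* the subword value we found */
      if (res == (cmp_field | val))
	return true;			   /* CS succeeded */
      if ((res & modemaski) == val)
	return false;			   /* our field itself differed */
      val = res & modemaski;		   /* only outside bits changed: retry */
    }
}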
4958 /* Expand an atomic operation CODE of mode MODE. MEM is the memory location
4959 and VAL the value to play with. If AFTER is true then store the value
4960 MEM holds after the operation, if AFTER is false then store the value MEM
4961 holds before the operation. If TARGET is zero then discard that value, else
4962 store it to TARGET. */
4965 s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
4966 rtx target, rtx mem, rtx val, bool after)
4968 struct alignment_context ac;
4970 rtx new_rtx = gen_reg_rtx (SImode);
4971 rtx orig = gen_reg_rtx (SImode);
4972 rtx_code_label *csloop = gen_label_rtx ();
4974 gcc_assert (!target || register_operand (target, VOIDmode));
4975 gcc_assert (MEM_P (mem));
4977 init_alignment_context (&ac, mem, mode);
4979 /* Shift val to the correct bit positions.
4980 Preserve "icm", but prevent "ex icm". */
4981 if (!(ac.aligned && code == SET && MEM_P (val)))
4982 val = s390_expand_mask_and_shift (val, mode, ac.shift);
4984 /* Further preparation insns. */
4985 if (code == PLUS || code == MINUS)
4986 emit_move_insn (orig, val);
4987 else if (code == MULT || code == AND) /* val = "11..1<val>11..1" */
4988 val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
4989 NULL_RTX, 1, OPTAB_DIRECT);
4991 /* Load full word. Subsequent loads are performed by CS. */
4992 cmp = force_reg (SImode, ac.memsi);
4994 /* Start CS loop. */
4995 emit_label (csloop);
4996 emit_move_insn (new_rtx, cmp);
4998 /* Patch new with val at correct position. */
5003 val = expand_simple_binop (SImode, code, new_rtx, orig,
5004 NULL_RTX, 1, OPTAB_DIRECT);
5005 val = expand_simple_binop (SImode, AND, val, ac.modemask,
5006 NULL_RTX, 1, OPTAB_DIRECT);
5009 if (ac.aligned && MEM_P (val))
5010 store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0,
5014 new_rtx = expand_simple_binop (SImode, AND, new_rtx, ac.modemaski,
5015 NULL_RTX, 1, OPTAB_DIRECT);
5016 new_rtx = expand_simple_binop (SImode, IOR, new_rtx, val,
5017 NULL_RTX, 1, OPTAB_DIRECT);
5023 new_rtx = expand_simple_binop (SImode, code, new_rtx, val,
5024 NULL_RTX, 1, OPTAB_DIRECT);
5026 case MULT: /* NAND */
5027 new_rtx = expand_simple_binop (SImode, AND, new_rtx, val,
5028 NULL_RTX, 1, OPTAB_DIRECT);
5029 new_rtx = expand_simple_binop (SImode, XOR, new_rtx, ac.modemask,
5030 NULL_RTX, 1, OPTAB_DIRECT);
5036 s390_emit_jump (csloop, s390_emit_compare_and_swap (NE, cmp,
5037 ac.memsi, cmp, new_rtx));
5039 /* Return the correct part of the bitfield. */
5041 convert_move (target, expand_simple_binop (SImode, LSHIFTRT,
5042 after ? new_rtx : cmp, ac.shift,
5043 NULL_RTX, 1, OPTAB_DIRECT), 1);
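/* Editorial sketch (not part of the original source) of the expansion
   above for the subword NAND case (represented by the MULT code): the
   operation is applied to the full word, only the field selected by
   MODEMASK is patched, and CS retries until no other CPU intervened.
   `sketch_cas32' is the same assumed primitive as above.  */

#include <stdint.h>

extern uint32_t sketch_cas32 (uint32_t *addr, uint32_t old_word,
			      uint32_t new_word);

static uint32_t
sketch_atomic_nand (uint32_t *word, uint32_t val /* operand, shifted */,
		    uint32_t modemask)
{
  uint32_t cmp = *word;		/* initial full-word load */

  val |= ~modemask;		/* val = "11..1<val>11..1" */
  for (;;)
    {
      /* NAND within the field; bits outside MODEMASK pass through.  */
      uint32_t new_word = (cmp & val) ^ modemask;
      uint32_t prev = sketch_cas32 (word, cmp, new_word);

      if (prev == cmp)
	return cmp;		/* the value MEM held before the operation */
      cmp = prev;		/* lost the race: retry with the fresh value */
    }
}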
5046 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
5047 We need to emit DTP-relative relocations. */
5049 static void s390_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
5052 s390_output_dwarf_dtprel (FILE *file, int size, rtx x)
5057 fputs ("\t.long\t", file);
5060 fputs ("\t.quad\t", file);
5065 output_addr_const (file, x);
5066 fputs ("@DTPOFF", file);
5069 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
5070 /* Implement TARGET_MANGLE_TYPE. */
5073 s390_mangle_type (const_tree type)
5075 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
5076 && TARGET_LONG_DOUBLE_128)
5079 /* For all other types, use normal C++ mangling. */
5084 /* In the name of slightly smaller debug output, and to cater to
5085 general assembler lossage, recognize various UNSPEC sequences
5086 and turn them back into a direct symbol reference. */
5089 s390_delegitimize_address (rtx orig_x)
5093 orig_x = delegitimize_mem_from_attrs (orig_x);
5096 /* Extract the symbol ref from:
5097 (plus:SI (reg:SI 12 %r12)
5098 (const:SI (unspec:SI [(symbol_ref/f:SI ("*.LC0"))]
5099 UNSPEC_GOTOFF/PLTOFF)))
5101 (plus:SI (reg:SI 12 %r12)
5102 (const:SI (plus:SI (unspec:SI [(symbol_ref:SI ("L"))]
5103 UNSPEC_GOTOFF/PLTOFF)
5104 (const_int 4 [0x4])))) */
5105 if (GET_CODE (x) == PLUS
5106 && REG_P (XEXP (x, 0))
5107 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
5108 && GET_CODE (XEXP (x, 1)) == CONST)
5110 HOST_WIDE_INT offset = 0;
5112 /* The const operand. */
5113 y = XEXP (XEXP (x, 1), 0);
5115 if (GET_CODE (y) == PLUS
5116 && GET_CODE (XEXP (y, 1)) == CONST_INT)
5118 offset = INTVAL (XEXP (y, 1));
5122 if (GET_CODE (y) == UNSPEC
5123 && (XINT (y, 1) == UNSPEC_GOTOFF
5124 || XINT (y, 1) == UNSPEC_PLTOFF))
5125 return plus_constant (Pmode, XVECEXP (y, 0, 0), offset);
5128 if (GET_CODE (x) != MEM)
5132 if (GET_CODE (x) == PLUS
5133 && GET_CODE (XEXP (x, 1)) == CONST
5134 && GET_CODE (XEXP (x, 0)) == REG
5135 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5137 y = XEXP (XEXP (x, 1), 0);
5138 if (GET_CODE (y) == UNSPEC
5139 && XINT (y, 1) == UNSPEC_GOT)
5140 y = XVECEXP (y, 0, 0);
5144 else if (GET_CODE (x) == CONST)
5146 /* Extract the symbol ref from:
5147 (mem:QI (const:DI (unspec:DI [(symbol_ref:DI ("foo"))]
5148 UNSPEC_PLT/GOTENT))) */
5151 if (GET_CODE (y) == UNSPEC
5152 && (XINT (y, 1) == UNSPEC_GOTENT
5153 || XINT (y, 1) == UNSPEC_PLT))
5154 y = XVECEXP (y, 0, 0);
5161 if (GET_MODE (orig_x) != Pmode)
5163 if (GET_MODE (orig_x) == BLKmode)
5165 y = lowpart_subreg (GET_MODE (orig_x), y, Pmode);
5172 /* Output operand OP to stdio stream FILE.
5173 OP is an address (register + offset) which is not used to address data;
5174 instead the rightmost bits are interpreted as the value. */
5177 print_shift_count_operand (FILE *file, rtx op)
5179 HOST_WIDE_INT offset;
5182 /* Extract base register and offset. */
5183 if (!s390_decompose_shift_count (op, &base, &offset))
5189 gcc_assert (GET_CODE (base) == REG);
5190 gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
5191 gcc_assert (REGNO_REG_CLASS (REGNO (base)) == ADDR_REGS);
5194 /* Offsets are restricted to twelve bits. */
5195 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset & ((1 << 12) - 1));
5197 fprintf (file, "(%s)", reg_names[REGNO (base)]);
5200 /* See 'get_some_local_dynamic_name'. */
5203 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5207 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
5209 x = get_pool_constant (x);
5210 return for_each_rtx (&x, get_some_local_dynamic_name_1, 0);
5213 if (GET_CODE (x) == SYMBOL_REF
5214 && tls_symbolic_operand (x) == TLS_MODEL_LOCAL_DYNAMIC)
5216 cfun->machine->some_ld_name = XSTR (x, 0);
5223 /* Locate some local-dynamic symbol still in use by this function
5224 so that we can print its name in local-dynamic base patterns. */
5227 get_some_local_dynamic_name (void)
5231 if (cfun->machine->some_ld_name)
5232 return cfun->machine->some_ld_name;
5234 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5236 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5237 return cfun->machine->some_ld_name;
5242 /* Returns -1 if the function should not be made hotpatchable. Otherwise it
5243 returns a number >= 0 that is the desired size of the hotpatch trampoline
5246 static int s390_function_num_hotpatch_trampoline_halfwords (tree decl,
5251 if (DECL_DECLARED_INLINE_P (decl)
5252 || DECL_ARTIFICIAL (decl)
5253 || MAIN_NAME_P (DECL_NAME (decl)))
5255 /* - Explicitly inlined functions cannot be hotpatched.
5256 - Artificial functions need not be hotpatched.
5257 - Making the main function hotpatchable is useless. */
5260 attr = lookup_attribute ("hotpatch", DECL_ATTRIBUTES (decl));
5261 if (attr || s390_hotpatch_trampoline_halfwords >= 0)
5263 if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (decl)))
5266 warning (OPT_Wattributes, "function %qE with the %qs attribute"
5267 " is not hotpatchable", DECL_NAME (decl), "always_inline");
5273 get_hotpatch_attribute (attr) : s390_hotpatch_trampoline_halfwords;
5280 /* Hook to determine if one function can safely inline another. */
5283 s390_can_inline_p (tree caller, tree callee)
5285 if (s390_function_num_hotpatch_trampoline_halfwords (callee, false) >= 0)
5288 return default_target_can_inline_p (caller, callee);
5291 /* Write the extra assembler code needed to declare a function properly. */
5294 s390_asm_output_function_label (FILE *asm_out_file, const char *fname,
5297 int hotpatch_trampoline_halfwords = -1;
5301 hotpatch_trampoline_halfwords =
5302 s390_function_num_hotpatch_trampoline_halfwords (decl, true);
5303 if (hotpatch_trampoline_halfwords >= 0
5304 && decl_function_context (decl) != NULL_TREE)
5306 warning_at (DECL_SOURCE_LOCATION (decl), OPT_mhotpatch,
5307 "hotpatching is not compatible with nested functions");
5308 hotpatch_trampoline_halfwords = -1;
5312 if (hotpatch_trampoline_halfwords > 0)
5316 /* Add a trampoline code area before the function label and initialize it
5317 with two-byte nop instructions. This area can be overwritten with code
5318 that jumps to a patched version of the function. */
5319 for (i = 0; i < hotpatch_trampoline_halfwords; i++)
5320 asm_fprintf (asm_out_file, "\tnopr\t%%r7\n");
5321 /* Note: The function label must be aligned so that (a) the bytes of the
5322 following nop do not cross a cacheline boundary, and (b) a jump address
5323 (eight bytes for 64 bit targets, 4 bytes for 32 bit targets) can be
5324 stored directly before the label without crossing a cacheline
5325 boundary. All this is necessary to make sure the trampoline code can
5326 be changed atomically. */
5329 ASM_OUTPUT_LABEL (asm_out_file, fname);
5331 /* Output a four-byte nop if hotpatching is enabled. This can be overwritten
5332 atomically with a relative backwards jump to the trampoline area. */
5333 if (hotpatch_trampoline_halfwords >= 0)
5334 asm_fprintf (asm_out_file, "\tnop\t0\n");
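/* Editorial sketch (not part of the original source) of the text the
   hook above emits for a hotpatchable function, e.g. with twelve
   trampoline halfwords:

	nopr	%r7		<- twelve two-byte nops before the label
	...
   fn:	nop	0		<- four-byte nop, patchable with a jump

   A minimal emitter under the same layout assumptions:  */

#include <stdio.h>

static void
sketch_output_hotpatch_label (FILE *f, const char *fname, int halfwords)
{
  int i;

  for (i = 0; i < halfwords; i++)
    fprintf (f, "\tnopr\t%%r7\n");	/* trampoline area */
  fprintf (f, "%s:\n", fname);		/* suitably aligned function label */
  if (halfwords >= 0)
    fprintf (f, "\tnop\t0\n");		/* overwritable with a backward jump */
}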
5337 /* Output machine-dependent UNSPECs occurring in address constant X
5338 in assembler syntax to stdio stream FILE. Returns true if the
5339 constant X could be recognized, false otherwise. */
5342 s390_output_addr_const_extra (FILE *file, rtx x)
5344 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
5345 switch (XINT (x, 1))
5348 output_addr_const (file, XVECEXP (x, 0, 0));
5349 fprintf (file, "@GOTENT");
5352 output_addr_const (file, XVECEXP (x, 0, 0));
5353 fprintf (file, "@GOT");
5356 output_addr_const (file, XVECEXP (x, 0, 0));
5357 fprintf (file, "@GOTOFF");
5360 output_addr_const (file, XVECEXP (x, 0, 0));
5361 fprintf (file, "@PLT");
5364 output_addr_const (file, XVECEXP (x, 0, 0));
5365 fprintf (file, "@PLTOFF");
5368 output_addr_const (file, XVECEXP (x, 0, 0));
5369 fprintf (file, "@TLSGD");
5372 assemble_name (file, get_some_local_dynamic_name ());
5373 fprintf (file, "@TLSLDM");
5376 output_addr_const (file, XVECEXP (x, 0, 0));
5377 fprintf (file, "@DTPOFF");
5380 output_addr_const (file, XVECEXP (x, 0, 0));
5381 fprintf (file, "@NTPOFF");
5383 case UNSPEC_GOTNTPOFF:
5384 output_addr_const (file, XVECEXP (x, 0, 0));
5385 fprintf (file, "@GOTNTPOFF");
5387 case UNSPEC_INDNTPOFF:
5388 output_addr_const (file, XVECEXP (x, 0, 0));
5389 fprintf (file, "@INDNTPOFF");
5393 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 2)
5394 switch (XINT (x, 1))
5396 case UNSPEC_POOL_OFFSET:
5397 x = gen_rtx_MINUS (GET_MODE (x), XVECEXP (x, 0, 0), XVECEXP (x, 0, 1));
5398 output_addr_const (file, x);
5404 /* Output address operand ADDR in assembler syntax to
5405 stdio stream FILE. */
5408 print_operand_address (FILE *file, rtx addr)
5410 struct s390_address ad;
5412 if (s390_loadrelative_operand_p (addr, NULL, NULL))
5416 output_operand_lossage ("symbolic memory references are "
5417 "only supported on z10 or later");
5420 output_addr_const (file, addr);
5424 if (!s390_decompose_address (addr, &ad)
5425 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5426 || (ad.indx && !REGNO_OK_FOR_INDEX_P (REGNO (ad.indx))))
5427 output_operand_lossage ("cannot decompose address");
5430 output_addr_const (file, ad.disp);
5432 fprintf (file, "0");
5434 if (ad.base && ad.indx)
5435 fprintf (file, "(%s,%s)", reg_names[REGNO (ad.indx)],
5436 reg_names[REGNO (ad.base)]);
5438 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5441 /* Output operand X in assembler syntax to stdio stream FILE.
5442 CODE specified the format flag. The following format flags
5445 'C': print opcode suffix for branch condition.
5446 'D': print opcode suffix for inverse branch condition.
5447 'E': print opcode suffix for branch on index instruction.
5448 'G': print the size of the operand in bytes.
5449 'J': print tls_load/tls_gdcall/tls_ldcall suffix
5450 'M': print the second word of a TImode operand.
5451 'N': print the second word of a DImode operand.
5452 'O': print only the displacement of a memory reference.
5453 'R': print only the base register of a memory reference.
5454 'S': print S-type memory reference (base+displacement).
5455 'Y': print shift count operand.
5457 'b': print integer X as if it's an unsigned byte.
5458 'c': print integer X as if it's a signed byte.
5459 'e': "end" of DImode contiguous bitmask X.
5460 'f': "end" of SImode contiguous bitmask X.
5461 'h': print integer X as if it's a signed halfword.
5462 'i': print the first nonzero HImode part of X.
5463 'j': print the first HImode part unequal to -1 of X.
5464 'k': print the first nonzero SImode part of X.
5465 'm': print the first SImode part unequal to -1 of X.
5466 'o': print integer X as if it's an unsigned 32-bit word.
5467 's': "start" of DImode contiguous bitmask X.
5468 't': "start" of SImode contiguous bitmask X.
5469 'x': print integer X as if it's an unsigned halfword.
5473 print_operand (FILE *file, rtx x, int code)
5480 fprintf (file, s390_branch_condition_mnemonic (x, FALSE));
5484 fprintf (file, s390_branch_condition_mnemonic (x, TRUE));
5488 if (GET_CODE (x) == LE)
5489 fprintf (file, "l");
5490 else if (GET_CODE (x) == GT)
5491 fprintf (file, "h");
5493 output_operand_lossage ("invalid comparison operator "
5494 "for 'E' output modifier");
5498 if (GET_CODE (x) == SYMBOL_REF)
5500 fprintf (file, "%s", ":tls_load:");
5501 output_addr_const (file, x);
5503 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
5505 fprintf (file, "%s", ":tls_gdcall:");
5506 output_addr_const (file, XVECEXP (x, 0, 0));
5508 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM)
5510 fprintf (file, "%s", ":tls_ldcall:");
5511 assemble_name (file, get_some_local_dynamic_name ());
5514 output_operand_lossage ("invalid reference for 'J' output modifier");
5518 fprintf (file, "%u", GET_MODE_SIZE (GET_MODE (x)));
5523 struct s390_address ad;
5528 output_operand_lossage ("memory reference expected for "
5529 "'O' output modifier");
5533 ret = s390_decompose_address (XEXP (x, 0), &ad);
5536 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5539 output_operand_lossage ("invalid address for 'O' output modifier");
5544 output_addr_const (file, ad.disp);
5546 fprintf (file, "0");
5552 struct s390_address ad;
5557 output_operand_lossage ("memory reference expected for "
5558 "'R' output modifier");
5562 ret = s390_decompose_address (XEXP (x, 0), &ad);
5565 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5568 output_operand_lossage ("invalid address for 'R' output modifier");
5573 fprintf (file, "%s", reg_names[REGNO (ad.base)]);
5575 fprintf (file, "0");
5581 struct s390_address ad;
5586 output_operand_lossage ("memory reference expected for "
5587 "'S' output modifier");
5590 ret = s390_decompose_address (XEXP (x, 0), &ad);
5593 || (ad.base && !REGNO_OK_FOR_BASE_P (REGNO (ad.base)))
5596 output_operand_lossage ("invalid address for 'S' output modifier");
5601 output_addr_const (file, ad.disp);
5603 fprintf (file, "0");
5606 fprintf (file, "(%s)", reg_names[REGNO (ad.base)]);
5611 if (GET_CODE (x) == REG)
5612 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5613 else if (GET_CODE (x) == MEM)
5614 x = change_address (x, VOIDmode,
5615 plus_constant (Pmode, XEXP (x, 0), 4));
5617 output_operand_lossage ("register or memory expression expected "
5618 "for 'N' output modifier");
5622 if (GET_CODE (x) == REG)
5623 x = gen_rtx_REG (GET_MODE (x), REGNO (x) + 1);
5624 else if (GET_CODE (x) == MEM)
5625 x = change_address (x, VOIDmode,
5626 plus_constant (Pmode, XEXP (x, 0), 8));
5628 output_operand_lossage ("register or memory expression expected "
5629 "for 'M' output modifier");
5633 print_shift_count_operand (file, x);
5637 switch (GET_CODE (x))
5640 fprintf (file, "%s", reg_names[REGNO (x)]);
5644 output_address (XEXP (x, 0));
5651 output_addr_const (file, x);
5664 ival = ((ival & 0xff) ^ 0x80) - 0x80;
5670 ival = ((ival & 0xffff) ^ 0x8000) - 0x8000;
5673 ival = s390_extract_part (x, HImode, 0);
5676 ival = s390_extract_part (x, HImode, -1);
5679 ival = s390_extract_part (x, SImode, 0);
5682 ival = s390_extract_part (x, SImode, -1);
5693 len = (code == 's' || code == 'e' ? 64 : 32);
5694 ok = s390_contiguous_bitmask_p (ival, len, &pos, &len);
5696 if (code == 's' || code == 't')
5697 ival = 64 - pos - len;
5699 ival = 64 - 1 - pos;
5703 output_operand_lossage ("invalid constant for output modifier '%c'", code);
5705 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
5709 gcc_assert (GET_MODE (x) == VOIDmode);
5711 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xff);
5712 else if (code == 'x')
5713 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x) & 0xffff);
5714 else if (code == 'h')
5715 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5716 ((CONST_DOUBLE_LOW (x) & 0xffff) ^ 0x8000) - 0x8000);
5720 output_operand_lossage ("invalid constant - try using "
5721 "an output modifier");
5723 output_operand_lossage ("invalid constant for output modifier '%c'",
5730 output_operand_lossage ("invalid expression - try using "
5731 "an output modifier");
5733 output_operand_lossage ("invalid expression for output "
5734 "modifier '%c'", code);
5739 /* Target hook for assembling integer objects. We need to define it
5740 here to work around a bug in some versions of GAS, which couldn't
5741 handle values smaller than INT_MIN when printed in decimal. */
5744 s390_assemble_integer (rtx x, unsigned int size, int aligned_p)
5746 if (size == 8 && aligned_p
5747 && GET_CODE (x) == CONST_INT && INTVAL (x) < INT_MIN)
5749 fprintf (asm_out_file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n",
5753 return default_assemble_integer (x, size, aligned_p);
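/* Editorial sketch (not part of the original source) of the workaround
   above: a value below INT_MIN is printed in hexadecimal, e.g.
   "\t.quad\t0x8000000000000000" rather than the decimal form the
   affected assemblers mis-parse.  */

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static void
sketch_assemble_quad (FILE *f, int64_t v)
{
  if (v < INT_MIN)
    fprintf (f, "\t.quad\t0x%llx\n", (unsigned long long) v);
  else
    fprintf (f, "\t.quad\t%lld\n", (long long) v);
}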
5756 /* Returns true if register REGNO is used for forming
5757 a memory address in expression X. */
5760 reg_used_in_mem_p (int regno, rtx x)
5762 enum rtx_code code = GET_CODE (x);
5768 if (refers_to_regno_p (regno, regno+1,
5772 else if (code == SET
5773 && GET_CODE (SET_DEST (x)) == PC)
5775 if (refers_to_regno_p (regno, regno+1,
5780 fmt = GET_RTX_FORMAT (code);
5781 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5784 && reg_used_in_mem_p (regno, XEXP (x, i)))
5787 else if (fmt[i] == 'E')
5788 for (j = 0; j < XVECLEN (x, i); j++)
5789 if (reg_used_in_mem_p (regno, XVECEXP (x, i, j)))
5795 /* Returns true if expression DEP_RTX sets an address register
5796 used by instruction INSN to address memory. */
5799 addr_generation_dependency_p (rtx dep_rtx, rtx insn)
5803 if (NONJUMP_INSN_P (dep_rtx))
5804 dep_rtx = PATTERN (dep_rtx);
5806 if (GET_CODE (dep_rtx) == SET)
5808 target = SET_DEST (dep_rtx);
5809 if (GET_CODE (target) == STRICT_LOW_PART)
5810 target = XEXP (target, 0);
5811 while (GET_CODE (target) == SUBREG)
5812 target = SUBREG_REG (target);
5814 if (GET_CODE (target) == REG)
5816 int regno = REGNO (target);
5818 if (s390_safe_attr_type (insn) == TYPE_LA)
5820 pat = PATTERN (insn);
5821 if (GET_CODE (pat) == PARALLEL)
5823 gcc_assert (XVECLEN (pat, 0) == 2);
5824 pat = XVECEXP (pat, 0, 0);
5826 gcc_assert (GET_CODE (pat) == SET);
5827 return refers_to_regno_p (regno, regno+1, SET_SRC (pat), 0);
5829 else if (get_attr_atype (insn) == ATYPE_AGEN)
5830 return reg_used_in_mem_p (regno, PATTERN (insn));
5836 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
5839 s390_agen_dep_p (rtx dep_insn, rtx insn)
5841 rtx dep_rtx = PATTERN (dep_insn);
5844 if (GET_CODE (dep_rtx) == SET
5845 && addr_generation_dependency_p (dep_rtx, insn))
5847 else if (GET_CODE (dep_rtx) == PARALLEL)
5849 for (i = 0; i < XVECLEN (dep_rtx, 0); i++)
5851 if (addr_generation_dependency_p (XVECEXP (dep_rtx, 0, i), insn))
5859 /* A C statement (sans semicolon) to update the integer scheduling priority
5860 INSN_PRIORITY (INSN). Increase the priority to execute the INSN earlier,
5861 reduce the priority to execute INSN later. Do not define this macro if
5862 you do not need to adjust the scheduling priorities of insns.
5864 A STD instruction should be scheduled earlier,
5865 in order to use the bypass. */
5867 s390_adjust_priority (rtx_insn *insn, int priority)
5869 if (! INSN_P (insn))
5872 if (s390_tune != PROCESSOR_2084_Z990
5873 && s390_tune != PROCESSOR_2094_Z9_109
5874 && s390_tune != PROCESSOR_2097_Z10
5875 && s390_tune != PROCESSOR_2817_Z196
5876 && s390_tune != PROCESSOR_2827_ZEC12)
5879 switch (s390_safe_attr_type (insn))
5883 priority = priority << 3;
5887 priority = priority << 1;
5896 /* The number of instructions that can be issued per cycle. */
5899 s390_issue_rate (void)
5903 case PROCESSOR_2084_Z990:
5904 case PROCESSOR_2094_Z9_109:
5905 case PROCESSOR_2817_Z196:
5907 case PROCESSOR_2097_Z10:
5908 case PROCESSOR_2827_ZEC12:
5916 s390_first_cycle_multipass_dfa_lookahead (void)
5921 /* Annotate every literal pool reference in X by an UNSPEC_LTREF expression.
5922 Fix up MEMs as required. */
5925 annotate_constant_pool_refs (rtx *x)
5930 gcc_assert (GET_CODE (*x) != SYMBOL_REF
5931 || !CONSTANT_POOL_ADDRESS_P (*x));
5933 /* Literal pool references can only occur inside a MEM ... */
5934 if (GET_CODE (*x) == MEM)
5936 rtx memref = XEXP (*x, 0);
5938 if (GET_CODE (memref) == SYMBOL_REF
5939 && CONSTANT_POOL_ADDRESS_P (memref))
5941 rtx base = cfun->machine->base_reg;
5942 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, memref, base),
5945 *x = replace_equiv_address (*x, addr);
5949 if (GET_CODE (memref) == CONST
5950 && GET_CODE (XEXP (memref, 0)) == PLUS
5951 && GET_CODE (XEXP (XEXP (memref, 0), 1)) == CONST_INT
5952 && GET_CODE (XEXP (XEXP (memref, 0), 0)) == SYMBOL_REF
5953 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (memref, 0), 0)))
5955 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (memref, 0), 1));
5956 rtx sym = XEXP (XEXP (memref, 0), 0);
5957 rtx base = cfun->machine->base_reg;
5958 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5961 *x = replace_equiv_address (*x, plus_constant (Pmode, addr, off));
5966 /* ... or a load-address type pattern. */
5967 if (GET_CODE (*x) == SET)
5969 rtx addrref = SET_SRC (*x);
5971 if (GET_CODE (addrref) == SYMBOL_REF
5972 && CONSTANT_POOL_ADDRESS_P (addrref))
5974 rtx base = cfun->machine->base_reg;
5975 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addrref, base),
5978 SET_SRC (*x) = addr;
5982 if (GET_CODE (addrref) == CONST
5983 && GET_CODE (XEXP (addrref, 0)) == PLUS
5984 && GET_CODE (XEXP (XEXP (addrref, 0), 1)) == CONST_INT
5985 && GET_CODE (XEXP (XEXP (addrref, 0), 0)) == SYMBOL_REF
5986 && CONSTANT_POOL_ADDRESS_P (XEXP (XEXP (addrref, 0), 0)))
5988 HOST_WIDE_INT off = INTVAL (XEXP (XEXP (addrref, 0), 1));
5989 rtx sym = XEXP (XEXP (addrref, 0), 0);
5990 rtx base = cfun->machine->base_reg;
5991 rtx addr = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, sym, base),
5994 SET_SRC (*x) = plus_constant (Pmode, addr, off);
5999 /* Annotate LTREL_BASE as well. */
6000 if (GET_CODE (*x) == UNSPEC
6001 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6003 rtx base = cfun->machine->base_reg;
6004 *x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XVECEXP (*x, 0, 0), base),
6009 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6010 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6014 annotate_constant_pool_refs (&XEXP (*x, i));
6016 else if (fmt[i] == 'E')
6018 for (j = 0; j < XVECLEN (*x, i); j++)
6019 annotate_constant_pool_refs (&XVECEXP (*x, i, j));
6024 /* Split all branches that exceed the maximum distance.
6025 Returns true if this created a new literal pool entry. */
6028 s390_split_branches (void)
6030 rtx temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
6031 int new_literal = 0, ret;
6036 /* We need correct insn addresses. */
6038 shorten_branches (get_insns ());
6040 /* Find all branches that exceed 64KB, and split them. */
6042 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6044 if (! JUMP_P (insn) || tablejump_p (insn, NULL, NULL))
6047 pat = PATTERN (insn);
6048 if (GET_CODE (pat) == PARALLEL)
6049 pat = XVECEXP (pat, 0, 0);
6050 if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
6053 if (GET_CODE (SET_SRC (pat)) == LABEL_REF)
6055 label = &SET_SRC (pat);
6057 else if (GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE)
6059 if (GET_CODE (XEXP (SET_SRC (pat), 1)) == LABEL_REF)
6060 label = &XEXP (SET_SRC (pat), 1);
6061 else if (GET_CODE (XEXP (SET_SRC (pat), 2)) == LABEL_REF)
6062 label = &XEXP (SET_SRC (pat), 2);
6069 if (get_attr_length (insn) <= 4)
6072 /* We are going to use the return register as a scratch register;
6073 make sure it will be saved/restored by the prologue/epilogue. */
6074 cfun_frame_layout.save_return_addr_p = 1;
6079 rtx mem = force_const_mem (Pmode, *label);
6080 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, mem), insn);
6081 INSN_ADDRESSES_NEW (set_insn, -1);
6082 annotate_constant_pool_refs (&PATTERN (set_insn));
6089 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, *label),
6090 UNSPEC_LTREL_OFFSET);
6091 target = gen_rtx_CONST (Pmode, target);
6092 target = force_const_mem (Pmode, target);
6093 rtx_insn *set_insn = emit_insn_before (gen_rtx_SET (Pmode, temp_reg, target), insn);
6094 INSN_ADDRESSES_NEW (set_insn, -1);
6095 annotate_constant_pool_refs (&PATTERN (set_insn));
6097 target = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, XEXP (target, 0),
6098 cfun->machine->base_reg),
6100 target = gen_rtx_PLUS (Pmode, temp_reg, target);
6103 ret = validate_change (insn, label, target, 0);
6111 /* Find an annotated literal pool symbol referenced in RTX X,
6112 and store it at REF. Will abort if X contains references to
6113 more than one such pool symbol; multiple references to the same
6114 symbol are allowed, however.
6116 The rtx pointed to by REF must be initialized to NULL_RTX
6117 by the caller before calling this routine. */
6120 find_constant_pool_ref (rtx x, rtx *ref)
6125 /* Ignore LTREL_BASE references. */
6126 if (GET_CODE (x) == UNSPEC
6127 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6129 /* Likewise POOL_ENTRY insns. */
6130 if (GET_CODE (x) == UNSPEC_VOLATILE
6131 && XINT (x, 1) == UNSPECV_POOL_ENTRY)
6134 gcc_assert (GET_CODE (x) != SYMBOL_REF
6135 || !CONSTANT_POOL_ADDRESS_P (x));
6137 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_LTREF)
6139 rtx sym = XVECEXP (x, 0, 0);
6140 gcc_assert (GET_CODE (sym) == SYMBOL_REF
6141 && CONSTANT_POOL_ADDRESS_P (sym));
6143 if (*ref == NULL_RTX)
6146 gcc_assert (*ref == sym);
6151 fmt = GET_RTX_FORMAT (GET_CODE (x));
6152 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6156 find_constant_pool_ref (XEXP (x, i), ref);
6158 else if (fmt[i] == 'E')
6160 for (j = 0; j < XVECLEN (x, i); j++)
6161 find_constant_pool_ref (XVECEXP (x, i, j), ref);
6166 /* Replace every reference to the annotated literal pool
6167 symbol REF in X by its base plus OFFSET. */
6170 replace_constant_pool_ref (rtx *x, rtx ref, rtx offset)
6175 gcc_assert (*x != ref);
6177 if (GET_CODE (*x) == UNSPEC
6178 && XINT (*x, 1) == UNSPEC_LTREF
6179 && XVECEXP (*x, 0, 0) == ref)
6181 *x = gen_rtx_PLUS (Pmode, XVECEXP (*x, 0, 1), offset);
6185 if (GET_CODE (*x) == PLUS
6186 && GET_CODE (XEXP (*x, 1)) == CONST_INT
6187 && GET_CODE (XEXP (*x, 0)) == UNSPEC
6188 && XINT (XEXP (*x, 0), 1) == UNSPEC_LTREF
6189 && XVECEXP (XEXP (*x, 0), 0, 0) == ref)
6191 rtx addr = gen_rtx_PLUS (Pmode, XVECEXP (XEXP (*x, 0), 0, 1), offset);
6192 *x = plus_constant (Pmode, addr, INTVAL (XEXP (*x, 1)));
6196 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6197 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6201 replace_constant_pool_ref (&XEXP (*x, i), ref, offset);
6203 else if (fmt[i] == 'E')
6205 for (j = 0; j < XVECLEN (*x, i); j++)
6206 replace_constant_pool_ref (&XVECEXP (*x, i, j), ref, offset);
6211 /* Check whether X contains an UNSPEC_LTREL_BASE.
6212 Return its constant pool symbol if found, NULL_RTX otherwise. */
6215 find_ltrel_base (rtx x)
6220 if (GET_CODE (x) == UNSPEC
6221 && XINT (x, 1) == UNSPEC_LTREL_BASE)
6222 return XVECEXP (x, 0, 0);
6224 fmt = GET_RTX_FORMAT (GET_CODE (x));
6225 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6229 rtx fnd = find_ltrel_base (XEXP (x, i));
6233 else if (fmt[i] == 'E')
6235 for (j = 0; j < XVECLEN (x, i); j++)
6237 rtx fnd = find_ltrel_base (XVECEXP (x, i, j));
6247 /* Replace any occurrence of UNSPEC_LTREL_BASE in X with its base. */
6250 replace_ltrel_base (rtx *x)
6255 if (GET_CODE (*x) == UNSPEC
6256 && XINT (*x, 1) == UNSPEC_LTREL_BASE)
6258 *x = XVECEXP (*x, 0, 1);
6262 fmt = GET_RTX_FORMAT (GET_CODE (*x));
6263 for (i = GET_RTX_LENGTH (GET_CODE (*x)) - 1; i >= 0; i--)
6267 replace_ltrel_base (&XEXP (*x, i));
6269 else if (fmt[i] == 'E')
6271 for (j = 0; j < XVECLEN (*x, i); j++)
6272 replace_ltrel_base (&XVECEXP (*x, i, j));
6278 /* We keep a list of constants which we have to add to internal
6279 constant tables in the middle of large functions. */
6281 #define NR_C_MODES 11
6282 enum machine_mode constant_modes[NR_C_MODES] =
6284 TFmode, TImode, TDmode,
6285 DFmode, DImode, DDmode,
6286 SFmode, SImode, SDmode,
6293 struct constant *next;
6295 rtx_code_label *label;
6298 struct constant_pool
6300 struct constant_pool *next;
6301 rtx_insn *first_insn;
6302 rtx_insn *pool_insn;
6304 rtx_insn *emit_pool_after;
6306 struct constant *constants[NR_C_MODES];
6307 struct constant *execute;
6308 rtx_code_label *label;
6312 /* Allocate new constant_pool structure. */
6314 static struct constant_pool *
6315 s390_alloc_pool (void)
6317 struct constant_pool *pool;
6320 pool = (struct constant_pool *) xmalloc (sizeof *pool);
6322 for (i = 0; i < NR_C_MODES; i++)
6323 pool->constants[i] = NULL;
6325 pool->execute = NULL;
6326 pool->label = gen_label_rtx ();
6327 pool->first_insn = NULL;
6328 pool->pool_insn = NULL;
6329 pool->insns = BITMAP_ALLOC (NULL);
6331 pool->emit_pool_after = NULL;
6336 /* Create new constant pool covering instructions starting at INSN
6337 and chain it to the end of POOL_LIST. */
6339 static struct constant_pool *
6340 s390_start_pool (struct constant_pool **pool_list, rtx_insn *insn)
6342 struct constant_pool *pool, **prev;
6344 pool = s390_alloc_pool ();
6345 pool->first_insn = insn;
6347 for (prev = pool_list; *prev; prev = &(*prev)->next)
6354 /* End range of instructions covered by POOL at INSN and emit
6355 placeholder insn representing the pool. */
6358 s390_end_pool (struct constant_pool *pool, rtx_insn *insn)
6360 rtx pool_size = GEN_INT (pool->size + 8 /* alignment slop */);
6363 insn = get_last_insn ();
6365 pool->pool_insn = emit_insn_after (gen_pool (pool_size), insn);
6366 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6369 /* Add INSN to the list of insns covered by POOL. */
6372 s390_add_pool_insn (struct constant_pool *pool, rtx insn)
6374 bitmap_set_bit (pool->insns, INSN_UID (insn));
6377 /* Return pool out of POOL_LIST that covers INSN. */
6379 static struct constant_pool *
6380 s390_find_pool (struct constant_pool *pool_list, rtx insn)
6382 struct constant_pool *pool;
6384 for (pool = pool_list; pool; pool = pool->next)
6385 if (bitmap_bit_p (pool->insns, INSN_UID (insn)))
6391 /* Add constant VAL of mode MODE to the constant pool POOL. */
6394 s390_add_constant (struct constant_pool *pool, rtx val, enum machine_mode mode)
6399 for (i = 0; i < NR_C_MODES; i++)
6400 if (constant_modes[i] == mode)
6402 gcc_assert (i != NR_C_MODES);
6404 for (c = pool->constants[i]; c != NULL; c = c->next)
6405 if (rtx_equal_p (val, c->value))
6410 c = (struct constant *) xmalloc (sizeof *c);
6412 c->label = gen_label_rtx ();
6413 c->next = pool->constants[i];
6414 pool->constants[i] = c;
6415 pool->size += GET_MODE_SIZE (mode);
6419 /* Return an rtx that represents the offset of X from the start of
6423 s390_pool_offset (struct constant_pool *pool, rtx x)
6427 label = gen_rtx_LABEL_REF (GET_MODE (x), pool->label);
6428 x = gen_rtx_UNSPEC (GET_MODE (x), gen_rtvec (2, x, label),
6429 UNSPEC_POOL_OFFSET);
6430 return gen_rtx_CONST (GET_MODE (x), x);
6433 /* Find constant VAL of mode MODE in the constant pool POOL.
6434 Return an RTX describing the distance from the start of
6435 the pool to the location of the new constant. */
6438 s390_find_constant (struct constant_pool *pool, rtx val,
6439 enum machine_mode mode)
6444 for (i = 0; i < NR_C_MODES; i++)
6445 if (constant_modes[i] == mode)
6447 gcc_assert (i != NR_C_MODES);
6449 for (c = pool->constants[i]; c != NULL; c = c->next)
6450 if (rtx_equal_p (val, c->value))
6455 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
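/* Editorial sketch (not part of the original source) of the per-mode
   constant lists used by s390_add_constant and s390_find_constant:
   constants are deduplicated on insertion and each entry carries its
   own label.  Strings and strcmp stand in here for rtxes and
   rtx_equal_p.  */

#include <stdlib.h>
#include <string.h>

struct sketch_constant
{
  struct sketch_constant *next;
  const char *value;
  int label;
};

static int sketch_label_counter;

static struct sketch_constant *
sketch_add_constant (struct sketch_constant **head, const char *value)
{
  struct sketch_constant *c;

  for (c = *head; c; c = c->next)
    if (strcmp (c->value, value) == 0)
      return c;			/* already pooled: share the entry */

  c = (struct sketch_constant *) malloc (sizeof *c);
  c->value = value;
  c->label = sketch_label_counter++;
  c->next = *head;
  *head = c;
  return c;
}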
6458 /* Check whether INSN is an execute. Return the label_ref to its
6459 execute target template if so, NULL_RTX otherwise. */
6462 s390_execute_label (rtx insn)
6464 if (NONJUMP_INSN_P (insn)
6465 && GET_CODE (PATTERN (insn)) == PARALLEL
6466 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == UNSPEC
6467 && XINT (XVECEXP (PATTERN (insn), 0, 0), 1) == UNSPEC_EXECUTE)
6468 return XVECEXP (XVECEXP (PATTERN (insn), 0, 0), 0, 2);
6473 /* Add execute target for INSN to the constant pool POOL. */
6476 s390_add_execute (struct constant_pool *pool, rtx insn)
6480 for (c = pool->execute; c != NULL; c = c->next)
6481 if (INSN_UID (insn) == INSN_UID (c->value))
6486 c = (struct constant *) xmalloc (sizeof *c);
6488 c->label = gen_label_rtx ();
6489 c->next = pool->execute;
6495 /* Find execute target for INSN in the constant pool POOL.
6496 Return an RTX describing the distance from the start of
6497 the pool to the location of the execute target. */
6500 s390_find_execute (struct constant_pool *pool, rtx insn)
6504 for (c = pool->execute; c != NULL; c = c->next)
6505 if (INSN_UID (insn) == INSN_UID (c->value))
6510 return s390_pool_offset (pool, gen_rtx_LABEL_REF (Pmode, c->label));
6513 /* For an execute INSN, extract the execute target template. */
6516 s390_execute_target (rtx insn)
6518 rtx pattern = PATTERN (insn);
6519 gcc_assert (s390_execute_label (insn));
6521 if (XVECLEN (pattern, 0) == 2)
6523 pattern = copy_rtx (XVECEXP (pattern, 0, 1));
6527 rtvec vec = rtvec_alloc (XVECLEN (pattern, 0) - 1);
6530 for (i = 0; i < XVECLEN (pattern, 0) - 1; i++)
6531 RTVEC_ELT (vec, i) = copy_rtx (XVECEXP (pattern, 0, i + 1));
6533 pattern = gen_rtx_PARALLEL (VOIDmode, vec);
6539 /* Indicate that INSN cannot be duplicated. This is the case for
6540 execute insns that carry a unique label. */
6543 s390_cannot_copy_insn_p (rtx_insn *insn)
6545 rtx label = s390_execute_label (insn);
6546 return label && label != const0_rtx;
6549 /* Dump out the constants in POOL. If REMOTE_LABEL is true,
6550 do not emit the pool base label. */
6553 s390_dump_pool (struct constant_pool *pool, bool remote_label)
6556 rtx_insn *insn = pool->pool_insn;
6559 /* Switch to rodata section. */
6560 if (TARGET_CPU_ZARCH)
6562 insn = emit_insn_after (gen_pool_section_start (), insn);
6563 INSN_ADDRESSES_NEW (insn, -1);
6566 /* Ensure minimum pool alignment. */
6567 if (TARGET_CPU_ZARCH)
6568 insn = emit_insn_after (gen_pool_align (GEN_INT (8)), insn);
6570 insn = emit_insn_after (gen_pool_align (GEN_INT (4)), insn);
6571 INSN_ADDRESSES_NEW (insn, -1);
6573 /* Emit pool base label. */
6576 insn = emit_label_after (pool->label, insn);
6577 INSN_ADDRESSES_NEW (insn, -1);
6580 /* Dump constants in descending alignment requirement order,
6581 ensuring proper alignment for every constant. */
6582 for (i = 0; i < NR_C_MODES; i++)
6583 for (c = pool->constants[i]; c; c = c->next)
6585 /* Convert UNSPEC_LTREL_OFFSET unspecs to pool-relative references. */
6586 rtx value = copy_rtx (c->value);
6587 if (GET_CODE (value) == CONST
6588 && GET_CODE (XEXP (value, 0)) == UNSPEC
6589 && XINT (XEXP (value, 0), 1) == UNSPEC_LTREL_OFFSET
6590 && XVECLEN (XEXP (value, 0), 0) == 1)
6591 value = s390_pool_offset (pool, XVECEXP (XEXP (value, 0), 0, 0));
6593 insn = emit_label_after (c->label, insn);
6594 INSN_ADDRESSES_NEW (insn, -1);
6596 value = gen_rtx_UNSPEC_VOLATILE (constant_modes[i],
6597 gen_rtvec (1, value),
6598 UNSPECV_POOL_ENTRY);
6599 insn = emit_insn_after (value, insn);
6600 INSN_ADDRESSES_NEW (insn, -1);
6603 /* Ensure minimum alignment for instructions. */
6604 insn = emit_insn_after (gen_pool_align (GEN_INT (2)), insn);
6605 INSN_ADDRESSES_NEW (insn, -1);
6607 /* Output in-pool execute template insns. */
6608 for (c = pool->execute; c; c = c->next)
6610 insn = emit_label_after (c->label, insn);
6611 INSN_ADDRESSES_NEW (insn, -1);
6613 insn = emit_insn_after (s390_execute_target (c->value), insn);
6614 INSN_ADDRESSES_NEW (insn, -1);
6617 /* Switch back to previous section. */
6618 if (TARGET_CPU_ZARCH)
6620 insn = emit_insn_after (gen_pool_section_end (), insn);
6621 INSN_ADDRESSES_NEW (insn, -1);
6624 insn = emit_barrier_after (insn);
6625 INSN_ADDRESSES_NEW (insn, -1);
6627 /* Remove placeholder insn. */
6628 remove_insn (pool->pool_insn);
6631 /* Free all memory used by POOL. */
6634 s390_free_pool (struct constant_pool *pool)
6636 struct constant *c, *next;
6639 for (i = 0; i < NR_C_MODES; i++)
6640 for (c = pool->constants[i]; c; c = next)
6646 for (c = pool->execute; c; c = next)
6652 BITMAP_FREE (pool->insns);
6657 /* Collect main literal pool. Return NULL on overflow. */
6659 static struct constant_pool *
6660 s390_mainpool_start (void)
6662 struct constant_pool *pool;
6665 pool = s390_alloc_pool ();
6667 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6669 if (NONJUMP_INSN_P (insn)
6670 && GET_CODE (PATTERN (insn)) == SET
6671 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC_VOLATILE
6672 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPECV_MAIN_POOL)
6674 /* There might be two main_pool instructions if base_reg
6675 is call-clobbered; one for shrink-wrapped code and one
6676 for the rest. We want to keep the first. */
6677 if (pool->pool_insn)
6679 insn = PREV_INSN (insn);
6680 delete_insn (NEXT_INSN (insn));
6683 pool->pool_insn = insn;
6686 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6688 s390_add_execute (pool, insn);
6690 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6692 rtx pool_ref = NULL_RTX;
6693 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6696 rtx constant = get_pool_constant (pool_ref);
6697 enum machine_mode mode = get_pool_mode (pool_ref);
6698 s390_add_constant (pool, constant, mode);
6702 /* If hot/cold partitioning is enabled we have to make sure that
6703 the literal pool is emitted in the same section where the
6704 initialization of the literal pool base pointer takes place.
6705 emit_pool_after is only used in the non-overflow case on
6706 non-zarch CPUs, where we can emit the literal pool at the end of the
6707 function body within the text section. */
6709 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS
6710 && !pool->emit_pool_after)
6711 pool->emit_pool_after = PREV_INSN (insn);
6714 gcc_assert (pool->pool_insn || pool->size == 0);
6716 if (pool->size >= 4096)
6718 /* We're going to chunkify the pool, so remove the main
6719 pool placeholder insn. */
6720 remove_insn (pool->pool_insn);
6722 s390_free_pool (pool);
6726 /* If the function ends with the section where the literal pool
6727 should be emitted, set the marker to its end. */
6728 if (pool && !pool->emit_pool_after)
6729 pool->emit_pool_after = get_last_insn ();
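/* Editorial sketch (not part of the original source): the main pool is
   only usable while every entry remains addressable from the pool base
   with a 12-bit unsigned displacement, hence the overflow check against
   4096 above.  */

static int
sketch_mainpool_offset_ok (long offset)
{
  return offset >= 0 && offset < 4096;	/* D-field range: 0 .. 4095 */
}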
6734 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6735 Modify the current function to output the pool constants as well as
6736 the pool register setup instruction. */
6739 s390_mainpool_finish (struct constant_pool *pool)
6741 rtx base_reg = cfun->machine->base_reg;
6743 /* If the pool is empty, we're done. */
6744 if (pool->size == 0)
6746 /* We don't actually need a base register after all. */
6747 cfun->machine->base_reg = NULL_RTX;
6749 if (pool->pool_insn)
6750 remove_insn (pool->pool_insn);
6751 s390_free_pool (pool);
6755 /* We need correct insn addresses. */
6756 shorten_branches (get_insns ());
6758 /* On zSeries, we use a LARL to load the pool register. The pool is
6759 located in the .rodata section, so we emit it after the function. */
6760 if (TARGET_CPU_ZARCH)
6762 rtx set = gen_main_base_64 (base_reg, pool->label);
6763 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
6764 INSN_ADDRESSES_NEW (insn, -1);
6765 remove_insn (pool->pool_insn);
6767 insn = get_last_insn ();
6768 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6769 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6771 s390_dump_pool (pool, 0);
6774 /* On S/390, if the total size of the function's code plus literal pool
6775 does not exceed 4096 bytes, we use BASR to set up a function base
6776 pointer, and emit the literal pool at the end of the function. */
6777 else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
6778 + pool->size + 8 /* alignment slop */ < 4096)
6780 rtx set = gen_main_base_31_small (base_reg, pool->label);
6781 rtx_insn *insn = emit_insn_after (set, pool->pool_insn);
6782 INSN_ADDRESSES_NEW (insn, -1);
6783 remove_insn (pool->pool_insn);
6785 insn = emit_label_after (pool->label, insn);
6786 INSN_ADDRESSES_NEW (insn, -1);
6788 /* emit_pool_after will be set by s390_mainpool_start to the
6789 last insn of the section where the literal pool should be
6791 insn = pool->emit_pool_after;
6793 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6794 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6796 s390_dump_pool (pool, 1);
6799 /* Otherwise, we emit an inline literal pool and use BASR to branch
6800 over it, setting up the pool register at the same time. */
6803 rtx_code_label *pool_end = gen_label_rtx ();
6805 rtx pat = gen_main_base_31_large (base_reg, pool->label, pool_end);
6806 rtx_insn *insn = emit_jump_insn_after (pat, pool->pool_insn);
6807 JUMP_LABEL (insn) = pool_end;
6808 INSN_ADDRESSES_NEW (insn, -1);
6809 remove_insn (pool->pool_insn);
6811 insn = emit_label_after (pool->label, insn);
6812 INSN_ADDRESSES_NEW (insn, -1);
6814 pool->pool_insn = emit_insn_after (gen_pool (const0_rtx), insn);
6815 INSN_ADDRESSES_NEW (pool->pool_insn, -1);
6817 insn = emit_label_after (pool_end, pool->pool_insn);
6818 INSN_ADDRESSES_NEW (insn, -1);
6820 s390_dump_pool (pool, 1);
6824 /* Replace all literal pool references. */
6826 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
6829 replace_ltrel_base (&PATTERN (insn));
6831 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6833 rtx addr, pool_ref = NULL_RTX;
6834 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6837 if (s390_execute_label (insn))
6838 addr = s390_find_execute (pool, insn);
6840 addr = s390_find_constant (pool, get_pool_constant (pool_ref),
6841 get_pool_mode (pool_ref));
6843 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
6844 INSN_CODE (insn) = -1;
6850 /* Free the pool. */
6851 s390_free_pool (pool);
6854 /* POOL holds the main literal pool as collected by s390_mainpool_start.
6855 We have decided we cannot use this pool, so revert all changes
6856 to the current function that were done by s390_mainpool_start. */
6858 s390_mainpool_cancel (struct constant_pool *pool)
6860 /* We didn't actually change the instruction stream, so simply
6861 free the pool memory. */
6862 s390_free_pool (pool);
6866 /* Chunkify the literal pool. */
6868 #define S390_POOL_CHUNK_MIN 0xc00
6869 #define S390_POOL_CHUNK_MAX 0xe00
6871 static struct constant_pool *
6872 s390_chunkify_start (void)
6874 struct constant_pool *curr_pool = NULL, *pool_list = NULL;
6877 rtx pending_ltrel = NULL_RTX;
6880 rtx (*gen_reload_base) (rtx, rtx) =
6881 TARGET_CPU_ZARCH? gen_reload_base_64 : gen_reload_base_31;
6884 /* We need correct insn addresses. */
6886 shorten_branches (get_insns ());
6888 /* Scan all insns and move literals to pool chunks. */
6890 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6892 bool section_switch_p = false;
6894 /* Check for pending LTREL_BASE. */
6897 rtx ltrel_base = find_ltrel_base (PATTERN (insn));
6900 gcc_assert (ltrel_base == pending_ltrel);
6901 pending_ltrel = NULL_RTX;
6905 if (!TARGET_CPU_ZARCH && s390_execute_label (insn))
6908 curr_pool = s390_start_pool (&pool_list, insn);
6910 s390_add_execute (curr_pool, insn);
6911 s390_add_pool_insn (curr_pool, insn);
6913 else if (NONJUMP_INSN_P (insn) || CALL_P (insn))
6915 rtx pool_ref = NULL_RTX;
6916 find_constant_pool_ref (PATTERN (insn), &pool_ref);
6919 rtx constant = get_pool_constant (pool_ref);
6920 enum machine_mode mode = get_pool_mode (pool_ref);
6923 curr_pool = s390_start_pool (&pool_list, insn);
6925 s390_add_constant (curr_pool, constant, mode);
6926 s390_add_pool_insn (curr_pool, insn);
6928 /* Don't split the pool chunk between a LTREL_OFFSET load
6929 and the corresponding LTREL_BASE. */
6930 if (GET_CODE (constant) == CONST
6931 && GET_CODE (XEXP (constant, 0)) == UNSPEC
6932 && XINT (XEXP (constant, 0), 1) == UNSPEC_LTREL_OFFSET)
6934 gcc_assert (!pending_ltrel);
6935 pending_ltrel = pool_ref;
6940 if (JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) || LABEL_P (insn))
6943 s390_add_pool_insn (curr_pool, insn);
6944 /* An LTREL_BASE must follow within the same basic block. */
6945 gcc_assert (!pending_ltrel);
6949 switch (NOTE_KIND (insn))
6951 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
6952 section_switch_p = true;
6954 case NOTE_INSN_VAR_LOCATION:
6955 case NOTE_INSN_CALL_ARG_LOCATION:
6962 || INSN_ADDRESSES_SIZE () <= (size_t) INSN_UID (insn)
6963 || INSN_ADDRESSES (INSN_UID (insn)) == -1)
6966 if (TARGET_CPU_ZARCH)
6968 if (curr_pool->size < S390_POOL_CHUNK_MAX)
6971 s390_end_pool (curr_pool, NULL);
6976 int chunk_size = INSN_ADDRESSES (INSN_UID (insn))
6977 - INSN_ADDRESSES (INSN_UID (curr_pool->first_insn))
6980 /* We will later have to insert base register reload insns.
6981 Those will have an effect on code size, which we need to
6982 consider here. This calculation makes rather pessimistic
6983 worst-case assumptions. */
6987 if (chunk_size < S390_POOL_CHUNK_MIN
6988 && curr_pool->size < S390_POOL_CHUNK_MIN
6989 && !section_switch_p)
6992 /* Pool chunks can only be inserted after BARRIERs ... */
6993 if (BARRIER_P (insn))
6995 s390_end_pool (curr_pool, insn);
7000 /* ... so if we don't find one in time, create one. */
7001 else if (chunk_size > S390_POOL_CHUNK_MAX
7002 || curr_pool->size > S390_POOL_CHUNK_MAX
7003 || section_switch_p)
7005 rtx_insn *label, *jump, *barrier, *next, *prev;
7007 if (!section_switch_p)
7009 /* We can insert the barrier only after a 'real' insn. */
7010 if (! NONJUMP_INSN_P (insn) && ! CALL_P (insn))
7012 if (get_attr_length (insn) == 0)
7014 /* Don't separate LTREL_BASE from the corresponding
7015 LTREL_OFFSET load. */
7022 next = NEXT_INSN (insn);
7026 && (NOTE_KIND (next) == NOTE_INSN_VAR_LOCATION
7027 || NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION));
7031 gcc_assert (!pending_ltrel);
7033 /* The old pool has to end before the section switch
7034 note in order to make it part of the current
7036 insn = PREV_INSN (insn);
7039 label = gen_label_rtx ();
7041 if (prev && NOTE_P (prev))
7042 prev = prev_nonnote_insn (prev);
7044 jump = emit_jump_insn_after_setloc (gen_jump (label), insn,
7045 INSN_LOCATION (prev));
7047 jump = emit_jump_insn_after_noloc (gen_jump (label), insn);
7048 barrier = emit_barrier_after (jump);
7049 insn = emit_label_after (label, barrier);
7050 JUMP_LABEL (jump) = label;
7051 LABEL_NUSES (label) = 1;
7053 INSN_ADDRESSES_NEW (jump, -1);
7054 INSN_ADDRESSES_NEW (barrier, -1);
7055 INSN_ADDRESSES_NEW (insn, -1);
7057 s390_end_pool (curr_pool, barrier);
7065 s390_end_pool (curr_pool, NULL);
7066 gcc_assert (!pending_ltrel);
7068 /* Find all labels that are branched into
7069 from an insn belonging to a different chunk. */
7071 far_labels = BITMAP_ALLOC (NULL);
7073 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7075 rtx_jump_table_data *table;
7077 /* Labels marked with LABEL_PRESERVE_P can be target
7078 of non-local jumps, so we have to mark them.
7079 The same holds for named labels.
7081 Don't do that, however, if it is the label before
7085 && (LABEL_PRESERVE_P (insn) || LABEL_NAME (insn)))
7087 rtx_insn *vec_insn = NEXT_INSN (insn);
7088 if (! vec_insn || ! JUMP_TABLE_DATA_P (vec_insn))
7089 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (insn));
7091 /* Check potential targets in a table jump (casesi_jump). */
7092 else if (tablejump_p (insn, NULL, &table))
7094 rtx vec_pat = PATTERN (table);
7095 int i, diff_p = GET_CODE (vec_pat) == ADDR_DIFF_VEC;
7097 for (i = 0; i < XVECLEN (vec_pat, diff_p); i++)
7099 rtx label = XEXP (XVECEXP (vec_pat, diff_p, i), 0);
7101 if (s390_find_pool (pool_list, label)
7102 != s390_find_pool (pool_list, insn))
7103 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7106 /* If we have a direct jump (conditional or unconditional),
7107 check all potential targets. */
7108 else if (JUMP_P (insn))
7110 rtx pat = PATTERN (insn);
7112 if (GET_CODE (pat) == PARALLEL)
7113 pat = XVECEXP (pat, 0, 0);
7115 if (GET_CODE (pat) == SET)
7117 rtx label = JUMP_LABEL (insn);
7118 if (label && !ANY_RETURN_P (label))
7120 if (s390_find_pool (pool_list, label)
7121 != s390_find_pool (pool_list, insn))
7122 bitmap_set_bit (far_labels, CODE_LABEL_NUMBER (label));
7128 /* Insert base register reload insns before every pool. */
7130 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
      rtx new_insn = gen_reload_base (cfun->machine->base_reg,
				      curr_pool->label);
      rtx_insn *insn = curr_pool->first_insn;
      INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
7138 /* Insert base register reload insns at every far label. */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (LABEL_P (insn)
	&& bitmap_bit_p (far_labels, CODE_LABEL_NUMBER (insn)))
7144 struct constant_pool *pool = s390_find_pool (pool_list, insn);
	  rtx new_insn = gen_reload_base (cfun->machine->base_reg,
					  pool->label);
	  INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
7154 BITMAP_FREE (far_labels);
7157 /* Recompute insn addresses. */
7159 init_insn_lengths ();
7160 shorten_branches (get_insns ());
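/* An illustrative sketch (not itself part of the compiler): after
   chunkification, the insn stream around each pool chunk ends up as

	j	.L_skip		# unconditional jump around the chunk
	<literal pool chunk>
     .L_skip:

   i.e. exactly the jump, barrier and label created above.  The actual
   mnemonics and pool directives depend on the target CPU level.  */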
7165 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7166 After we have decided to use this list, finish implementing
7167 all changes to the current function as required. */
7170 s390_chunkify_finish (struct constant_pool *pool_list)
7172 struct constant_pool *curr_pool = NULL;
7176 /* Replace all literal pool references. */
7178 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7181 replace_ltrel_base (&PATTERN (insn));
7183 curr_pool = s390_find_pool (pool_list, insn);
7187 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
7189 rtx addr, pool_ref = NULL_RTX;
7190 find_constant_pool_ref (PATTERN (insn), &pool_ref);
	  if (s390_execute_label (insn))
	    addr = s390_find_execute (curr_pool, insn);
	  else
	    addr = s390_find_constant (curr_pool,
				       get_pool_constant (pool_ref),
				       get_pool_mode (pool_ref));
7200 replace_constant_pool_ref (&PATTERN (insn), pool_ref, addr);
7201 INSN_CODE (insn) = -1;
7206 /* Dump out all literal pools. */
7208 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7209 s390_dump_pool (curr_pool, 0);
  /* Free pool list.  */

  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }
7221 /* POOL_LIST is a chunk list as prepared by s390_chunkify_start.
7222 We have decided we cannot use this list, so revert all changes
7223 to the current function that were done by s390_chunkify_start. */
7226 s390_chunkify_cancel (struct constant_pool *pool_list)
7228 struct constant_pool *curr_pool = NULL;
7231 /* Remove all pool placeholder insns. */
7233 for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
7235 /* Did we insert an extra barrier? Remove it. */
7236 rtx_insn *barrier = PREV_INSN (curr_pool->pool_insn);
      rtx_insn *jump = barrier ? PREV_INSN (barrier) : NULL;
7238 rtx_insn *label = NEXT_INSN (curr_pool->pool_insn);
7240 if (jump && JUMP_P (jump)
7241 && barrier && BARRIER_P (barrier)
7242 && label && LABEL_P (label)
7243 && GET_CODE (PATTERN (jump)) == SET
7244 && SET_DEST (PATTERN (jump)) == pc_rtx
7245 && GET_CODE (SET_SRC (PATTERN (jump))) == LABEL_REF
7246 && XEXP (SET_SRC (PATTERN (jump)), 0) == label)
	  remove_insn (jump);
	  remove_insn (barrier);
7250 remove_insn (label);
7253 remove_insn (curr_pool->pool_insn);
7256 /* Remove all base register reload insns. */
7258 for (insn = get_insns (); insn; )
7260 rtx_insn *next_insn = NEXT_INSN (insn);
7262 if (NONJUMP_INSN_P (insn)
7263 && GET_CODE (PATTERN (insn)) == SET
7264 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
	  && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_RELOAD_BASE)
	remove_insn (insn);

      insn = next_insn;
  /* Free pool list.  */

  while (pool_list)
    {
      struct constant_pool *next = pool_list->next;
      s390_free_pool (pool_list);
      pool_list = next;
    }
7281 /* Output the constant pool entry EXP in mode MODE with alignment ALIGN. */
7284 s390_output_pool_entry (rtx exp, enum machine_mode mode, unsigned int align)
7288 switch (GET_MODE_CLASS (mode))
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
7292 gcc_assert (GET_CODE (exp) == CONST_DOUBLE);
7294 REAL_VALUE_FROM_CONST_DOUBLE (r, exp);
7295 assemble_real (r, mode, align);
      break;

    case MODE_INT:
      assemble_integer (exp, GET_MODE_SIZE (mode), align, 1);
7300 mark_symbol_refs_as_used (exp);
7309 /* Return an RTL expression representing the value of the return address
7310 for the frame COUNT steps up from the current frame. FRAME is the
7311 frame pointer of that frame. */
7314 s390_return_addr_rtx (int count, rtx frame ATTRIBUTE_UNUSED)
7319 /* Without backchain, we fail for all but the current frame. */
  if (!TARGET_BACKCHAIN && count > 0)
    return NULL_RTX;
7324 /* For the current frame, we need to make sure the initial
     value of RETURN_REGNUM is actually saved.  */

  if (count == 0)
    {
      /* On non-z architectures branch splitting could overwrite r14.  */
      if (TARGET_CPU_ZARCH)
	return get_hard_reg_initial_val (Pmode, RETURN_REGNUM);
7334 cfun_frame_layout.save_return_addr_p = true;
7335 return gen_rtx_MEM (Pmode, return_address_pointer_rtx);
7339 if (TARGET_PACKED_STACK)
7340 offset = -2 * UNITS_PER_LONG;
7342 offset = RETURN_REGNUM * UNITS_PER_LONG;
7344 addr = plus_constant (Pmode, frame, offset);
7345 addr = memory_address (Pmode, addr);
7346 return gen_rtx_MEM (Pmode, addr);
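/* For illustration (a hypothetical user-level example, not code from
   this file): the COUNT == 0 path above is what implements

     void *caller_pc (void)
     {
       return __builtin_return_address (0);
     }

   Any COUNT > 0 requires walking the backchain, which is why the
   function bails out early unless -mbackchain is in effect.  */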
7349 /* Return an RTL expression representing the back chain stored in
7350 the current stack frame. */
7353 s390_back_chain_rtx (void)
7357 gcc_assert (TARGET_BACKCHAIN);
7359 if (TARGET_PACKED_STACK)
7360 chain = plus_constant (Pmode, stack_pointer_rtx,
7361 STACK_POINTER_OFFSET - UNITS_PER_LONG);
  else
    chain = stack_pointer_rtx;
7365 chain = gen_rtx_MEM (Pmode, chain);
7369 /* Find first call clobbered register unused in a function.
7370 This could be used as base register in a leaf function
7371 or for holding the return address before epilogue. */
7374 find_unused_clobbered_reg (void)
  for (i = 0; i < 6; i++)
    if (!df_regs_ever_live_p (i))
      return i;
  return -1;
7384 /* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
7385 clobbered hard regs in SETREG. */
7388 s390_reg_clobbered_rtx (rtx setreg, const_rtx set_insn ATTRIBUTE_UNUSED, void *data)
7390 char *regs_ever_clobbered = (char *)data;
7391 unsigned int i, regno;
7392 enum machine_mode mode = GET_MODE (setreg);
7394 if (GET_CODE (setreg) == SUBREG)
7396 rtx inner = SUBREG_REG (setreg);
      if (!GENERAL_REG_P (inner) && !FP_REG_P (inner))
	return;
      regno = subreg_regno (setreg);
7401 else if (GENERAL_REG_P (setreg) || FP_REG_P (setreg))
    regno = REGNO (setreg);
  else
    return;
  for (i = regno;
       i < regno + HARD_REGNO_NREGS (regno, mode);
       i++)
    regs_ever_clobbered[i] = 1;
/* Walks through all basic blocks of the current function looking
   for clobbered hard regs using s390_reg_clobbered_rtx.  The fields
   of the passed char array REGS_EVER_CLOBBERED are set to one for
   each of those regs.  */
7418 s390_regs_ever_clobbered (char regs_ever_clobbered[])
7424 memset (regs_ever_clobbered, 0, 32);
  /* For non-leaf functions we have to consider all call clobbered regs to be
     clobbered.  */
  if (!crtl->is_leaf)
    for (i = 0; i < 32; i++)
      regs_ever_clobbered[i] = call_really_used_regs[i];
7434 /* Make the "magic" eh_return registers live if necessary. For regs_ever_live
7435 this work is done by liveness analysis (mark_regs_live_at_end).
7436 Special care is needed for functions containing landing pads. Landing pads
7437 may use the eh registers, but the code which sets these registers is not
7438 contained in that function. Hence s390_regs_ever_clobbered is not able to
7439 deal with this automatically. */
7440 if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
7441 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
7442 if (crtl->calls_eh_return
7443 || (cfun->machine->has_landing_pad_p
7444 && df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
7445 regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
  /* For nonlocal gotos all call-saved registers have to be saved.
     This flag is also set for the unwinding code in libgcc.
     See expand_builtin_unwind_init.  For regs_ever_live this is done by
     reload.  */
7451 if (crtl->saves_all_registers)
7452 for (i = 0; i < 32; i++)
7453 if (!call_really_used_regs[i])
7454 regs_ever_clobbered[i] = 1;
7456 FOR_EACH_BB_FN (cur_bb, cfun)
7458 FOR_BB_INSNS (cur_bb, cur_insn)
7462 if (!INSN_P (cur_insn))
7465 pat = PATTERN (cur_insn);
7467 /* Ignore GPR restore insns. */
7468 if (epilogue_completed && RTX_FRAME_RELATED_P (cur_insn))
7470 if (GET_CODE (pat) == SET
7471 && GENERAL_REG_P (SET_DEST (pat)))
7474 if (GET_MODE (SET_SRC (pat)) == DImode
7475 && FP_REG_P (SET_SRC (pat)))
7479 if (GET_CODE (SET_SRC (pat)) == MEM)
7484 if (GET_CODE (pat) == PARALLEL
7485 && load_multiple_operation (pat, VOIDmode))
7490 s390_reg_clobbered_rtx,
7491 regs_ever_clobbered);
7496 /* Determine the frame area which actually has to be accessed
7497 in the function epilogue. The values are stored at the
7498 given pointers AREA_BOTTOM (address of the lowest used stack
7499 address) and AREA_TOP (address of the first item which does
7500 not belong to the stack frame). */
7503 s390_frame_area (int *area_bottom, int *area_top)
7510 if (cfun_frame_layout.first_restore_gpr != -1)
7512 b = (cfun_frame_layout.gprs_offset
7513 + cfun_frame_layout.first_restore_gpr * UNITS_PER_LONG);
7514 t = b + (cfun_frame_layout.last_restore_gpr
7515 - cfun_frame_layout.first_restore_gpr + 1) * UNITS_PER_LONG;
7518 if (TARGET_64BIT && cfun_save_high_fprs_p)
7520 b = MIN (b, cfun_frame_layout.f8_offset);
7521 t = MAX (t, (cfun_frame_layout.f8_offset
7522 + cfun_frame_layout.high_fprs * 8));
7527 if (cfun_fpr_save_p (FPR4_REGNUM))
7529 b = MIN (b, cfun_frame_layout.f4_offset);
7530 t = MAX (t, cfun_frame_layout.f4_offset + 8);
7532 if (cfun_fpr_save_p (FPR6_REGNUM))
7534 b = MIN (b, cfun_frame_layout.f4_offset + 8);
7535 t = MAX (t, cfun_frame_layout.f4_offset + 16);
7541 /* Update gpr_save_slots in the frame layout trying to make use of
7542 FPRs as GPR save slots.
7543 This is a helper routine of s390_register_info. */
7546 s390_register_info_gprtofpr ()
7548 int save_reg_slot = FPR0_REGNUM;
7551 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
7554 for (i = 15; i >= 6; i--)
7556 if (cfun_gpr_save_slot (i) == 0)
      /* Advance to the next FP register which can be used as a
	 GPR save slot.  */
7561 while ((!call_really_used_regs[save_reg_slot]
7562 || df_regs_ever_live_p (save_reg_slot)
7563 || cfun_fpr_save_p (save_reg_slot))
7564 && FP_REGNO_P (save_reg_slot))
7566 if (!FP_REGNO_P (save_reg_slot))
7568 /* We only want to use ldgr/lgdr if we can get rid of
7569 stm/lm entirely. So undo the gpr slot allocation in
7570 case we ran out of FPR save slots. */
7571 for (j = 6; j <= 15; j++)
7572 if (FP_REGNO_P (cfun_gpr_save_slot (j)))
7573 cfun_gpr_save_slot (j) = -1;
7576 cfun_gpr_save_slot (i) = save_reg_slot++;
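/* For example (illustrative, z10 or newer with hardware FP only):
   instead of the usual store/load-multiple, the prologue and epilogue
   of a leaf function can then transfer a GPR to and from an unused
   call-clobbered FPR:

	ldgr	%f0,%r11	# save r11 into f0
	...
	lgdr	%r11,%f0	# restore r11 from f0

   avoiding the stack memory accesses entirely.  */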
/* Set the bits in fpr_bitmap for FPRs which need to be saved due to
   stdarg.
   This is a helper routine for s390_register_info.  */
7585 s390_register_info_stdarg_fpr ()
  /* Save the FP argument regs for stdarg: f0, f2 for 31 bit and
     f0, f2, f4, f6 for 64 bit.  */
  if (!cfun->stdarg
      || !TARGET_HARD_FLOAT
      || !cfun->va_list_fpr_size
      || crtl->args.info.fprs >= FP_ARG_NUM_REG)
    return;
7599 min_fpr = crtl->args.info.fprs;
7600 max_fpr = min_fpr + cfun->va_list_fpr_size;
7601 if (max_fpr > FP_ARG_NUM_REG)
7602 max_fpr = FP_ARG_NUM_REG;
7604 for (i = min_fpr; i < max_fpr; i++)
7605 cfun_set_fpr_save (i + FPR0_REGNUM);
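/* As an illustrative example (not code from this file): for a variadic
   function such as

     double sum_doubles (int n, ...);

   any FP argument registers not consumed by named arguments must be
   dumped to the register save area, so that va_arg can later fetch
   them even though the caller may not have passed FP values at all.  */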
/* Reserve the GPR save slots for GPRs which need to be saved due to
   stdarg.
   This is a helper routine for s390_register_info.  */

static void
s390_register_info_stdarg_gpr ()

  if (!cfun->stdarg
      || !cfun->va_list_gpr_size
      || crtl->args.info.gprs >= GP_ARG_NUM_REG)
    return;
7624 min_gpr = crtl->args.info.gprs;
7625 max_gpr = min_gpr + cfun->va_list_gpr_size;
7626 if (max_gpr > GP_ARG_NUM_REG)
7627 max_gpr = GP_ARG_NUM_REG;
7629 for (i = min_gpr; i < max_gpr; i++)
7630 cfun_gpr_save_slot (2 + i) = -1;
7633 /* The GPR and FPR save slots in cfun->machine->frame_layout are set
7634 for registers which need to be saved in function prologue.
7635 This function can be used until the insns emitted for save/restore
7636 of the regs are visible in the RTL stream. */
7639 s390_register_info ()
7642 char clobbered_regs[32];
7644 gcc_assert (!epilogue_completed);
7646 if (reload_completed)
7647 /* After reload we rely on our own routine to determine which
7648 registers need saving. */
7649 s390_regs_ever_clobbered (clobbered_regs);
    /* During reload we use regs_ever_live as a base since reload
       makes changes there which we would otherwise not be aware
       of.  */
    for (i = 0; i < 32; i++)
      clobbered_regs[i] = df_regs_ever_live_p (i);
7657 for (i = 0; i < 32; i++)
7658 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
  /* Mark the call-saved FPRs which need to be saved.
     This needs to be done before checking the special GPRs since the
     stack pointer usage depends on whether high FPRs have to be saved
     or not.  */
7664 cfun_frame_layout.fpr_bitmap = 0;
7665 cfun_frame_layout.high_fprs = 0;
7666 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
7667 if (clobbered_regs[i] && !call_really_used_regs[i])
7669 cfun_set_fpr_save (i);
7670 if (i >= FPR8_REGNUM)
7671 cfun_frame_layout.high_fprs++;
7675 clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
7676 |= !!df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
7678 clobbered_regs[BASE_REGNUM]
7679 |= (cfun->machine->base_reg
7680 && REGNO (cfun->machine->base_reg) == BASE_REGNUM);
7682 clobbered_regs[HARD_FRAME_POINTER_REGNUM]
7683 |= !!frame_pointer_needed;
  /* On pre z900 machines this might take until machine dependent
     reorg to decide.
     save_return_addr_p will only be set on non-zarch machines so
     there is no risk that r14 goes into an FPR instead of a stack
     slot.  */
  clobbered_regs[RETURN_REGNUM]
    |= (!crtl->is_leaf
	|| TARGET_TPF_PROFILING
	|| cfun->machine->split_branches_pending_p
	|| cfun_frame_layout.save_return_addr_p
	|| crtl->calls_eh_return);
  clobbered_regs[STACK_POINTER_REGNUM]
    |= (!crtl->is_leaf
	|| TARGET_TPF_PROFILING
	|| cfun_save_high_fprs_p
	|| get_frame_size () > 0
	|| (reload_completed && cfun_frame_layout.frame_size > 0)
	|| cfun->calls_alloca);
7705 memset (cfun_frame_layout.gpr_save_slots, 0, 16);
7707 for (i = 6; i < 16; i++)
7708 if (clobbered_regs[i])
7709 cfun_gpr_save_slot (i) = -1;
7711 s390_register_info_stdarg_fpr ();
7712 s390_register_info_gprtofpr ();
  /* First find the range of GPRs to be restored.  Vararg regs don't
     need to be restored so we do it before assigning slots to the
     vararg GPRs.  */
  for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7718 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7719 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
7720 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
7722 /* stdarg functions might need to save GPRs 2 to 6. This might
7723 override the GPR->FPR save decision made above for r6 since
7724 vararg regs must go to the stack. */
7725 s390_register_info_stdarg_gpr ();
7727 /* Now the range of GPRs which need saving. */
7728 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7729 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7730 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
7731 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
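/* To summarize the save slot encoding used above: a cfun_gpr_save_slot
   value of 0 means the GPR needs no save slot, -1 requests a slot in
   the stack register save area, and an FPR register number (assigned
   by s390_register_info_gprtofpr) means the GPR is saved in that FPR
   via ldgr/lgdr instead of in memory.  */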
7734 /* This function is called by s390_optimize_prologue in order to get
7735 rid of unnecessary GPR save/restore instructions. The register info
7736 for the GPRs is re-computed and the ranges are re-calculated. */
7739 s390_optimize_register_info ()
7741 char clobbered_regs[32];
7744 gcc_assert (epilogue_completed);
7745 gcc_assert (!cfun->machine->split_branches_pending_p);
7747 s390_regs_ever_clobbered (clobbered_regs);
7749 for (i = 0; i < 32; i++)
7750 clobbered_regs[i] = clobbered_regs[i] && !global_regs[i];
7752 /* There is still special treatment needed for cases invisible to
7753 s390_regs_ever_clobbered. */
7754 clobbered_regs[RETURN_REGNUM]
7755 |= (TARGET_TPF_PROFILING
	/* When expanding builtin_return_addr in ESA mode we do not
	   know whether r14 will later be needed as scratch reg when
	   doing branch splitting.  So the builtin always accesses the
	   r14 save slot and we need to stick to the save/restore
	   decision for r14 even if it turns out that it didn't get
	   clobbered.  */
	|| cfun_frame_layout.save_return_addr_p
	|| crtl->calls_eh_return);
7765 memset (cfun_frame_layout.gpr_save_slots, 0, 6);
7767 for (i = 6; i < 16; i++)
7768 if (!clobbered_regs[i])
7769 cfun_gpr_save_slot (i) = 0;
7771 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7772 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7773 cfun_frame_layout.first_restore_gpr = (i == 16) ? -1 : i;
7774 cfun_frame_layout.last_restore_gpr = (i == 16) ? -1 : j;
7776 s390_register_info_stdarg_gpr ();
7778 for (i = 0; i < 16 && cfun_gpr_save_slot (i) != -1; i++);
7779 for (j = 15; j > i && cfun_gpr_save_slot (j) != -1; j--);
7780 cfun_frame_layout.first_save_gpr = (i == 16) ? -1 : i;
7781 cfun_frame_layout.last_save_gpr = (i == 16) ? -1 : j;
7784 /* Fill cfun->machine with info about frame of current function. */
7787 s390_frame_info (void)
7789 HOST_WIDE_INT lowest_offset;
7791 cfun_frame_layout.first_save_gpr_slot = cfun_frame_layout.first_save_gpr;
7792 cfun_frame_layout.last_save_gpr_slot = cfun_frame_layout.last_save_gpr;
  /* The va_arg builtin uses a constant distance of 16 *
     UNITS_PER_LONG (r0-r15) to reach the FPRs from the reg_save_area
     pointer.  So even if we are going to save the stack pointer in an
     FPR we need the stack space in order to keep the offsets
     correct.  */
  if (cfun->stdarg && cfun_save_arg_fprs_p)
7801 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
7803 if (cfun_frame_layout.first_save_gpr_slot == -1)
7804 cfun_frame_layout.first_save_gpr_slot = STACK_POINTER_REGNUM;
7807 cfun_frame_layout.frame_size = get_frame_size ();
7808 if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
7809 fatal_error ("total size of local variables exceeds architecture limit");
7811 if (!TARGET_PACKED_STACK)
7813 /* Fixed stack layout. */
7814 cfun_frame_layout.backchain_offset = 0;
7815 cfun_frame_layout.f0_offset = 16 * UNITS_PER_LONG;
7816 cfun_frame_layout.f4_offset = cfun_frame_layout.f0_offset + 2 * 8;
7817 cfun_frame_layout.f8_offset = -cfun_frame_layout.high_fprs * 8;
      cfun_frame_layout.gprs_offset = (cfun_frame_layout.first_save_gpr_slot
				       * UNITS_PER_LONG);
7821 else if (TARGET_BACKCHAIN)
7823 /* Kernel stack layout - packed stack, backchain, no float */
7824 gcc_assert (TARGET_SOFT_FLOAT);
      cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
					    - UNITS_PER_LONG);
7828 /* The distance between the backchain and the return address
7829 save slot must not change. So we always need a slot for the
7830 stack pointer which resides in between. */
7831 cfun_frame_layout.last_save_gpr_slot = STACK_POINTER_REGNUM;
7833 cfun_frame_layout.gprs_offset
7834 = cfun_frame_layout.backchain_offset - cfun_gprs_save_area_size;
7836 /* FPRs will not be saved. Nevertheless pick sane values to
7837 keep area calculations valid. */
7838 cfun_frame_layout.f0_offset =
7839 cfun_frame_layout.f4_offset =
7840 cfun_frame_layout.f8_offset = cfun_frame_layout.gprs_offset;
7846 /* Packed stack layout without backchain. */
7848 /* With stdarg FPRs need their dedicated slots. */
7849 num_fprs = (TARGET_64BIT && cfun->stdarg ? 2
7850 : (cfun_fpr_save_p (FPR4_REGNUM) +
7851 cfun_fpr_save_p (FPR6_REGNUM)));
7852 cfun_frame_layout.f4_offset = STACK_POINTER_OFFSET - 8 * num_fprs;
7854 num_fprs = (cfun->stdarg ? 2
7855 : (cfun_fpr_save_p (FPR0_REGNUM)
7856 + cfun_fpr_save_p (FPR2_REGNUM)));
7857 cfun_frame_layout.f0_offset = cfun_frame_layout.f4_offset - 8 * num_fprs;
7859 cfun_frame_layout.gprs_offset
7860 = cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
7862 cfun_frame_layout.f8_offset = (cfun_frame_layout.gprs_offset
7863 - cfun_frame_layout.high_fprs * 8);
7866 if (cfun_save_high_fprs_p)
7867 cfun_frame_layout.frame_size += cfun_frame_layout.high_fprs * 8;
7870 cfun_frame_layout.frame_size += crtl->outgoing_args_size;
  /* In the following cases we have to allocate a STACK_POINTER_OFFSET
     sized area at the bottom of the stack.  This is required also for
     leaf functions.  When GCC generates a local stack reference it
     will always add STACK_POINTER_OFFSET to all these references.  */
  if (crtl->is_leaf
      && !TARGET_TPF_PROFILING
      && cfun_frame_layout.frame_size == 0
      && !cfun->calls_alloca)
    return;
7882 /* Calculate the number of bytes we have used in our own register
7883 save area. With the packed stack layout we can re-use the
7884 remaining bytes for normal stack elements. */
7886 if (TARGET_PACKED_STACK)
7887 lowest_offset = MIN (MIN (cfun_frame_layout.f0_offset,
7888 cfun_frame_layout.f4_offset),
7889 cfun_frame_layout.gprs_offset);
7893 if (TARGET_BACKCHAIN)
7894 lowest_offset = MIN (lowest_offset, cfun_frame_layout.backchain_offset);
7896 cfun_frame_layout.frame_size += STACK_POINTER_OFFSET - lowest_offset;
  /* If under 31 bit an odd number of gprs has to be saved we have to
     adjust the frame size to sustain 8 byte alignment of stack
     frames.  */
  cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
7902 STACK_BOUNDARY / BITS_PER_UNIT - 1)
7903 & ~(STACK_BOUNDARY / BITS_PER_UNIT - 1));
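/* As an illustration of the fixed (non-packed) layout computed above
   for 64 bit, relative to the incoming stack pointer:

	  0(%r15)  back chain (written only with -mbackchain)
	 16(%r15)  GPR save slots, 8 bytes per register (r2 .. r15)
	128(%r15)  FPR save slots f0, f2
	144(%r15)  FPR save slots f4, f6
	160(%r15)  STACK_POINTER_OFFSET: end of the caller's save area

   f8-f15, if saved at all, live in the callee's own frame; note the
   negative f8_offset above.  */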
7906 /* Generate frame layout. Fills in register and frame data for the current
7907 function in cfun->machine. This routine can be called multiple times;
7908 it will re-do the complete frame layout every time. */
7911 s390_init_frame_layout (void)
7913 HOST_WIDE_INT frame_size;
7916 gcc_assert (!reload_completed);
7918 /* On S/390 machines, we may need to perform branch splitting, which
7919 will require both base and return address register. We have no
7920 choice but to assume we're going to need them until right at the
7921 end of the machine dependent reorg phase. */
7922 if (!TARGET_CPU_ZARCH)
7923 cfun->machine->split_branches_pending_p = true;
7927 frame_size = cfun_frame_layout.frame_size;
7929 /* Try to predict whether we'll need the base register. */
7930 base_used = cfun->machine->split_branches_pending_p
7931 || crtl->uses_const_pool
7932 || (!DISP_IN_RANGE (frame_size)
7933 && !CONST_OK_FOR_K (frame_size));
      /* Decide which register to use as literal pool base.  In small
	 leaf functions, try to use an unused call-clobbered register
	 as base register to avoid save/restore overhead.  */
      if (!base_used)
	cfun->machine->base_reg = NULL_RTX;
      else if (crtl->is_leaf && !df_regs_ever_live_p (5))
	cfun->machine->base_reg = gen_rtx_REG (Pmode, 5);
      else
	cfun->machine->base_reg = gen_rtx_REG (Pmode, BASE_REGNUM);
7945 s390_register_info ();
7948 while (frame_size != cfun_frame_layout.frame_size);
/* Remove the FPR clobbers from a tbegin insn if it can be proven that
   the TX is nonescaping.  A transaction is considered escaping if
   there is at least one path from tbegin returning CC0 to the
   function exit block without a tend.

   The check so far has some limitations:
   - only single tbegin/tend BBs are supported
   - the first cond jump after tbegin must separate the CC0 path from ~CC0
   - copying CC into a GPR and doing the CC0 check on the GPR is not
     supported  */
7964 s390_optimize_nonescaping_tx (void)
7966 const unsigned int CC0 = 1 << 3;
7967 basic_block tbegin_bb = NULL;
7968 basic_block tend_bb = NULL;
7973 rtx_insn *tbegin_insn = NULL;
7975 if (!cfun->machine->tbegin_p)
7978 for (bb_index = 0; bb_index < n_basic_blocks_for_fn (cfun); bb_index++)
7980 bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
7985 FOR_BB_INSNS (bb, insn)
7987 rtx ite, cc, pat, target;
7988 unsigned HOST_WIDE_INT mask;
7990 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
7993 pat = PATTERN (insn);
7995 if (GET_CODE (pat) == PARALLEL)
7996 pat = XVECEXP (pat, 0, 0);
7998 if (GET_CODE (pat) != SET
7999 || GET_CODE (SET_SRC (pat)) != UNSPEC_VOLATILE)
8002 if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
8008 /* Just return if the tbegin doesn't have clobbers. */
8009 if (GET_CODE (PATTERN (insn)) != PARALLEL)
8012 if (tbegin_bb != NULL)
8015 /* Find the next conditional jump. */
8016 for (tmp = NEXT_INSN (insn);
8018 tmp = NEXT_INSN (tmp))
8020 if (reg_set_p (gen_rtx_REG (CCmode, CC_REGNUM), tmp))
8025 ite = SET_SRC (PATTERN (tmp));
8026 if (GET_CODE (ite) != IF_THEN_ELSE)
8029 cc = XEXP (XEXP (ite, 0), 0);
8030 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc))
8031 || GET_MODE (cc) != CCRAWmode
8032 || GET_CODE (XEXP (XEXP (ite, 0), 1)) != CONST_INT)
8035 if (bb->succs->length () != 2)
8038 mask = INTVAL (XEXP (XEXP (ite, 0), 1));
	      if (GET_CODE (XEXP (ite, 0)) == NE)
		mask ^= 0xf;

	      if (mask == CC0)
		target = XEXP (ite, 1);
	      else if (mask == (CC0 ^ 0xf))
		target = XEXP (ite, 2);
	      else
		return;
8053 ei = ei_start (bb->succs);
8054 e1 = ei_safe_edge (ei);
8056 e2 = ei_safe_edge (ei);
8058 if (e2->flags & EDGE_FALLTHRU)
8061 e1 = ei_safe_edge (ei);
8064 if (!(e1->flags & EDGE_FALLTHRU))
8067 tbegin_bb = (target == pc_rtx) ? e1->dest : e2->dest;
8069 if (tmp == BB_END (bb))
8074 if (XINT (SET_SRC (pat), 1) == UNSPECV_TEND)
8076 if (tend_bb != NULL)
  /* Either we successfully remove the FPR clobbers here or we are not
     able to do anything for this TX.  Both cases don't qualify for
     retry.  */
  cfun->machine->tbegin_p = false;
8088 if (tbegin_bb == NULL || tend_bb == NULL)
8091 calculate_dominance_info (CDI_POST_DOMINATORS);
8092 result = dominated_by_p (CDI_POST_DOMINATORS, tbegin_bb, tend_bb);
8093 free_dominance_info (CDI_POST_DOMINATORS);
  PATTERN (tbegin_insn) = gen_rtx_PARALLEL (VOIDmode,
			    gen_rtvec (2,
				       XVECEXP (PATTERN (tbegin_insn), 0, 0),
				       XVECEXP (PATTERN (tbegin_insn), 0, 1)));
8102 INSN_CODE (tbegin_insn) = -1;
8103 df_insn_rescan (tbegin_insn);
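/* An illustrative source-level view of the pattern this function looks
   for (assuming the transactional-execution builtins enabled with
   -mhtm; not code from this file):

     if (__builtin_tbegin (0) == 0)	// CC0: transaction started
       {
	 ... transactional work, no FP usage ...
       }
     __builtin_tend ();

   Only if every CC0 path from the tbegin reaches a tend, as verified
   via post-dominance above, may the FPR clobbers be dropped.  */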
8108 /* Return true if it is legal to put a value with MODE into REGNO. */
8111 s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
8113 switch (REGNO_REG_CLASS (regno))
8116 if (REGNO_PAIR_OK (regno, mode))
8118 if (mode == SImode || mode == DImode)
8121 if (FLOAT_MODE_P (mode) && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
8126 if (FRAME_REGNO_P (regno) && mode == Pmode)
8131 if (REGNO_PAIR_OK (regno, mode))
8134 || (mode != TFmode && mode != TCmode && mode != TDmode))
8139 if (GET_MODE_CLASS (mode) == MODE_CC)
8143 if (REGNO_PAIR_OK (regno, mode))
8145 if (mode == SImode || mode == Pmode)
8156 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
8159 s390_hard_regno_rename_ok (unsigned int old_reg, unsigned int new_reg)
8161 /* Once we've decided upon a register to use as base register, it must
8162 no longer be used for any other purpose. */
8163 if (cfun->machine->base_reg)
8164 if (REGNO (cfun->machine->base_reg) == old_reg
8165 || REGNO (cfun->machine->base_reg) == new_reg)
8168 /* Prevent regrename from using call-saved regs which haven't
8169 actually been saved. This is necessary since regrename assumes
8170 the backend save/restore decisions are based on
8171 df_regs_ever_live. Since we have our own routine we have to tell
8172 regrename manually about it. */
8173 if (GENERAL_REGNO_P (new_reg)
8174 && !call_really_used_regs[new_reg]
8175 && cfun_gpr_save_slot (new_reg) == 0)
/* Return nonzero if register REGNO can be used as a scratch register
   in peephole2.  */

static bool
s390_hard_regno_scratch_ok (unsigned int regno)
8187 /* See s390_hard_regno_rename_ok. */
8188 if (GENERAL_REGNO_P (regno)
8189 && !call_really_used_regs[regno]
8190 && cfun_gpr_save_slot (regno) == 0)
8196 /* Maximum number of registers to represent a value of mode MODE
8197 in a register of class RCLASS. */
8200 s390_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
8205 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8206 return 2 * ((GET_MODE_SIZE (mode) / 2 + 8 - 1) / 8);
8208 return (GET_MODE_SIZE (mode) + 8 - 1) / 8;
8210 return (GET_MODE_SIZE (mode) + 4 - 1) / 4;
8214 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
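/* Two worked examples of the above: TFmode (16 bytes) in FP_REGS
   yields (16 + 7) / 8 == 2 registers, i.e. an FPR pair, while TCmode
   (complex TFmode, 32 bytes) yields 2 * ((16 + 7) / 8) == 4.  */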
/* Return true if we use LRA instead of reload pass.  */
static bool
s390_lra_p (void)
{
  return s390_lra_flag;
8224 /* Return true if register FROM can be eliminated via register TO. */
8227 s390_can_eliminate (const int from, const int to)
8229 /* On zSeries machines, we have not marked the base register as fixed.
8230 Instead, we have an elimination rule BASE_REGNUM -> BASE_REGNUM.
8231 If a function requires the base register, we say here that this
8232 elimination cannot be performed. This will cause reload to free
8233 up the base register (as if it were fixed). On the other hand,
8234 if the current function does *not* require the base register, we
8235 say here the elimination succeeds, which in turn allows reload
8236 to allocate the base register for any other purpose. */
8237 if (from == BASE_REGNUM && to == BASE_REGNUM)
8239 if (TARGET_CPU_ZARCH)
8241 s390_init_frame_layout ();
8242 return cfun->machine->base_reg == NULL_RTX;
8248 /* Everything else must point into the stack frame. */
8249 gcc_assert (to == STACK_POINTER_REGNUM
8250 || to == HARD_FRAME_POINTER_REGNUM);
8252 gcc_assert (from == FRAME_POINTER_REGNUM
8253 || from == ARG_POINTER_REGNUM
8254 || from == RETURN_ADDRESS_POINTER_REGNUM);
8256 /* Make sure we actually saved the return address. */
8257 if (from == RETURN_ADDRESS_POINTER_REGNUM)
8258 if (!crtl->calls_eh_return
8260 && !cfun_frame_layout.save_return_addr_p)
/* Return offset between register FROM and TO initially after prologue.  */
8269 s390_initial_elimination_offset (int from, int to)
8271 HOST_WIDE_INT offset;
8273 /* ??? Why are we called for non-eliminable pairs? */
8274 if (!s390_can_eliminate (from, to))
8279 case FRAME_POINTER_REGNUM:
8280 offset = (get_frame_size()
8281 + STACK_POINTER_OFFSET
8282 + crtl->outgoing_args_size);
8285 case ARG_POINTER_REGNUM:
8286 s390_init_frame_layout ();
8287 offset = cfun_frame_layout.frame_size + STACK_POINTER_OFFSET;
8290 case RETURN_ADDRESS_POINTER_REGNUM:
8291 s390_init_frame_layout ();
8293 if (cfun_frame_layout.first_save_gpr_slot == -1)
	  /* If it turns out that for stdarg nothing went into the reg
	     save area we also do not need the return address
	     save slot.  */
	  if (cfun->stdarg && !cfun_save_arg_fprs_p)
      /* In order to make the following work it is not necessary for
	 r14 to have a save slot.  It is sufficient if one other GPR
	 got one.  Since the GPRs are always stored without gaps we
	 are able to calculate where the r14 save slot would
	 reside.  */
      offset = (cfun_frame_layout.frame_size + cfun_frame_layout.gprs_offset +
		(RETURN_REGNUM - cfun_frame_layout.first_save_gpr_slot) *
		UNITS_PER_LONG);
8325 /* Emit insn to save fpr REGNUM at offset OFFSET relative
8326 to register BASE. Return generated insn. */
8329 save_fpr (rtx base, int offset, int regnum)
8332 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
8334 if (regnum >= 16 && regnum <= (16 + FP_ARG_NUM_REG))
8335 set_mem_alias_set (addr, get_varargs_alias_set ());
8337 set_mem_alias_set (addr, get_frame_alias_set ());
8339 return emit_move_insn (addr, gen_rtx_REG (DFmode, regnum));
8342 /* Emit insn to restore fpr REGNUM from offset OFFSET relative
8343 to register BASE. Return generated insn. */
8346 restore_fpr (rtx base, int offset, int regnum)
8349 addr = gen_rtx_MEM (DFmode, plus_constant (Pmode, base, offset));
8350 set_mem_alias_set (addr, get_frame_alias_set ());
8352 return emit_move_insn (gen_rtx_REG (DFmode, regnum), addr);
/* Return true if REGNO is a global register, but not one
   of the special ones that need to be saved/restored anyway.  */
8359 global_not_special_regno_p (int regno)
8361 return (global_regs[regno]
8362 /* These registers are special and need to be
8363 restored in any case. */
8364 && !(regno == STACK_POINTER_REGNUM
8365 || regno == RETURN_REGNUM
8366 || regno == BASE_REGNUM
8367 || (flag_pic && regno == (int)PIC_OFFSET_TABLE_REGNUM)));
8370 /* Generate insn to save registers FIRST to LAST into
8371 the register save area located at offset OFFSET
8372 relative to register BASE. */
8375 save_gprs (rtx base, int offset, int first, int last)
8377 rtx addr, insn, note;
8380 addr = plus_constant (Pmode, base, offset);
8381 addr = gen_rtx_MEM (Pmode, addr);
8383 set_mem_alias_set (addr, get_frame_alias_set ());
8385 /* Special-case single register. */
8389 insn = gen_movdi (addr, gen_rtx_REG (Pmode, first));
8391 insn = gen_movsi (addr, gen_rtx_REG (Pmode, first));
8393 if (!global_not_special_regno_p (first))
8394 RTX_FRAME_RELATED_P (insn) = 1;
8399 insn = gen_store_multiple (addr,
8400 gen_rtx_REG (Pmode, first),
8401 GEN_INT (last - first + 1));
8403 if (first <= 6 && cfun->stdarg)
8404 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	    rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);

	    if (first + i <= 6)
	      set_mem_alias_set (mem, get_varargs_alias_set ());
      /* We need to set the FRAME_RELATED flag on all SETs
	 inside the store-multiple pattern.

	 However, we must not emit DWARF records for registers 2..5
	 if they are stored for use by variable arguments ...

	 ??? Unfortunately, it is not enough to simply not set the
	 FRAME_RELATED flags for those SETs, because the first SET
	 of the PARALLEL is always treated as if it had the flag
	 set, even if it does not.  Therefore we emit a new pattern
	 without those registers as REG_FRAME_RELATED_EXPR note.  */
8424 if (first >= 6 && !global_not_special_regno_p (first))
8426 rtx pat = PATTERN (insn);
8428 for (i = 0; i < XVECLEN (pat, 0); i++)
8429 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
8430 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (pat,
8432 RTX_FRAME_RELATED_P (XVECEXP (pat, 0, i)) = 1;
8434 RTX_FRAME_RELATED_P (insn) = 1;
8440 for (start = first >= 6 ? first : 6; start <= last; start++)
8441 if (!global_not_special_regno_p (start))
8447 addr = plus_constant (Pmode, base,
8448 offset + (start - first) * UNITS_PER_LONG);
8453 note = gen_movdi (gen_rtx_MEM (Pmode, addr),
8454 gen_rtx_REG (Pmode, start));
8456 note = gen_movsi (gen_rtx_MEM (Pmode, addr),
8457 gen_rtx_REG (Pmode, start));
8458 note = PATTERN (note);
8460 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8461 RTX_FRAME_RELATED_P (insn) = 1;
8466 note = gen_store_multiple (gen_rtx_MEM (Pmode, addr),
8467 gen_rtx_REG (Pmode, start),
8468 GEN_INT (last - start + 1));
8469 note = PATTERN (note);
8471 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
8473 for (i = 0; i < XVECLEN (note, 0); i++)
8474 if (GET_CODE (XVECEXP (note, 0, i)) == SET
8475 && !global_not_special_regno_p (REGNO (SET_SRC (XVECEXP (note,
8477 RTX_FRAME_RELATED_P (XVECEXP (note, 0, i)) = 1;
8479 RTX_FRAME_RELATED_P (insn) = 1;
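/* For example (illustrative): with FIRST == 6 and LAST == 15 in a
   64-bit function, the code above produces the single instruction

	stmg	%r6,%r15,48(%r15)

   saving all call-saved GPRs with one store-multiple.  */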
8485 /* Generate insn to restore registers FIRST to LAST from
8486 the register save area located at offset OFFSET
8487 relative to register BASE. */
8490 restore_gprs (rtx base, int offset, int first, int last)
8494 addr = plus_constant (Pmode, base, offset);
8495 addr = gen_rtx_MEM (Pmode, addr);
8496 set_mem_alias_set (addr, get_frame_alias_set ());
8498 /* Special-case single register. */
8502 insn = gen_movdi (gen_rtx_REG (Pmode, first), addr);
8504 insn = gen_movsi (gen_rtx_REG (Pmode, first), addr);
8506 RTX_FRAME_RELATED_P (insn) = 1;
8510 insn = gen_load_multiple (gen_rtx_REG (Pmode, first),
8512 GEN_INT (last - first + 1));
8513 RTX_FRAME_RELATED_P (insn) = 1;
8517 /* Return insn sequence to load the GOT register. */
8519 static GTY(()) rtx got_symbol;
8521 s390_load_got (void)
  /* We cannot use pic_offset_table_rtx here since we use this
     function also for non-pic if __tls_get_offset is called and in
     that case PIC_OFFSET_TABLE_REGNUM as well as pic_offset_table_rtx
     aren't usable.  */
  rtx got_rtx = gen_rtx_REG (Pmode, 12);
8533 got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8534 SYMBOL_REF_FLAGS (got_symbol) = SYMBOL_FLAG_LOCAL;
8539 if (TARGET_CPU_ZARCH)
8541 emit_move_insn (got_rtx, got_symbol);
8547 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got_symbol),
8548 UNSPEC_LTREL_OFFSET);
8549 offset = gen_rtx_CONST (Pmode, offset);
8550 offset = force_const_mem (Pmode, offset);
8552 emit_move_insn (got_rtx, offset);
8554 offset = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (offset, 0)),
8556 offset = gen_rtx_PLUS (Pmode, got_rtx, offset);
8558 emit_move_insn (got_rtx, offset);
8561 insns = get_insns ();
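/* For illustration: on TARGET_CPU_ZARCH the sequence built above is a
   single instruction,

	larl	%r12,_GLOBAL_OFFSET_TABLE_

   whereas the ESA variant loads the GOT offset from the literal pool
   and adds it to the base as coded above.  */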
8566 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
8567 and the change to the stack pointer. */
8570 s390_emit_stack_tie (void)
8572 rtx mem = gen_frame_mem (BLKmode,
8573 gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
8575 emit_insn (gen_stack_tie (mem));
8578 /* Copy GPRS into FPR save slots. */
8581 s390_save_gprs_to_fprs (void)
8585 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
8588 for (i = 6; i < 16; i++)
8590 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
8593 emit_move_insn (gen_rtx_REG (DImode, cfun_gpr_save_slot (i)),
8594 gen_rtx_REG (DImode, i));
8595 RTX_FRAME_RELATED_P (insn) = 1;
8600 /* Restore GPRs from FPR save slots. */
8603 s390_restore_gprs_from_fprs (void)
8607 if (!TARGET_Z10 || !TARGET_HARD_FLOAT || !crtl->is_leaf)
8610 for (i = 6; i < 16; i++)
8612 if (FP_REGNO_P (cfun_gpr_save_slot (i)))
8615 emit_move_insn (gen_rtx_REG (DImode, i),
8616 gen_rtx_REG (DImode, cfun_gpr_save_slot (i)));
8617 df_set_regs_ever_live (i, true);
8618 add_reg_note (insn, REG_CFA_RESTORE, gen_rtx_REG (DImode, i));
8619 if (i == STACK_POINTER_REGNUM)
8620 add_reg_note (insn, REG_CFA_DEF_CFA,
8621 plus_constant (Pmode, stack_pointer_rtx,
8622 STACK_POINTER_OFFSET));
8623 RTX_FRAME_RELATED_P (insn) = 1;
/* A pass run immediately before shrink-wrapping and prologue and epilogue
   generation.  */
8634 const pass_data pass_data_s390_early_mach =
8636 RTL_PASS, /* type */
8637 "early_mach", /* name */
8638 OPTGROUP_NONE, /* optinfo_flags */
8639 TV_MACH_DEP, /* tv_id */
8640 0, /* properties_required */
8641 0, /* properties_provided */
8642 0, /* properties_destroyed */
8643 0, /* todo_flags_start */
8644 ( TODO_df_verify | TODO_df_finish ), /* todo_flags_finish */
8647 class pass_s390_early_mach : public rtl_opt_pass
8650 pass_s390_early_mach (gcc::context *ctxt)
8651 : rtl_opt_pass (pass_data_s390_early_mach, ctxt)
8654 /* opt_pass methods: */
8655 virtual unsigned int execute (function *);
8657 }; // class pass_s390_early_mach
8660 pass_s390_early_mach::execute (function *fun)
8664 /* Try to get rid of the FPR clobbers. */
8665 s390_optimize_nonescaping_tx ();
8667 /* Re-compute register info. */
8668 s390_register_info ();
8670 /* If we're using a base register, ensure that it is always valid for
8671 the first non-prologue instruction. */
8672 if (fun->machine->base_reg)
8673 emit_insn_at_entry (gen_main_pool (fun->machine->base_reg));
8675 /* Annotate all constant pool references to let the scheduler know
8676 they implicitly use the base register. */
8677 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8680 annotate_constant_pool_refs (&PATTERN (insn));
8681 df_insn_rescan (insn);
8688 /* Expand the prologue into a bunch of separate insns. */
8691 s390_emit_prologue (void)
  /* Choose best register to use for temp use within prologue.
     See below for why TPF must use register 1.  */
8702 if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
8704 && !TARGET_TPF_PROFILING)
8705 temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
8707 temp_reg = gen_rtx_REG (Pmode, 1);
8709 s390_save_gprs_to_fprs ();
8711 /* Save call saved gprs. */
8712 if (cfun_frame_layout.first_save_gpr != -1)
8714 insn = save_gprs (stack_pointer_rtx,
8715 cfun_frame_layout.gprs_offset +
8716 UNITS_PER_LONG * (cfun_frame_layout.first_save_gpr
8717 - cfun_frame_layout.first_save_gpr_slot),
8718 cfun_frame_layout.first_save_gpr,
8719 cfun_frame_layout.last_save_gpr);
8723 /* Dummy insn to mark literal pool slot. */
8725 if (cfun->machine->base_reg)
8726 emit_insn (gen_main_pool (cfun->machine->base_reg));
8728 offset = cfun_frame_layout.f0_offset;
8730 /* Save f0 and f2. */
8731 for (i = FPR0_REGNUM; i <= FPR0_REGNUM + 1; i++)
8733 if (cfun_fpr_save_p (i))
8735 save_fpr (stack_pointer_rtx, offset, i);
8738 else if (!TARGET_PACKED_STACK || cfun->stdarg)
8742 /* Save f4 and f6. */
8743 offset = cfun_frame_layout.f4_offset;
8744 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
8746 if (cfun_fpr_save_p (i))
8748 insn = save_fpr (stack_pointer_rtx, offset, i);
	  /* If f4 and f6 are call clobbered they are saved due to
	     stdarg and therefore are not frame related.  */
8753 if (!call_really_used_regs[i])
8754 RTX_FRAME_RELATED_P (insn) = 1;
8756 else if (!TARGET_PACKED_STACK || call_really_used_regs[i])
8760 if (TARGET_PACKED_STACK
8761 && cfun_save_high_fprs_p
8762 && cfun_frame_layout.f8_offset + cfun_frame_layout.high_fprs * 8 > 0)
8764 offset = (cfun_frame_layout.f8_offset
8765 + (cfun_frame_layout.high_fprs - 1) * 8);
8767 for (i = FPR15_REGNUM; i >= FPR8_REGNUM && offset >= 0; i--)
8768 if (cfun_fpr_save_p (i))
8770 insn = save_fpr (stack_pointer_rtx, offset, i);
8772 RTX_FRAME_RELATED_P (insn) = 1;
8775 if (offset >= cfun_frame_layout.f8_offset)
8779 if (!TARGET_PACKED_STACK)
8780 next_fpr = cfun_save_high_fprs_p ? FPR15_REGNUM : 0;
8782 if (flag_stack_usage_info)
8783 current_function_static_stack_size = cfun_frame_layout.frame_size;
8785 /* Decrement stack pointer. */
8787 if (cfun_frame_layout.frame_size > 0)
8789 rtx frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8792 if (s390_stack_size)
8794 HOST_WIDE_INT stack_guard;
8796 if (s390_stack_guard)
8797 stack_guard = s390_stack_guard;
	  else
	    {
	      /* If no value for stack guard is provided the smallest power of 2
		 larger than the current frame size is chosen.  */
	      stack_guard = 1;
	      while (stack_guard < cfun_frame_layout.frame_size)
		stack_guard <<= 1;
	    }
8807 if (cfun_frame_layout.frame_size >= s390_stack_size)
8809 warning (0, "frame size of function %qs is %wd"
8810 " bytes exceeding user provided stack limit of "
8812 "An unconditional trap is added.",
8813 current_function_name(), cfun_frame_layout.frame_size,
8815 emit_insn (gen_trap ());
8819 /* stack_guard has to be smaller than s390_stack_size.
8820 Otherwise we would emit an AND with zero which would
8821 not match the test under mask pattern. */
8822 if (stack_guard >= s390_stack_size)
8824 warning (0, "frame size of function %qs is %wd"
8825 " bytes which is more than half the stack size. "
8826 "The dynamic check would not be reliable. "
8827 "No check emitted for this function.",
8828 current_function_name(),
8829 cfun_frame_layout.frame_size);
8833 HOST_WIDE_INT stack_check_mask = ((s390_stack_size - 1)
8834 & ~(stack_guard - 1));
8836 rtx t = gen_rtx_AND (Pmode, stack_pointer_rtx,
8837 GEN_INT (stack_check_mask));
8839 emit_insn (gen_ctrapdi4 (gen_rtx_EQ (VOIDmode,
8841 t, const0_rtx, const0_rtx));
8843 emit_insn (gen_ctrapsi4 (gen_rtx_EQ (VOIDmode,
8845 t, const0_rtx, const0_rtx));
8850 if (s390_warn_framesize > 0
8851 && cfun_frame_layout.frame_size >= s390_warn_framesize)
8852 warning (0, "frame size of %qs is %wd bytes",
8853 current_function_name (), cfun_frame_layout.frame_size);
8855 if (s390_warn_dynamicstack_p && cfun->calls_alloca)
8856 warning (0, "%qs uses dynamic stack allocation", current_function_name ());
8858 /* Save incoming stack pointer into temp reg. */
8859 if (TARGET_BACKCHAIN || next_fpr)
8860 insn = emit_insn (gen_move_insn (temp_reg, stack_pointer_rtx));
8862 /* Subtract frame size from stack pointer. */
8864 if (DISP_IN_RANGE (INTVAL (frame_off)))
8866 insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8867 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8869 insn = emit_insn (insn);
8873 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
8874 frame_off = force_const_mem (Pmode, frame_off);
8876 insn = emit_insn (gen_add2_insn (stack_pointer_rtx, frame_off));
8877 annotate_constant_pool_refs (&PATTERN (insn));
8880 RTX_FRAME_RELATED_P (insn) = 1;
8881 real_frame_off = GEN_INT (-cfun_frame_layout.frame_size);
8882 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8883 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8884 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8887 /* Set backchain. */
8889 if (TARGET_BACKCHAIN)
8891 if (cfun_frame_layout.backchain_offset)
8892 addr = gen_rtx_MEM (Pmode,
8893 plus_constant (Pmode, stack_pointer_rtx,
8894 cfun_frame_layout.backchain_offset));
8896 addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
8897 set_mem_alias_set (addr, get_frame_alias_set ());
8898 insn = emit_insn (gen_move_insn (addr, temp_reg));
8901 /* If we support non-call exceptions (e.g. for Java),
8902 we need to make sure the backchain pointer is set up
8903 before any possibly trapping memory access. */
8904 if (TARGET_BACKCHAIN && cfun->can_throw_non_call_exceptions)
8906 addr = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
8907 emit_clobber (addr);
8911 /* Save fprs 8 - 15 (64 bit ABI). */
8913 if (cfun_save_high_fprs_p && next_fpr)
8915 /* If the stack might be accessed through a different register
8916 we have to make sure that the stack pointer decrement is not
8917 moved below the use of the stack slots. */
8918 s390_emit_stack_tie ();
8920 insn = emit_insn (gen_add2_insn (temp_reg,
8921 GEN_INT (cfun_frame_layout.f8_offset)));
8925 for (i = FPR8_REGNUM; i <= next_fpr; i++)
8926 if (cfun_fpr_save_p (i))
8928 rtx addr = plus_constant (Pmode, stack_pointer_rtx,
8929 cfun_frame_layout.frame_size
8930 + cfun_frame_layout.f8_offset
8933 insn = save_fpr (temp_reg, offset, i);
8935 RTX_FRAME_RELATED_P (insn) = 1;
8936 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
8937 gen_rtx_SET (VOIDmode,
8938 gen_rtx_MEM (DFmode, addr),
8939 gen_rtx_REG (DFmode, i)));
8943 /* Set frame pointer, if needed. */
8945 if (frame_pointer_needed)
8947 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8948 RTX_FRAME_RELATED_P (insn) = 1;
8951 /* Set up got pointer, if needed. */
8953 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
8955 rtx_insn *insns = s390_load_got ();
8957 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
8958 annotate_constant_pool_refs (&PATTERN (insn));
8963 if (TARGET_TPF_PROFILING)
8965 /* Generate a BAS instruction to serve as a function
8966 entry intercept to facilitate the use of tracing
8967 algorithms located at the branch target. */
8968 emit_insn (gen_prologue_tpf ());
8970 /* Emit a blockage here so that all code
8971 lies between the profiling mechanisms. */
8972 emit_insn (gen_blockage ());
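/* Putting the pieces together, a typical -m64 prologue emitted by this
   function looks like (illustrative, small frame, no FPR saves):

	stmg	%r6,%r15,48(%r15)	# save_gprs
	aghi	%r15,-160		# decrement stack pointer

   with backchain store, frame pointer and GOT setup added only when
   the function needs them.  */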
8976 /* Expand the epilogue into a bunch of separate insns. */
8979 s390_emit_epilogue (bool sibcall)
8981 rtx frame_pointer, return_reg, cfa_restores = NULL_RTX;
8982 int area_bottom, area_top, offset = 0;
8987 if (TARGET_TPF_PROFILING)
8990 /* Generate a BAS instruction to serve as a function
8991 entry intercept to facilitate the use of tracing
8992 algorithms located at the branch target. */
8994 /* Emit a blockage here so that all code
8995 lies between the profiling mechanisms. */
8996 emit_insn (gen_blockage ());
8998 emit_insn (gen_epilogue_tpf ());
9001 /* Check whether to use frame or stack pointer for restore. */
9003 frame_pointer = (frame_pointer_needed
9004 ? hard_frame_pointer_rtx : stack_pointer_rtx);
9006 s390_frame_area (&area_bottom, &area_top);
9008 /* Check whether we can access the register save area.
9009 If not, increment the frame pointer as required. */
9011 if (area_top <= area_bottom)
9013 /* Nothing to restore. */
9015 else if (DISP_IN_RANGE (cfun_frame_layout.frame_size + area_bottom)
9016 && DISP_IN_RANGE (cfun_frame_layout.frame_size + area_top - 1))
9018 /* Area is in range. */
9019 offset = cfun_frame_layout.frame_size;
9023 rtx insn, frame_off, cfa;
9025 offset = area_bottom < 0 ? -area_bottom : 0;
9026 frame_off = GEN_INT (cfun_frame_layout.frame_size - offset);
9028 cfa = gen_rtx_SET (VOIDmode, frame_pointer,
9029 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
9030 if (DISP_IN_RANGE (INTVAL (frame_off)))
9032 insn = gen_rtx_SET (VOIDmode, frame_pointer,
9033 gen_rtx_PLUS (Pmode, frame_pointer, frame_off));
9034 insn = emit_insn (insn);
9038 if (!CONST_OK_FOR_K (INTVAL (frame_off)))
9039 frame_off = force_const_mem (Pmode, frame_off);
9041 insn = emit_insn (gen_add2_insn (frame_pointer, frame_off));
9042 annotate_constant_pool_refs (&PATTERN (insn));
9044 add_reg_note (insn, REG_CFA_ADJUST_CFA, cfa);
9045 RTX_FRAME_RELATED_P (insn) = 1;
9048 /* Restore call saved fprs. */
9052 if (cfun_save_high_fprs_p)
9054 next_offset = cfun_frame_layout.f8_offset;
9055 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
9057 if (cfun_fpr_save_p (i))
9059 restore_fpr (frame_pointer,
9060 offset + next_offset, i);
9062 = alloc_reg_note (REG_CFA_RESTORE,
9063 gen_rtx_REG (DFmode, i), cfa_restores);
9072 next_offset = cfun_frame_layout.f4_offset;
9074 for (i = FPR4_REGNUM; i <= FPR4_REGNUM + 1; i++)
9076 if (cfun_fpr_save_p (i))
9078 restore_fpr (frame_pointer,
9079 offset + next_offset, i);
9081 = alloc_reg_note (REG_CFA_RESTORE,
9082 gen_rtx_REG (DFmode, i), cfa_restores);
9085 else if (!TARGET_PACKED_STACK)
9091 /* Return register. */
9093 return_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
9095 /* Restore call saved gprs. */
9097 if (cfun_frame_layout.first_restore_gpr != -1)
      /* Check for global registers and save them
	 to the stack location from which they get restored.  */
9105 for (i = cfun_frame_layout.first_restore_gpr;
9106 i <= cfun_frame_layout.last_restore_gpr;
9109 if (global_not_special_regno_p (i))
9111 addr = plus_constant (Pmode, frame_pointer,
9112 offset + cfun_frame_layout.gprs_offset
9113 + (i - cfun_frame_layout.first_save_gpr_slot)
9115 addr = gen_rtx_MEM (Pmode, addr);
9116 set_mem_alias_set (addr, get_frame_alias_set ());
9117 emit_move_insn (addr, gen_rtx_REG (Pmode, i));
9121 = alloc_reg_note (REG_CFA_RESTORE,
9122 gen_rtx_REG (Pmode, i), cfa_restores);
      /* Fetch return address from stack before load multiple;
	 this helps scheduling.  */
9130 if (cfun_frame_layout.save_return_addr_p
9131 || (cfun_frame_layout.first_restore_gpr < BASE_REGNUM
9132 && cfun_frame_layout.last_restore_gpr > RETURN_REGNUM))
9134 int return_regnum = find_unused_clobbered_reg();
9137 return_reg = gen_rtx_REG (Pmode, return_regnum);
9139 addr = plus_constant (Pmode, frame_pointer,
9140 offset + cfun_frame_layout.gprs_offset
9142 - cfun_frame_layout.first_save_gpr_slot)
9144 addr = gen_rtx_MEM (Pmode, addr);
9145 set_mem_alias_set (addr, get_frame_alias_set ());
9146 emit_move_insn (return_reg, addr);
9150 insn = restore_gprs (frame_pointer,
9151 offset + cfun_frame_layout.gprs_offset
9152 + (cfun_frame_layout.first_restore_gpr
9153 - cfun_frame_layout.first_save_gpr_slot)
9155 cfun_frame_layout.first_restore_gpr,
9156 cfun_frame_layout.last_restore_gpr);
9157 insn = emit_insn (insn);
9158 REG_NOTES (insn) = cfa_restores;
9159 add_reg_note (insn, REG_CFA_DEF_CFA,
9160 plus_constant (Pmode, stack_pointer_rtx,
9161 STACK_POINTER_OFFSET));
9162 RTX_FRAME_RELATED_P (insn) = 1;
9165 s390_restore_gprs_from_fprs ();
9170 /* Return to caller. */
9172 p = rtvec_alloc (2);
9174 RTVEC_ELT (p, 0) = ret_rtx;
9175 RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, return_reg);
9176 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
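/* The matching -m64 epilogue for the prologue sketched after
   s390_emit_prologue (illustrative):

	lmg	%r6,%r15,208(%r15)	# 48 + frame size 160
	br	%r14

   The load-multiple restores %r15 itself, so no separate stack
   pointer increment is needed before the return.  */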
9180 /* Implement TARGET_SET_UP_BY_PROLOGUE. */
9183 s300_set_up_by_prologue (hard_reg_set_container *regs)
9185 if (cfun->machine->base_reg
9186 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
9187 SET_HARD_REG_BIT (regs->set, REGNO (cfun->machine->base_reg));
/* Return true if the function can use simple_return to return outside
   of a shrink-wrapped region.  At present shrink-wrapping is supported
   in all cases.  */

bool
s390_can_use_simple_return_insn (void)
9200 /* Return true if the epilogue is guaranteed to contain only a return
9201 instruction and if a direct return can therefore be used instead.
9202 One of the main advantages of using direct return instructions
9203 is that we can then use conditional returns. */
9206 s390_can_use_return_insn (void)
9210 if (!reload_completed)
9216 if (TARGET_TPF_PROFILING)
9219 for (i = 0; i < 16; i++)
9220 if (cfun_gpr_save_slot (i))
9223 /* For 31 bit this is not covered by the frame_size check below
9224 since f4, f6 are saved in the register save area without needing
9225 additional stack space. */
9227 && (cfun_fpr_save_p (FPR4_REGNUM) || cfun_fpr_save_p (FPR6_REGNUM)))
9230 if (cfun->machine->base_reg
9231 && !call_really_used_regs[REGNO (cfun->machine->base_reg)])
9234 return cfun_frame_layout.frame_size == 0;
9237 /* Return the size in bytes of a function argument of
9238 type TYPE and/or mode MODE. At least one of TYPE or
9239 MODE must be specified. */
9242 s390_function_arg_size (enum machine_mode mode, const_tree type)
9245 return int_size_in_bytes (type);
9247 /* No type info available for some library calls ... */
9248 if (mode != BLKmode)
9249 return GET_MODE_SIZE (mode);
  /* If we have neither type nor mode, abort.  */
9255 /* Return true if a function argument of type TYPE and mode MODE
9256 is to be passed in a floating-point register, if available. */
9259 s390_function_arg_float (enum machine_mode mode, const_tree type)
9261 int size = s390_function_arg_size (mode, type);
9265 /* Soft-float changes the ABI: no floating-point registers are used. */
9266 if (TARGET_SOFT_FLOAT)
9269 /* No type info available for some library calls ... */
9271 return mode == SFmode || mode == DFmode || mode == SDmode || mode == DDmode;
9273 /* The ABI says that record types with a single member are treated
9274 just like that member would be. */
9275 while (TREE_CODE (type) == RECORD_TYPE)
9277 tree field, single = NULL_TREE;
9279 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
9281 if (TREE_CODE (field) != FIELD_DECL)
9284 if (single == NULL_TREE)
9285 single = TREE_TYPE (field);
9290 if (single == NULL_TREE)
9296 return TREE_CODE (type) == REAL_TYPE;
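/* Illustrative example of the single-member record rule above:

     struct wrapped { double d; };

   is passed exactly like a plain double, i.e. in an FPR, while a
   record with two members is not.  */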
9299 /* Return true if a function argument of type TYPE and mode MODE
9300 is to be passed in an integer register, or a pair of integer
9301 registers, if available. */
9304 s390_function_arg_integer (enum machine_mode mode, const_tree type)
9306 int size = s390_function_arg_size (mode, type);
9310 /* No type info available for some library calls ... */
9312 return GET_MODE_CLASS (mode) == MODE_INT
9313 || (TARGET_SOFT_FLOAT && SCALAR_FLOAT_MODE_P (mode));
9315 /* We accept small integral (and similar) types. */
9316 if (INTEGRAL_TYPE_P (type)
9317 || POINTER_TYPE_P (type)
9318 || TREE_CODE (type) == NULLPTR_TYPE
9319 || TREE_CODE (type) == OFFSET_TYPE
9320 || (TARGET_SOFT_FLOAT && TREE_CODE (type) == REAL_TYPE))
9323 /* We also accept structs of size 1, 2, 4, 8 that are not
9324 passed in floating-point registers. */
9325 if (AGGREGATE_TYPE_P (type)
9326 && exact_log2 (size) >= 0
9327 && !s390_function_arg_float (mode, type))
/* Return 1 if a function argument of type TYPE and mode MODE
   is to be passed by reference.  The ABI specifies that only
   structures of size 1, 2, 4, or 8 bytes are passed by value,
   all other structures (and complex numbers) are passed by
   reference.  */
9340 s390_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
9341 enum machine_mode mode, const_tree type,
9342 bool named ATTRIBUTE_UNUSED)
9344 int size = s390_function_arg_size (mode, type);
      if (AGGREGATE_TYPE_P (type) && exact_log2 (size) < 0)
	return true;

      if (TREE_CODE (type) == COMPLEX_TYPE
	  || TREE_CODE (type) == VECTOR_TYPE)
	return true;

  return false;
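/* For example: a struct of size 3 (not a power of two) and any
   complex value are passed by reference, while a struct of size 8 is
   passed by value, in a GPR if one is still available.  */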
/* Update the data in CUM to advance over an argument of mode MODE and
   data type TYPE.  (TYPE is null for libcalls where that information
   may not be available.)  The boolean NAMED specifies whether the
   argument is a named argument (as opposed to an unnamed argument
   matching an ellipsis).  */
9368 s390_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
9369 const_tree type, bool named ATTRIBUTE_UNUSED)
9371 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9373 if (s390_function_arg_float (mode, type))
9377 else if (s390_function_arg_integer (mode, type))
9379 int size = s390_function_arg_size (mode, type);
9380 cum->gprs += ((size + UNITS_PER_LONG - 1) / UNITS_PER_LONG);
9386 /* Define where to put the arguments to a function.
9387 Value is zero to push the argument on the stack,
9388 or a hard register in which to store the argument.
9390 MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
   This is null for libcalls where that information may
   not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
9395 the preceding args and about the function being called.
9396 NAMED is nonzero if this argument is a named parameter
9397 (otherwise it is an extra parameter matching an ellipsis).
9399 On S/390, we use general purpose registers 2 through 6 to
9400 pass integer, pointer, and certain structure arguments, and
9401 floating point registers 0 and 2 (0, 2, 4, and 6 on 64-bit)
9402 to pass floating point arguments. All remaining arguments
9403 are pushed to the stack. */
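/* For example (illustrative), with -m64 a call to

     void f (int a, double b, long c);

   passes A in %r2, B in %f0 and C in %r3; GPRs and FPRs are assigned
   from independent counters, see s390_function_arg_advance above.  */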
9406 s390_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
9407 const_tree type, bool named ATTRIBUTE_UNUSED)
9409 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
9411 if (s390_function_arg_float (mode, type))
9413 if (cum->fprs + 1 > FP_ARG_NUM_REG)
9416 return gen_rtx_REG (mode, cum->fprs + 16);
9418 else if (s390_function_arg_integer (mode, type))
9420 int size = s390_function_arg_size (mode, type);
9421 int n_gprs = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
9423 if (cum->gprs + n_gprs > GP_ARG_NUM_REG)
9425 else if (n_gprs == 1 || UNITS_PER_WORD == UNITS_PER_LONG)
9426 return gen_rtx_REG (mode, cum->gprs + 2);
9427 else if (n_gprs == 2)
9429 rtvec p = rtvec_alloc (2);
9432 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 2),
9435 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, cum->gprs + 3),
9438 return gen_rtx_PARALLEL (mode, p);
9442 /* After the real arguments, expand_call calls us once again
9443 with a void_type_node type. Whatever we return here is
9444 passed as operand 2 to the call expanders.
9446 We don't need this feature ... */
9447 else if (type == void_type_node)
9453 /* Return true if return values of type TYPE should be returned
9454 in a memory buffer whose address is passed by the caller as
9455 hidden first argument. */
9458 s390_return_in_memory (const_tree type, const_tree fundecl ATTRIBUTE_UNUSED)
9460 /* We accept small integral (and similar) types. */
9461 if (INTEGRAL_TYPE_P (type)
9462 || POINTER_TYPE_P (type)
9463 || TREE_CODE (type) == OFFSET_TYPE
9464 || TREE_CODE (type) == REAL_TYPE)
9465 return int_size_in_bytes (type) > 8;
/* Aggregates and similar constructs are always returned
in memory. */
9469 if (AGGREGATE_TYPE_P (type)
9470 || TREE_CODE (type) == COMPLEX_TYPE
9471 || TREE_CODE (type) == VECTOR_TYPE)
9474 /* ??? We get called on all sorts of random stuff from
9475 aggregate_value_p. We can't abort, but it's not clear
9476 what's safe to return. Pretend it's a struct I guess. */
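/* Two illustrative consequences of the checks above (editor's
   sketch, not from the original sources):

     long f (void);                    small scalar, returned in a
                                       register
     struct big { char c[16]; };
     struct big g (void);              aggregate, returned in memory;
                                       the caller passes the buffer
                                       address as hidden first
                                       argument  */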
9480 /* Function arguments and return values are promoted to word size. */
9482 static enum machine_mode
9483 s390_promote_function_mode (const_tree type, enum machine_mode mode,
int *punsignedp,
const_tree fntype ATTRIBUTE_UNUSED,
9486 int for_return ATTRIBUTE_UNUSED)
9488 if (INTEGRAL_MODE_P (mode)
9489 && GET_MODE_SIZE (mode) < UNITS_PER_LONG)
9491 if (type != NULL_TREE && POINTER_TYPE_P (type))
9492 *punsignedp = POINTERS_EXTEND_UNSIGNED;
9499 /* Define where to return a (scalar) value of type RET_TYPE.
9500 If RET_TYPE is null, define where to return a (scalar)
9501 value of mode MODE from a libcall. */
9504 s390_function_and_libcall_value (enum machine_mode mode,
9505 const_tree ret_type,
9506 const_tree fntype_or_decl,
9507 bool outgoing ATTRIBUTE_UNUSED)
9509 /* For normal functions perform the promotion as
9510 promote_function_mode would do. */
9513 int unsignedp = TYPE_UNSIGNED (ret_type);
9514 mode = promote_function_mode (ret_type, mode, &unsignedp,
9518 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT || SCALAR_FLOAT_MODE_P (mode));
9519 gcc_assert (GET_MODE_SIZE (mode) <= 8);
9521 if (TARGET_HARD_FLOAT && SCALAR_FLOAT_MODE_P (mode))
9522 return gen_rtx_REG (mode, 16);
9523 else if (GET_MODE_SIZE (mode) <= UNITS_PER_LONG
9524 || UNITS_PER_LONG == UNITS_PER_WORD)
9525 return gen_rtx_REG (mode, 2);
9526 else if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_LONG)
9528 /* This case is triggered when returning a 64 bit value with
9529 -m31 -mzarch. Although the value would fit into a single
9530 register it has to be forced into a 32 bit register pair in
9531 order to match the ABI. */
9532 rtvec p = rtvec_alloc (2);
9535 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 2), const0_rtx);
9537 = gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, 3), GEN_INT (4));
9539 return gen_rtx_PARALLEL (mode, p);
9545 /* Define where to return a scalar return value of type RET_TYPE. */
9548 s390_function_value (const_tree ret_type, const_tree fn_decl_or_type,
9551 return s390_function_and_libcall_value (TYPE_MODE (ret_type), ret_type,
9552 fn_decl_or_type, outgoing);
/* Define where to return a scalar libcall return value of mode
MODE. */
9559 s390_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
9561 return s390_function_and_libcall_value (mode, NULL_TREE,
9566 /* Create and return the va_list datatype.
9568 On S/390, va_list is an array type equivalent to
typedef struct __va_list_tag
  {
    long __gpr;
    long __fpr;
    void *__overflow_arg_area;
    void *__reg_save_area;
  } va_list[1];
9578 where __gpr and __fpr hold the number of general purpose
9579 or floating point arguments used up to now, respectively,
9580 __overflow_arg_area points to the stack location of the
9581 next argument passed on the stack, and __reg_save_area
9582 always points to the start of the register area in the
9583 call frame of the current function. The function prologue
9584 saves all registers used for argument passing into this
9585 area if the function uses variable arguments. */
9588 s390_build_builtin_va_list (void)
9590 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
9592 record = lang_hooks.types.make_type (RECORD_TYPE);
9595 build_decl (BUILTINS_LOCATION,
9596 TYPE_DECL, get_identifier ("__va_list_tag"), record);
9598 f_gpr = build_decl (BUILTINS_LOCATION,
9599 FIELD_DECL, get_identifier ("__gpr"),
9600 long_integer_type_node);
9601 f_fpr = build_decl (BUILTINS_LOCATION,
9602 FIELD_DECL, get_identifier ("__fpr"),
9603 long_integer_type_node);
9604 f_ovf = build_decl (BUILTINS_LOCATION,
9605 FIELD_DECL, get_identifier ("__overflow_arg_area"),
9607 f_sav = build_decl (BUILTINS_LOCATION,
9608 FIELD_DECL, get_identifier ("__reg_save_area"),
9611 va_list_gpr_counter_field = f_gpr;
9612 va_list_fpr_counter_field = f_fpr;
9614 DECL_FIELD_CONTEXT (f_gpr) = record;
9615 DECL_FIELD_CONTEXT (f_fpr) = record;
9616 DECL_FIELD_CONTEXT (f_ovf) = record;
9617 DECL_FIELD_CONTEXT (f_sav) = record;
9619 TYPE_STUB_DECL (record) = type_decl;
9620 TYPE_NAME (record) = type_decl;
9621 TYPE_FIELDS (record) = f_gpr;
9622 DECL_CHAIN (f_gpr) = f_fpr;
9623 DECL_CHAIN (f_fpr) = f_ovf;
9624 DECL_CHAIN (f_ovf) = f_sav;
9626 layout_type (record);
9628 /* The correct type is an array type of one element. */
9629 return build_array_type (record, build_index_type (size_zero_node));
9632 /* Implement va_start by filling the va_list structure VALIST.
9633 STDARG_P is always true, and ignored.
9634 NEXTARG points to the first anonymous stack argument.
9636 The following global variables are used to initialize
9637 the va_list structure:
crtl->args.info:
  holds number of gprs and fprs used for named arguments.
9641 crtl->args.arg_offset_rtx:
9642 holds the offset of the first anonymous stack argument
9643 (relative to the virtual arg pointer). */
9646 s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
9648 HOST_WIDE_INT n_gpr, n_fpr;
9650 tree f_gpr, f_fpr, f_ovf, f_sav;
9651 tree gpr, fpr, ovf, sav, t;
9653 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9654 f_fpr = DECL_CHAIN (f_gpr);
9655 f_ovf = DECL_CHAIN (f_fpr);
9656 f_sav = DECL_CHAIN (f_ovf);
9658 valist = build_simple_mem_ref (valist);
9659 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9660 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9661 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9662 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9664 /* Count number of gp and fp argument registers used. */
9666 n_gpr = crtl->args.info.gprs;
9667 n_fpr = crtl->args.info.fprs;
9669 if (cfun->va_list_gpr_size)
9671 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
9672 build_int_cst (NULL_TREE, n_gpr));
9673 TREE_SIDE_EFFECTS (t) = 1;
9674 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9677 if (cfun->va_list_fpr_size)
9679 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
9680 build_int_cst (NULL_TREE, n_fpr));
9681 TREE_SIDE_EFFECTS (t) = 1;
9682 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9685 /* Find the overflow area. */
9686 if (n_gpr + cfun->va_list_gpr_size > GP_ARG_NUM_REG
9687 || n_fpr + cfun->va_list_fpr_size > FP_ARG_NUM_REG)
9689 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
9691 off = INTVAL (crtl->args.arg_offset_rtx);
9692 off = off < 0 ? 0 : off;
9693 if (TARGET_DEBUG_ARG)
9694 fprintf (stderr, "va_start: n_gpr = %d, n_fpr = %d off %d\n",
9695 (int)n_gpr, (int)n_fpr, off);
9697 t = fold_build_pointer_plus_hwi (t, off);
9699 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
9700 TREE_SIDE_EFFECTS (t) = 1;
9701 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
9704 /* Find the register save area. */
9705 if ((cfun->va_list_gpr_size && n_gpr < GP_ARG_NUM_REG)
9706 || (cfun->va_list_fpr_size && n_fpr < FP_ARG_NUM_REG))
9708 t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
9709 t = fold_build_pointer_plus_hwi (t, -RETURN_REGNUM * UNITS_PER_LONG);
9711 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
9712 TREE_SIDE_EFFECTS (t) = 1;
9713 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
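/* As an illustration (editor's sketch): for

     void log_it (const char *fmt, ...);

   the named argument FMT consumes one GPR, so va_start stores
   __gpr = 1 and __fpr = 0, points __overflow_arg_area at the first
   stack-passed variadic argument, and points __reg_save_area at the
   area where the prologue dumped the argument registers. */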
9717 /* Implement va_arg by updating the va_list structure
9718 VALIST as required to retrieve an argument of type
9719 TYPE, and returning that argument.
9721 Generates code equivalent to:
if (integral value) {
  if (size <= 4 && args.gpr < 5 ||
      size > 4 && args.gpr < 4)
    ret = args.reg_save_area[args.gpr+8]
  else
    ret = *args.overflow_arg_area++;
} else if (float value) {
  if (args.fpr < 2)
    ret = args.reg_save_area[args.fpr+64]
  else
    ret = *args.overflow_arg_area++;
} else if (aggregate value) {
  if (args.gpr < 5)
    ret = *args.reg_save_area[args.gpr]
  else
    ret = **args.overflow_arg_area++;
} */
9742 s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
9743 gimple_seq *post_p ATTRIBUTE_UNUSED)
9745 tree f_gpr, f_fpr, f_ovf, f_sav;
9746 tree gpr, fpr, ovf, sav, reg, t, u;
9747 int indirect_p, size, n_reg, sav_ofs, sav_scale, max_reg;
9748 tree lab_false, lab_over, addr;
9750 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
9751 f_fpr = DECL_CHAIN (f_gpr);
9752 f_ovf = DECL_CHAIN (f_fpr);
9753 f_sav = DECL_CHAIN (f_ovf);
9755 valist = build_va_arg_indirect_ref (valist);
9756 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
9757 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
9758 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
9760 /* The tree for args* cannot be shared between gpr/fpr and ovf since
9761 both appear on a lhs. */
9762 valist = unshare_expr (valist);
9763 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
9765 size = int_size_in_bytes (type);
9767 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
9769 if (TARGET_DEBUG_ARG)
9771 fprintf (stderr, "va_arg: aggregate type");
9775 /* Aggregates are passed by reference. */
/* Kernel stack layout on 31 bit: It is assumed here that no padding
   will be added by s390_frame_info because for va_args an even
   number of GPRs always has to be saved (r2-r15 = 14 regs). */
9783 sav_ofs = 2 * UNITS_PER_LONG;
9784 sav_scale = UNITS_PER_LONG;
9785 size = UNITS_PER_LONG;
9786 max_reg = GP_ARG_NUM_REG - n_reg;
9788 else if (s390_function_arg_float (TYPE_MODE (type), type))
9790 if (TARGET_DEBUG_ARG)
9792 fprintf (stderr, "va_arg: float type");
9796 /* FP args go in FP registers, if present. */
9800 sav_ofs = 16 * UNITS_PER_LONG;
9802 max_reg = FP_ARG_NUM_REG - n_reg;
9806 if (TARGET_DEBUG_ARG)
9808 fprintf (stderr, "va_arg: other type");
9812 /* Otherwise into GP registers. */
9815 n_reg = (size + UNITS_PER_LONG - 1) / UNITS_PER_LONG;
/* Kernel stack layout on 31 bit: It is assumed here that no padding
   will be added by s390_frame_info because for va_args an even
   number of GPRs always has to be saved (r2-r15 = 14 regs). */
9820 sav_ofs = 2 * UNITS_PER_LONG;
9822 if (size < UNITS_PER_LONG)
9823 sav_ofs += UNITS_PER_LONG - size;
9825 sav_scale = UNITS_PER_LONG;
9826 max_reg = GP_ARG_NUM_REG - n_reg;
9829 /* Pull the value out of the saved registers ... */
9831 lab_false = create_artificial_label (UNKNOWN_LOCATION);
9832 lab_over = create_artificial_label (UNKNOWN_LOCATION);
9833 addr = create_tmp_var (ptr_type_node, "addr");
9835 t = fold_convert (TREE_TYPE (reg), size_int (max_reg));
9836 t = build2 (GT_EXPR, boolean_type_node, reg, t);
9837 u = build1 (GOTO_EXPR, void_type_node, lab_false);
9838 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
9839 gimplify_and_add (t, pre_p);
9841 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
9842 u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
9843 fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
9844 t = fold_build_pointer_plus (t, u);
9846 gimplify_assign (addr, t, pre_p);
9848 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
9850 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
9853 /* ... Otherwise out of the overflow area. */
9856 if (size < UNITS_PER_LONG)
9857 t = fold_build_pointer_plus_hwi (t, UNITS_PER_LONG - size);
9859 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
9861 gimplify_assign (addr, t, pre_p);
9863 t = fold_build_pointer_plus_hwi (t, size);
9864 gimplify_assign (ovf, t, pre_p);
9866 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
9869 /* Increment register save count. */
9871 u = build2 (PREINCREMENT_EXPR, TREE_TYPE (reg), reg,
9872 fold_convert (TREE_TYPE (reg), size_int (n_reg)));
9873 gimplify_and_add (u, pre_p);
t = build_pointer_type_for_mode (build_pointer_type (type),
                                 ptr_mode, true);
9879 addr = fold_convert (t, addr);
9880 addr = build_va_arg_indirect_ref (addr);
9884 t = build_pointer_type_for_mode (type, ptr_mode, true);
9885 addr = fold_convert (t, addr);
9888 return build_va_arg_indirect_ref (addr);
/* Emit rtl for the tbegin or tbegin_retry (RETRY != NULL_RTX)
   instruction.
   DEST - Register location where CC will be stored.
   TDB - Pointer to a 256 byte area where to store the transaction
         diagnostic block. NULL if TDB is not needed.
   RETRY - Retry count value. If non-NULL a retry loop for CC2
           is generated.
   CLOBBER_FPRS_P - If true clobbers for all FPRs are emitted as part
                    of the tbegin instruction pattern. */
9902 s390_expand_tbegin (rtx dest, rtx tdb, rtx retry, bool clobber_fprs_p)
9904 rtx retry_plus_two = gen_reg_rtx (SImode);
9905 rtx retry_reg = gen_reg_rtx (SImode);
9906 rtx_code_label *retry_label = NULL;
9908 if (retry != NULL_RTX)
9910 emit_move_insn (retry_reg, retry);
9911 emit_insn (gen_addsi3 (retry_plus_two, retry_reg, const2_rtx));
9912 emit_insn (gen_addsi3 (retry_reg, retry_reg, const1_rtx));
9913 retry_label = gen_label_rtx ();
9914 emit_label (retry_label);
9918 emit_insn (gen_tbegin_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK), tdb));
9920 emit_insn (gen_tbegin_nofloat_1 (gen_rtx_CONST_INT (VOIDmode, TBEGIN_MASK),
9923 emit_move_insn (dest, gen_rtx_UNSPEC (SImode,
9924 gen_rtvec (1, gen_rtx_REG (CCRAWmode,
9927 if (retry != NULL_RTX)
9929 const int CC0 = 1 << 3;
9930 const int CC1 = 1 << 2;
9931 const int CC3 = 1 << 0;
9933 rtx count = gen_reg_rtx (SImode);
9934 rtx_code_label *leave_label = gen_label_rtx ();
9936 /* Exit for success and permanent failures. */
9937 jump = s390_emit_jump (leave_label,
9938 gen_rtx_EQ (VOIDmode,
9939 gen_rtx_REG (CCRAWmode, CC_REGNUM),
9940 gen_rtx_CONST_INT (VOIDmode, CC0 | CC1 | CC3)));
9941 LABEL_NUSES (leave_label) = 1;
9943 /* CC2 - transient failure. Perform retry with ppa. */
9944 emit_move_insn (count, retry_plus_two);
9945 emit_insn (gen_subsi3 (count, count, retry_reg));
9946 emit_insn (gen_tx_assist (count));
9947 jump = emit_jump_insn (gen_doloop_si64 (retry_label,
9950 JUMP_LABEL (jump) = retry_label;
9951 LABEL_NUSES (retry_label) = 1;
9952 emit_label (leave_label);
enum s390_builtin
{
  S390_BUILTIN_TBEGIN,
9961 S390_BUILTIN_TBEGIN_NOFLOAT,
9962 S390_BUILTIN_TBEGIN_RETRY,
9963 S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
9964 S390_BUILTIN_TBEGINC,
S390_BUILTIN_TEND,
S390_BUILTIN_TABORT,
9967 S390_BUILTIN_NON_TX_STORE,
9968 S390_BUILTIN_TX_NESTING_DEPTH,
S390_BUILTIN_TX_ASSIST,

S390_BUILTIN_max
};
9974 static enum insn_code const code_for_builtin[S390_BUILTIN_max] = {
CODE_FOR_tbegin,
CODE_FOR_tbegin_nofloat,
9977 CODE_FOR_tbegin_retry,
9978 CODE_FOR_tbegin_retry_nofloat,
9988 s390_init_builtins (void)
9990 tree ftype, uint64_type;
9991 tree returns_twice_attr = tree_cons (get_identifier ("returns_twice"),
9993 tree noreturn_attr = tree_cons (get_identifier ("noreturn"), NULL, NULL);
9995 /* void foo (void) */
9996 ftype = build_function_type_list (void_type_node, NULL_TREE);
9997 add_builtin_function ("__builtin_tbeginc", ftype, S390_BUILTIN_TBEGINC,
9998 BUILT_IN_MD, NULL, NULL_TREE);
10000 /* void foo (int) */
10001 ftype = build_function_type_list (void_type_node, integer_type_node,
10003 add_builtin_function ("__builtin_tabort", ftype,
10004 S390_BUILTIN_TABORT, BUILT_IN_MD, NULL, noreturn_attr);
10005 add_builtin_function ("__builtin_tx_assist", ftype,
10006 S390_BUILTIN_TX_ASSIST, BUILT_IN_MD, NULL, NULL_TREE);
10008 /* int foo (void *) */
10009 ftype = build_function_type_list (integer_type_node, ptr_type_node, NULL_TREE);
10010 add_builtin_function ("__builtin_tbegin", ftype, S390_BUILTIN_TBEGIN,
10011 BUILT_IN_MD, NULL, returns_twice_attr);
10012 add_builtin_function ("__builtin_tbegin_nofloat", ftype,
10013 S390_BUILTIN_TBEGIN_NOFLOAT,
10014 BUILT_IN_MD, NULL, returns_twice_attr);
10016 /* int foo (void *, int) */
10017 ftype = build_function_type_list (integer_type_node, ptr_type_node,
10018 integer_type_node, NULL_TREE);
10019 add_builtin_function ("__builtin_tbegin_retry", ftype,
10020 S390_BUILTIN_TBEGIN_RETRY,
10022 NULL, returns_twice_attr);
10023 add_builtin_function ("__builtin_tbegin_retry_nofloat", ftype,
10024 S390_BUILTIN_TBEGIN_RETRY_NOFLOAT,
10026 NULL, returns_twice_attr);
10028 /* int foo (void) */
10029 ftype = build_function_type_list (integer_type_node, NULL_TREE);
10030 add_builtin_function ("__builtin_tx_nesting_depth", ftype,
10031 S390_BUILTIN_TX_NESTING_DEPTH,
10032 BUILT_IN_MD, NULL, NULL_TREE);
10033 add_builtin_function ("__builtin_tend", ftype,
10034 S390_BUILTIN_TEND, BUILT_IN_MD, NULL, NULL_TREE);
10036 /* void foo (uint64_t *, uint64_t) */
10038 uint64_type = long_unsigned_type_node;
10040 uint64_type = long_long_unsigned_type_node;
10042 ftype = build_function_type_list (void_type_node,
10043 build_pointer_type (uint64_type),
10044 uint64_type, NULL_TREE);
10045 add_builtin_function ("__builtin_non_tx_store", ftype,
10046 S390_BUILTIN_NON_TX_STORE,
10047 BUILT_IN_MD, NULL, NULL_TREE);
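/* A minimal user-level sketch of how these builtins combine
   (editor's illustration; _HTM_TBEGIN_STARTED comes from the
   htmintrin.h shipped with the s390 port, and increment_locked is a
   hypothetical fallback):

     #include <htmintrin.h>

     long counter;

     void increment (void)
     {
       if (__builtin_tbegin ((void *) 0) == _HTM_TBEGIN_STARTED)
         {
           counter++;              transactional path
           __builtin_tend ();
         }
       else
         increment_locked ();      fallback, e.g. under a lock
     }
*/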
10050 /* Expand an expression EXP that calls a built-in function,
10051 with result going to TARGET if that's convenient
10052 (and in mode MODE if that's convenient).
10053 SUBTARGET may be used as the target for computing one of EXP's operands.
10054 IGNORE is nonzero if the value is to be ignored. */
10057 s390_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10058 enum machine_mode mode ATTRIBUTE_UNUSED,
10059 int ignore ATTRIBUTE_UNUSED)
10063 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10064 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10065 enum insn_code icode;
10066 rtx op[MAX_ARGS], pat;
10070 call_expr_arg_iterator iter;
10072 if (fcode >= S390_BUILTIN_max)
10073 internal_error ("bad builtin fcode");
10074 icode = code_for_builtin[fcode];
if (icode == 0)
  internal_error ("bad builtin fcode");
10079 error ("Transactional execution builtins not enabled (-mhtm)\n");
10081 /* Set a flag in the machine specific cfun part in order to support
10082 saving/restoring of FPRs. */
10083 if (fcode == S390_BUILTIN_TBEGIN || fcode == S390_BUILTIN_TBEGIN_RETRY)
10084 cfun->machine->tbegin_p = true;
10086 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
10089 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
10091 const struct insn_operand_data *insn_op;
10093 if (arg == error_mark_node)
10095 if (arity >= MAX_ARGS)
10098 insn_op = &insn_data[icode].operand[arity + nonvoid];
10100 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
10102 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
10104 if (insn_op->predicate == memory_operand)
10106 /* Don't move a NULL pointer into a register. Otherwise
10107 we have to rely on combine being able to move it back
10108 in order to get an immediate 0 in the instruction. */
10109 if (op[arity] != const0_rtx)
10110 op[arity] = copy_to_mode_reg (Pmode, op[arity]);
10111 op[arity] = gen_rtx_MEM (insn_op->mode, op[arity]);
10114 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
10122 enum machine_mode tmode = insn_data[icode].operand[0].mode;
10124 || GET_MODE (target) != tmode
10125 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
10126 target = gen_reg_rtx (tmode);
10132 pat = GEN_FCN (icode) (target);
10136 pat = GEN_FCN (icode) (target, op[0]);
10138 pat = GEN_FCN (icode) (op[0]);
10142 pat = GEN_FCN (icode) (target, op[0], op[1]);
10144 pat = GEN_FCN (icode) (op[0], op[1]);
10147 gcc_unreachable ();
10159 /* We call mcount before the function prologue. So a profiled leaf
10160 function should stay a leaf function. */
10163 s390_keep_leaf_when_profiled ()
/* Output assembly code for the trampoline template to
   stdio stream FILE.
10171 On S/390, we use gpr 1 internally in the trampoline code;
10172 gpr 0 is used to hold the static chain. */
10175 s390_asm_trampoline_template (FILE *file)
10178 op[0] = gen_rtx_REG (Pmode, 0);
10179 op[1] = gen_rtx_REG (Pmode, 1);
10183 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
10184 output_asm_insn ("lmg\t%0,%1,14(%1)", op); /* 6 byte */
10185 output_asm_insn ("br\t%1", op); /* 2 byte */
10186 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 10));
10190 output_asm_insn ("basr\t%1,0", op); /* 2 byte */
10191 output_asm_insn ("lm\t%0,%1,6(%1)", op); /* 4 byte */
10192 output_asm_insn ("br\t%1", op); /* 2 byte */
10193 ASM_OUTPUT_SKIP (file, (HOST_WIDE_INT)(TRAMPOLINE_SIZE - 8));
10197 /* Emit RTL insns to initialize the variable parts of a trampoline.
10198 FNADDR is an RTX for the address of the function's pure code.
10199 CXT is an RTX for the static chain value for the function. */
10202 s390_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
10204 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10207 emit_block_move (m_tramp, assemble_trampoline_template (),
10208 GEN_INT (2 * UNITS_PER_LONG), BLOCK_OP_NORMAL);
10210 mem = adjust_address (m_tramp, Pmode, 2 * UNITS_PER_LONG);
10211 emit_move_insn (mem, cxt);
10212 mem = adjust_address (m_tramp, Pmode, 3 * UNITS_PER_LONG);
10213 emit_move_insn (mem, fnaddr);
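/* Resulting trampoline layout, sketched by the editor for the
   31-bit case (UNITS_PER_LONG == 4):

     offset  0:  basr %r1,0              sets %r1 to offset 2
     offset  2:  lm   %r0,%r1,6(%r1)     loads chain and target from
                                         offsets 8 and 12
     offset  6:  br   %r1
     offset  8:  <static chain value>    written by s390_trampoline_init
     offset 12:  <function address>
*/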
10216 /* Output assembler code to FILE to increment profiler label # LABELNO
10217 for profiling a function entry. */
10220 s390_function_profiler (FILE *file, int labelno)
10225 ASM_GENERATE_INTERNAL_LABEL (label, "LP", labelno);
10227 fprintf (file, "# function profiler \n");
10229 op[0] = gen_rtx_REG (Pmode, RETURN_REGNUM);
10230 op[1] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
10231 op[1] = gen_rtx_MEM (Pmode, plus_constant (Pmode, op[1], UNITS_PER_LONG));
10233 op[2] = gen_rtx_REG (Pmode, 1);
10234 op[3] = gen_rtx_SYMBOL_REF (Pmode, label);
10235 SYMBOL_REF_FLAGS (op[3]) = SYMBOL_FLAG_LOCAL;
10237 op[4] = gen_rtx_SYMBOL_REF (Pmode, "_mcount");
10240 op[4] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[4]), UNSPEC_PLT);
10241 op[4] = gen_rtx_CONST (Pmode, op[4]);
10246 output_asm_insn ("stg\t%0,%1", op);
10247 output_asm_insn ("larl\t%2,%3", op);
10248 output_asm_insn ("brasl\t%0,%4", op);
10249 output_asm_insn ("lg\t%0,%1", op);
10251 else if (!flag_pic)
10253 op[6] = gen_label_rtx ();
10255 output_asm_insn ("st\t%0,%1", op);
10256 output_asm_insn ("bras\t%2,%l6", op);
10257 output_asm_insn (".long\t%4", op);
10258 output_asm_insn (".long\t%3", op);
10259 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
10260 output_asm_insn ("l\t%0,0(%2)", op);
10261 output_asm_insn ("l\t%2,4(%2)", op);
10262 output_asm_insn ("basr\t%0,%0", op);
10263 output_asm_insn ("l\t%0,%1", op);
10267 op[5] = gen_label_rtx ();
10268 op[6] = gen_label_rtx ();
10270 output_asm_insn ("st\t%0,%1", op);
10271 output_asm_insn ("bras\t%2,%l6", op);
10272 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[5]));
10273 output_asm_insn (".long\t%4-%l5", op);
10274 output_asm_insn (".long\t%3-%l5", op);
10275 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[6]));
10276 output_asm_insn ("lr\t%0,%2", op);
10277 output_asm_insn ("a\t%0,0(%2)", op);
10278 output_asm_insn ("a\t%2,4(%2)", op);
10279 output_asm_insn ("basr\t%0,%0", op);
10280 output_asm_insn ("l\t%0,%1", op);
10284 /* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
10285 into its SYMBOL_REF_FLAGS. */
10288 s390_encode_section_info (tree decl, rtx rtl, int first)
10290 default_encode_section_info (decl, rtl, first);
10292 if (TREE_CODE (decl) == VAR_DECL)
10294 /* If a variable has a forced alignment to < 2 bytes, mark it
with SYMBOL_FLAG_ALIGN1 to prevent it from being used as LARL
operand. */
10297 if (DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 16)
10298 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
10299 if (!DECL_SIZE (decl)
10300 || !DECL_ALIGN (decl)
10301 || !tree_fits_shwi_p (DECL_SIZE (decl))
10302 || (DECL_ALIGN (decl) <= 64
10303 && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl))))
10304 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
10307 /* Literal pool references don't have a decl so they are handled
10308 differently here. We rely on the information in the MEM_ALIGN
10309 entry to decide upon natural alignment. */
10311 && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF
10312 && TREE_CONSTANT_POOL_ADDRESS_P (XEXP (rtl, 0))
10313 && (MEM_ALIGN (rtl) == 0
10314 || GET_MODE_BITSIZE (GET_MODE (rtl)) == 0
10315 || MEM_ALIGN (rtl) < GET_MODE_BITSIZE (GET_MODE (rtl))))
10316 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED;
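/* An illustrative user-level trigger for SYMBOL_FLAG_ALIGN1
   (editor's example):

     extern char c __attribute__ ((aligned (1)));

   LARL can only produce addresses that are a multiple of 2, so C
   must not be addressed through it. */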
10319 /* Output thunk to FILE that implements a C++ virtual function call (with
10320 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
10321 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
10322 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
10323 relative to the resulting this pointer. */
10326 s390_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10327 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10333 /* Make sure unwind info is emitted for the thunk if needed. */
10334 final_start_function (emit_barrier (), file, 1);
10336 /* Operand 0 is the target function. */
10337 op[0] = XEXP (DECL_RTL (function), 0);
10338 if (flag_pic && !SYMBOL_REF_LOCAL_P (op[0]))
10341 op[0] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op[0]),
10342 TARGET_64BIT ? UNSPEC_PLT : UNSPEC_GOT);
10343 op[0] = gen_rtx_CONST (Pmode, op[0]);
10346 /* Operand 1 is the 'this' pointer. */
10347 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10348 op[1] = gen_rtx_REG (Pmode, 3);
10350 op[1] = gen_rtx_REG (Pmode, 2);
10352 /* Operand 2 is the delta. */
10353 op[2] = GEN_INT (delta);
10355 /* Operand 3 is the vcall_offset. */
10356 op[3] = GEN_INT (vcall_offset);
10358 /* Operand 4 is the temporary register. */
10359 op[4] = gen_rtx_REG (Pmode, 1);
10361 /* Operands 5 to 8 can be used as labels. */
/* Operand 9 can be used as a temporary register. */
10370 /* Generate code. */
10373 /* Setup literal pool pointer if required. */
10374 if ((!DISP_IN_RANGE (delta)
10375 && !CONST_OK_FOR_K (delta)
10376 && !CONST_OK_FOR_Os (delta))
10377 || (!DISP_IN_RANGE (vcall_offset)
10378 && !CONST_OK_FOR_K (vcall_offset)
10379 && !CONST_OK_FOR_Os (vcall_offset)))
10381 op[5] = gen_label_rtx ();
10382 output_asm_insn ("larl\t%4,%5", op);
10385 /* Add DELTA to this pointer. */
10388 if (CONST_OK_FOR_J (delta))
10389 output_asm_insn ("la\t%1,%2(%1)", op);
10390 else if (DISP_IN_RANGE (delta))
10391 output_asm_insn ("lay\t%1,%2(%1)", op);
10392 else if (CONST_OK_FOR_K (delta))
10393 output_asm_insn ("aghi\t%1,%2", op);
10394 else if (CONST_OK_FOR_Os (delta))
10395 output_asm_insn ("agfi\t%1,%2", op);
10398 op[6] = gen_label_rtx ();
10399 output_asm_insn ("agf\t%1,%6-%5(%4)", op);
10403 /* Perform vcall adjustment. */
10406 if (DISP_IN_RANGE (vcall_offset))
10408 output_asm_insn ("lg\t%4,0(%1)", op);
10409 output_asm_insn ("ag\t%1,%3(%4)", op);
10411 else if (CONST_OK_FOR_K (vcall_offset))
10413 output_asm_insn ("lghi\t%4,%3", op);
10414 output_asm_insn ("ag\t%4,0(%1)", op);
10415 output_asm_insn ("ag\t%1,0(%4)", op);
10417 else if (CONST_OK_FOR_Os (vcall_offset))
10419 output_asm_insn ("lgfi\t%4,%3", op);
10420 output_asm_insn ("ag\t%4,0(%1)", op);
10421 output_asm_insn ("ag\t%1,0(%4)", op);
10425 op[7] = gen_label_rtx ();
10426 output_asm_insn ("llgf\t%4,%7-%5(%4)", op);
10427 output_asm_insn ("ag\t%4,0(%1)", op);
10428 output_asm_insn ("ag\t%1,0(%4)", op);
10432 /* Jump to target. */
10433 output_asm_insn ("jg\t%0", op);
10435 /* Output literal pool if required. */
10438 output_asm_insn (".align\t4", op);
10439 targetm.asm_out.internal_label (file, "L",
10440 CODE_LABEL_NUMBER (op[5]));
10444 targetm.asm_out.internal_label (file, "L",
10445 CODE_LABEL_NUMBER (op[6]));
10446 output_asm_insn (".long\t%2", op);
10450 targetm.asm_out.internal_label (file, "L",
10451 CODE_LABEL_NUMBER (op[7]));
10452 output_asm_insn (".long\t%3", op);
10457 /* Setup base pointer if required. */
10459 || (!DISP_IN_RANGE (delta)
10460 && !CONST_OK_FOR_K (delta)
10461 && !CONST_OK_FOR_Os (delta))
10462 || (!DISP_IN_RANGE (delta)
10463 && !CONST_OK_FOR_K (vcall_offset)
10464 && !CONST_OK_FOR_Os (vcall_offset)))
10466 op[5] = gen_label_rtx ();
10467 output_asm_insn ("basr\t%4,0", op);
10468 targetm.asm_out.internal_label (file, "L",
10469 CODE_LABEL_NUMBER (op[5]));
10472 /* Add DELTA to this pointer. */
10475 if (CONST_OK_FOR_J (delta))
10476 output_asm_insn ("la\t%1,%2(%1)", op);
10477 else if (DISP_IN_RANGE (delta))
10478 output_asm_insn ("lay\t%1,%2(%1)", op);
10479 else if (CONST_OK_FOR_K (delta))
10480 output_asm_insn ("ahi\t%1,%2", op);
10481 else if (CONST_OK_FOR_Os (delta))
10482 output_asm_insn ("afi\t%1,%2", op);
10485 op[6] = gen_label_rtx ();
10486 output_asm_insn ("a\t%1,%6-%5(%4)", op);
10490 /* Perform vcall adjustment. */
10493 if (CONST_OK_FOR_J (vcall_offset))
10495 output_asm_insn ("l\t%4,0(%1)", op);
10496 output_asm_insn ("a\t%1,%3(%4)", op);
10498 else if (DISP_IN_RANGE (vcall_offset))
10500 output_asm_insn ("l\t%4,0(%1)", op);
10501 output_asm_insn ("ay\t%1,%3(%4)", op);
10503 else if (CONST_OK_FOR_K (vcall_offset))
10505 output_asm_insn ("lhi\t%4,%3", op);
10506 output_asm_insn ("a\t%4,0(%1)", op);
10507 output_asm_insn ("a\t%1,0(%4)", op);
10509 else if (CONST_OK_FOR_Os (vcall_offset))
10511 output_asm_insn ("iilf\t%4,%3", op);
10512 output_asm_insn ("a\t%4,0(%1)", op);
10513 output_asm_insn ("a\t%1,0(%4)", op);
10517 op[7] = gen_label_rtx ();
10518 output_asm_insn ("l\t%4,%7-%5(%4)", op);
10519 output_asm_insn ("a\t%4,0(%1)", op);
10520 output_asm_insn ("a\t%1,0(%4)", op);
10523 /* We had to clobber the base pointer register.
10524 Re-setup the base pointer (with a different base). */
10525 op[5] = gen_label_rtx ();
10526 output_asm_insn ("basr\t%4,0", op);
10527 targetm.asm_out.internal_label (file, "L",
10528 CODE_LABEL_NUMBER (op[5]));
10531 /* Jump to target. */
10532 op[8] = gen_label_rtx ();
10535 output_asm_insn ("l\t%4,%8-%5(%4)", op);
10536 else if (!nonlocal)
10537 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10538 /* We cannot call through .plt, since .plt requires %r12 loaded. */
10539 else if (flag_pic == 1)
10541 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10542 output_asm_insn ("l\t%4,%0(%4)", op);
10544 else if (flag_pic == 2)
10546 op[9] = gen_rtx_REG (Pmode, 0);
10547 output_asm_insn ("l\t%9,%8-4-%5(%4)", op);
10548 output_asm_insn ("a\t%4,%8-%5(%4)", op);
10549 output_asm_insn ("ar\t%4,%9", op);
10550 output_asm_insn ("l\t%4,0(%4)", op);
10553 output_asm_insn ("br\t%4", op);
10555 /* Output literal pool. */
10556 output_asm_insn (".align\t4", op);
10558 if (nonlocal && flag_pic == 2)
10559 output_asm_insn (".long\t%0", op);
10562 op[0] = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
10563 SYMBOL_REF_FLAGS (op[0]) = SYMBOL_FLAG_LOCAL;
10566 targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (op[8]));
10568 output_asm_insn (".long\t%0", op);
10570 output_asm_insn (".long\t%0-%5", op);
10574 targetm.asm_out.internal_label (file, "L",
10575 CODE_LABEL_NUMBER (op[6]));
10576 output_asm_insn (".long\t%2", op);
10580 targetm.asm_out.internal_label (file, "L",
10581 CODE_LABEL_NUMBER (op[7]));
10582 output_asm_insn (".long\t%3", op);
10585 final_end_function ();
10589 s390_valid_pointer_mode (enum machine_mode mode)
10591 return (mode == SImode || (TARGET_64BIT && mode == DImode));
10594 /* Checks whether the given CALL_EXPR would use a caller
10595 saved register. This is used to decide whether sibling call
10596 optimization could be performed on the respective function
10600 s390_call_saved_register_used (tree call_expr)
10602 CUMULATIVE_ARGS cum_v;
10603 cumulative_args_t cum;
10605 enum machine_mode mode;
10610 INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
10611 cum = pack_cumulative_args (&cum_v);
10613 for (i = 0; i < call_expr_nargs (call_expr); i++)
10615 parameter = CALL_EXPR_ARG (call_expr, i);
10616 gcc_assert (parameter);
10618 /* For an undeclared variable passed as parameter we will get
10619 an ERROR_MARK node here. */
10620 if (TREE_CODE (parameter) == ERROR_MARK)
10623 type = TREE_TYPE (parameter);
10626 mode = TYPE_MODE (type);
10629 if (pass_by_reference (&cum_v, mode, type, true))
10632 type = build_pointer_type (type);
10635 parm_rtx = s390_function_arg (cum, mode, type, 0);
10637 s390_function_arg_advance (cum, mode, type, 0);
10642 if (REG_P (parm_rtx))
10645 reg < HARD_REGNO_NREGS (REGNO (parm_rtx), GET_MODE (parm_rtx));
10647 if (!call_used_regs[reg + REGNO (parm_rtx)])
10651 if (GET_CODE (parm_rtx) == PARALLEL)
10655 for (i = 0; i < XVECLEN (parm_rtx, 0); i++)
10657 rtx r = XEXP (XVECEXP (parm_rtx, 0, i), 0);
10659 gcc_assert (REG_P (r));
10662 reg < HARD_REGNO_NREGS (REGNO (r), GET_MODE (r));
10664 if (!call_used_regs[reg + REGNO (r)])
10673 /* Return true if the given call expression can be
10674 turned into a sibling call.
10675 DECL holds the declaration of the function to be called whereas
10676 EXP is the call expression itself. */
10679 s390_function_ok_for_sibcall (tree decl, tree exp)
10681 /* The TPF epilogue uses register 1. */
10682 if (TARGET_TPF_PROFILING)
10685 /* The 31 bit PLT code uses register 12 (GOT pointer - caller saved)
10686 which would have to be restored before the sibcall. */
10687 if (!TARGET_64BIT && flag_pic && decl && !targetm.binds_local_p (decl))
10690 /* Register 6 on s390 is available as an argument register but unfortunately
10691 "caller saved". This makes functions needing this register for arguments
10692 not suitable for sibcalls. */
10693 return !s390_call_saved_register_used (exp);
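/* Illustrative example (editor's sketch): for

     void g (long a, long b, long c, long d, long e);

   the fifth argument E ends up in %r6, which is call-saved, so a
   call to G is rejected as a sibcall by the check above. */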
10696 /* Return the fixed registers used for condition codes. */
10699 s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
10702 *p2 = INVALID_REGNUM;
10707 /* This function is used by the call expanders of the machine description.
10708 It emits the call insn itself together with the necessary operations
10709 to adjust the target address and returns the emitted insn.
10710 ADDR_LOCATION is the target address rtx
10711 TLS_CALL the location of the thread-local symbol
10712 RESULT_REG the register where the result of the call should be stored
10713 RETADDR_REG the register where the return address should be stored
10714 If this parameter is NULL_RTX the call is considered
10715 to be a sibling call. */
10718 s390_emit_call (rtx addr_location, rtx tls_call, rtx result_reg,
10721 bool plt_call = false;
10727 /* Direct function calls need special treatment. */
10728 if (GET_CODE (addr_location) == SYMBOL_REF)
10730 /* When calling a global routine in PIC mode, we must
10731 replace the symbol itself with the PLT stub. */
10732 if (flag_pic && !SYMBOL_REF_LOCAL_P (addr_location))
10734 if (retaddr_reg != NULL_RTX)
10736 addr_location = gen_rtx_UNSPEC (Pmode,
10737 gen_rtvec (1, addr_location),
10739 addr_location = gen_rtx_CONST (Pmode, addr_location);
10743 /* For -fpic code the PLT entries might use r12 which is
10744 call-saved. Therefore we cannot do a sibcall when
10745 calling directly using a symbol ref. When reaching
10746 this point we decided (in s390_function_ok_for_sibcall)
10747 to do a sibcall for a function pointer but one of the
10748 optimizers was able to get rid of the function pointer
10749 by propagating the symbol ref into the call. This
10750 optimization is illegal for S/390 so we turn the direct
call into an indirect call again. */
10752 addr_location = force_reg (Pmode, addr_location);
10755 /* Unless we can use the bras(l) insn, force the
10756 routine address into a register. */
10757 if (!TARGET_SMALL_EXEC && !TARGET_CPU_ZARCH)
10760 addr_location = legitimize_pic_address (addr_location, 0);
10762 addr_location = force_reg (Pmode, addr_location);
10766 /* If it is already an indirect call or the code above moved the
10767 SYMBOL_REF to somewhere else make sure the address can be found in
10769 if (retaddr_reg == NULL_RTX
10770 && GET_CODE (addr_location) != SYMBOL_REF
10773 emit_move_insn (gen_rtx_REG (Pmode, SIBCALL_REGNUM), addr_location);
10774 addr_location = gen_rtx_REG (Pmode, SIBCALL_REGNUM);
10777 addr_location = gen_rtx_MEM (QImode, addr_location);
10778 call = gen_rtx_CALL (VOIDmode, addr_location, const0_rtx);
10780 if (result_reg != NULL_RTX)
10781 call = gen_rtx_SET (VOIDmode, result_reg, call);
10783 if (retaddr_reg != NULL_RTX)
10785 clobber = gen_rtx_CLOBBER (VOIDmode, retaddr_reg);
10787 if (tls_call != NULL_RTX)
10788 vec = gen_rtvec (3, call, clobber,
10789 gen_rtx_USE (VOIDmode, tls_call));
10791 vec = gen_rtvec (2, call, clobber);
10793 call = gen_rtx_PARALLEL (VOIDmode, vec);
10796 insn = emit_call_insn (call);
10798 /* 31-bit PLT stubs and tls calls use the GOT register implicitly. */
10799 if ((!TARGET_64BIT && plt_call) || tls_call != NULL_RTX)
10801 /* s390_function_ok_for_sibcall should
10802 have denied sibcalls in this case. */
10803 gcc_assert (retaddr_reg != NULL_RTX);
10804 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, 12));
10809 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
10812 s390_conditional_register_usage (void)
10818 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10819 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
10821 if (TARGET_CPU_ZARCH)
10823 fixed_regs[BASE_REGNUM] = 0;
10824 call_used_regs[BASE_REGNUM] = 0;
10825 fixed_regs[RETURN_REGNUM] = 0;
10826 call_used_regs[RETURN_REGNUM] = 0;
10830 for (i = FPR8_REGNUM; i <= FPR15_REGNUM; i++)
10831 call_used_regs[i] = call_really_used_regs[i] = 0;
10835 call_used_regs[FPR4_REGNUM] = call_really_used_regs[FPR4_REGNUM] = 0;
10836 call_used_regs[FPR6_REGNUM] = call_really_used_regs[FPR6_REGNUM] = 0;
10839 if (TARGET_SOFT_FLOAT)
10841 for (i = FPR0_REGNUM; i <= FPR15_REGNUM; i++)
10842 call_used_regs[i] = fixed_regs[i] = 1;
10846 /* Corresponding function to eh_return expander. */
10848 static GTY(()) rtx s390_tpf_eh_return_symbol;
10850 s390_emit_tpf_eh_return (rtx target)
10855 if (!s390_tpf_eh_return_symbol)
10856 s390_tpf_eh_return_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tpf_eh_return");
10858 reg = gen_rtx_REG (Pmode, 2);
10859 orig_ra = gen_rtx_REG (Pmode, 3);
10861 emit_move_insn (reg, target);
10862 emit_move_insn (orig_ra, get_hard_reg_initial_val (Pmode, RETURN_REGNUM));
10863 insn = s390_emit_call (s390_tpf_eh_return_symbol, NULL_RTX, reg,
10864 gen_rtx_REG (Pmode, RETURN_REGNUM));
10865 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
10866 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), orig_ra);
10868 emit_move_insn (EH_RETURN_HANDLER_RTX, reg);
10871 /* Rework the prologue/epilogue to avoid saving/restoring
10872 registers unnecessarily. */
10875 s390_optimize_prologue (void)
10877 rtx_insn *insn, *new_insn, *next_insn;
10879 /* Do a final recompute of the frame-related data. */
10880 s390_optimize_register_info ();
10882 /* If all special registers are in fact used, there's nothing we
10883 can do, so no point in walking the insn list. */
10885 if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
10886 && cfun_frame_layout.last_save_gpr >= BASE_REGNUM
10887 && (TARGET_CPU_ZARCH
10888 || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
10889 && cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
10892 /* Search for prologue/epilogue insns and replace them. */
10894 for (insn = get_insns (); insn; insn = next_insn)
10896 int first, last, off;
10897 rtx set, base, offset;
10900 next_insn = NEXT_INSN (insn);
10902 if (! NONJUMP_INSN_P (insn) || ! RTX_FRAME_RELATED_P (insn))
10905 pat = PATTERN (insn);
10907 /* Remove ldgr/lgdr instructions used for saving and restore
10908 GPRs if possible. */
10910 && GET_CODE (pat) == SET
10911 && GET_MODE (SET_SRC (pat)) == DImode
10912 && REG_P (SET_SRC (pat))
10913 && REG_P (SET_DEST (pat)))
10915 int src_regno = REGNO (SET_SRC (pat));
10916 int dest_regno = REGNO (SET_DEST (pat));
10920 if (!((GENERAL_REGNO_P (src_regno) && FP_REGNO_P (dest_regno))
10921 || (FP_REGNO_P (src_regno) && GENERAL_REGNO_P (dest_regno))))
10924 gpr_regno = GENERAL_REGNO_P (src_regno) ? src_regno : dest_regno;
10925 fpr_regno = FP_REGNO_P (src_regno) ? src_regno : dest_regno;
10927 /* GPR must be call-saved, FPR must be call-clobbered. */
10928 if (!call_really_used_regs[fpr_regno]
10929 || call_really_used_regs[gpr_regno])
10932 /* It must not happen that what we once saved in an FPR now
10933 needs a stack slot. */
10934 gcc_assert (cfun_gpr_save_slot (gpr_regno) != -1);
10936 if (cfun_gpr_save_slot (gpr_regno) == 0)
10938 remove_insn (insn);
10943 if (GET_CODE (pat) == PARALLEL
10944 && store_multiple_operation (pat, VOIDmode))
10946 set = XVECEXP (pat, 0, 0);
10947 first = REGNO (SET_SRC (set));
10948 last = first + XVECLEN (pat, 0) - 1;
10949 offset = const0_rtx;
10950 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10951 off = INTVAL (offset);
10953 if (GET_CODE (base) != REG || off < 0)
10955 if (cfun_frame_layout.first_save_gpr != -1
10956 && (cfun_frame_layout.first_save_gpr < first
10957 || cfun_frame_layout.last_save_gpr > last))
10959 if (REGNO (base) != STACK_POINTER_REGNUM
10960 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10962 if (first > BASE_REGNUM || last < BASE_REGNUM)
10965 if (cfun_frame_layout.first_save_gpr != -1)
10967 rtx s_pat = save_gprs (base,
10968 off + (cfun_frame_layout.first_save_gpr
10969 - first) * UNITS_PER_LONG,
10970 cfun_frame_layout.first_save_gpr,
10971 cfun_frame_layout.last_save_gpr);
10972 new_insn = emit_insn_before (s_pat, insn);
10973 INSN_ADDRESSES_NEW (new_insn, -1);
10976 remove_insn (insn);
10980 if (cfun_frame_layout.first_save_gpr == -1
10981 && GET_CODE (pat) == SET
10982 && GENERAL_REG_P (SET_SRC (pat))
10983 && GET_CODE (SET_DEST (pat)) == MEM)
10986 first = REGNO (SET_SRC (set));
10987 offset = const0_rtx;
10988 base = eliminate_constant_term (XEXP (SET_DEST (set), 0), &offset);
10989 off = INTVAL (offset);
10991 if (GET_CODE (base) != REG || off < 0)
10993 if (REGNO (base) != STACK_POINTER_REGNUM
10994 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
10997 remove_insn (insn);
11001 if (GET_CODE (pat) == PARALLEL
11002 && load_multiple_operation (pat, VOIDmode))
11004 set = XVECEXP (pat, 0, 0);
11005 first = REGNO (SET_DEST (set));
11006 last = first + XVECLEN (pat, 0) - 1;
11007 offset = const0_rtx;
11008 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
11009 off = INTVAL (offset);
11011 if (GET_CODE (base) != REG || off < 0)
11014 if (cfun_frame_layout.first_restore_gpr != -1
11015 && (cfun_frame_layout.first_restore_gpr < first
11016 || cfun_frame_layout.last_restore_gpr > last))
11018 if (REGNO (base) != STACK_POINTER_REGNUM
11019 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
11021 if (first > BASE_REGNUM || last < BASE_REGNUM)
11024 if (cfun_frame_layout.first_restore_gpr != -1)
11026 rtx rpat = restore_gprs (base,
11027 off + (cfun_frame_layout.first_restore_gpr
11028 - first) * UNITS_PER_LONG,
11029 cfun_frame_layout.first_restore_gpr,
11030 cfun_frame_layout.last_restore_gpr);
11032 /* Remove REG_CFA_RESTOREs for registers that we no
11033 longer need to save. */
11034 REG_NOTES (rpat) = REG_NOTES (insn);
for (rtx *ptr = &REG_NOTES (rpat); *ptr; )
11036 if (REG_NOTE_KIND (*ptr) == REG_CFA_RESTORE
11037 && ((int) REGNO (XEXP (*ptr, 0))
11038 < cfun_frame_layout.first_restore_gpr))
11039 *ptr = XEXP (*ptr, 1);
11041 ptr = &XEXP (*ptr, 1);
11042 new_insn = emit_insn_before (rpat, insn);
11043 RTX_FRAME_RELATED_P (new_insn) = 1;
11044 INSN_ADDRESSES_NEW (new_insn, -1);
11047 remove_insn (insn);
11051 if (cfun_frame_layout.first_restore_gpr == -1
11052 && GET_CODE (pat) == SET
11053 && GENERAL_REG_P (SET_DEST (pat))
11054 && GET_CODE (SET_SRC (pat)) == MEM)
11057 first = REGNO (SET_DEST (set));
11058 offset = const0_rtx;
11059 base = eliminate_constant_term (XEXP (SET_SRC (set), 0), &offset);
11060 off = INTVAL (offset);
11062 if (GET_CODE (base) != REG || off < 0)
11065 if (REGNO (base) != STACK_POINTER_REGNUM
11066 && REGNO (base) != HARD_FRAME_POINTER_REGNUM)
11069 remove_insn (insn);
11075 /* On z10 and later the dynamic branch prediction must see the
backward jump within a certain window. If not, it falls back to
11077 the static prediction. This function rearranges the loop backward
11078 branch in a way which makes the static prediction always correct.
11079 The function returns true if it added an instruction. */
11081 s390_fix_long_loop_prediction (rtx_insn *insn)
11083 rtx set = single_set (insn);
11084 rtx code_label, label_ref, new_label;
11086 rtx_insn *cur_insn;
11090 /* This will exclude branch on count and branch on index patterns
11091 since these are correctly statically predicted. */
11093 || SET_DEST (set) != pc_rtx
11094 || GET_CODE (SET_SRC(set)) != IF_THEN_ELSE)
11097 /* Skip conditional returns. */
11098 if (ANY_RETURN_P (XEXP (SET_SRC (set), 1))
11099 && XEXP (SET_SRC (set), 2) == pc_rtx)
11102 label_ref = (GET_CODE (XEXP (SET_SRC (set), 1)) == LABEL_REF ?
11103 XEXP (SET_SRC (set), 1) : XEXP (SET_SRC (set), 2));
11105 gcc_assert (GET_CODE (label_ref) == LABEL_REF);
11107 code_label = XEXP (label_ref, 0);
11109 if (INSN_ADDRESSES (INSN_UID (code_label)) == -1
11110 || INSN_ADDRESSES (INSN_UID (insn)) == -1
11111 || (INSN_ADDRESSES (INSN_UID (insn))
11112 - INSN_ADDRESSES (INSN_UID (code_label)) < PREDICT_DISTANCE))
11115 for (distance = 0, cur_insn = PREV_INSN (insn);
11116 distance < PREDICT_DISTANCE - 6;
11117 distance += get_attr_length (cur_insn), cur_insn = PREV_INSN (cur_insn))
11118 if (!cur_insn || JUMP_P (cur_insn) || LABEL_P (cur_insn))
11121 new_label = gen_label_rtx ();
11122 uncond_jump = emit_jump_insn_after (
11123 gen_rtx_SET (VOIDmode, pc_rtx,
11124 gen_rtx_LABEL_REF (VOIDmode, code_label)),
11126 emit_label_after (new_label, uncond_jump);
11128 tmp = XEXP (SET_SRC (set), 1);
11129 XEXP (SET_SRC (set), 1) = XEXP (SET_SRC (set), 2);
11130 XEXP (SET_SRC (set), 2) = tmp;
11131 INSN_CODE (insn) = -1;
11133 XEXP (label_ref, 0) = new_label;
11134 JUMP_LABEL (insn) = new_label;
11135 JUMP_LABEL (uncond_jump) = code_label;
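/* Sketch of the rewrite (editor's illustration, pseudo-assembly):

     loop:  ...
            jne  loop          backward branch beyond the prediction
                               window, statically mispredicted

   becomes

     loop:  ...
            je   skip          inverted condition, short forward branch
            j    loop          unconditional backward jump, always
     skip:  ...                predicted correctly
*/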
11140 /* Returns 1 if INSN reads the value of REG for purposes not related
11141 to addressing of memory, and 0 otherwise. */
11143 s390_non_addr_reg_read_p (rtx reg, rtx_insn *insn)
11145 return reg_referenced_p (reg, PATTERN (insn))
11146 && !reg_used_in_mem_p (REGNO (reg), PATTERN (insn));
11149 /* Starting from INSN find_cond_jump looks downwards in the insn
11150 stream for a single jump insn which is the last user of the
11151 condition code set in INSN. */
11153 find_cond_jump (rtx_insn *insn)
11155 for (; insn; insn = NEXT_INSN (insn))
11159 if (LABEL_P (insn))
11162 if (!JUMP_P (insn))
11164 if (reg_mentioned_p (gen_rtx_REG (CCmode, CC_REGNUM), insn))
11169 /* This will be triggered by a return. */
11170 if (GET_CODE (PATTERN (insn)) != SET)
11173 gcc_assert (SET_DEST (PATTERN (insn)) == pc_rtx);
11174 ite = SET_SRC (PATTERN (insn));
11176 if (GET_CODE (ite) != IF_THEN_ELSE)
11179 cc = XEXP (XEXP (ite, 0), 0);
11180 if (!REG_P (cc) || !CC_REGNO_P (REGNO (cc)))
11183 if (find_reg_note (insn, REG_DEAD, cc))
11191 /* Swap the condition in COND and the operands in OP0 and OP1 so that
the semantics do not change. If NULL_RTX is passed as COND, the
11193 function tries to find the conditional jump starting with INSN. */
11195 s390_swap_cmp (rtx cond, rtx *op0, rtx *op1, rtx_insn *insn)
11199 if (cond == NULL_RTX)
11201 rtx jump = find_cond_jump (NEXT_INSN (insn));
11202 jump = jump ? single_set (jump) : NULL_RTX;
11204 if (jump == NULL_RTX)
11207 cond = XEXP (XEXP (jump, 1), 0);
11212 PUT_CODE (cond, swap_condition (GET_CODE (cond)));
11215 /* On z10, instructions of the compare-and-branch family have the
11216 property to access the register occurring as second operand with
11217 its bits complemented. If such a compare is grouped with a second
11218 instruction that accesses the same register non-complemented, and
11219 if that register's value is delivered via a bypass, then the
11220 pipeline recycles, thereby causing significant performance decline.
11221 This function locates such situations and exchanges the two
operands of the compare. The function returns true whenever it
added an insn. */
11225 s390_z10_optimize_cmp (rtx_insn *insn)
11227 rtx_insn *prev_insn, *next_insn;
11228 bool insn_added_p = false;
11229 rtx cond, *op0, *op1;
11231 if (GET_CODE (PATTERN (insn)) == PARALLEL)
11233 /* Handle compare and branch and branch on count
11235 rtx pattern = single_set (insn);
11238 || SET_DEST (pattern) != pc_rtx
11239 || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE)
11242 cond = XEXP (SET_SRC (pattern), 0);
11243 op0 = &XEXP (cond, 0);
11244 op1 = &XEXP (cond, 1);
11246 else if (GET_CODE (PATTERN (insn)) == SET)
11250 /* Handle normal compare instructions. */
11251 src = SET_SRC (PATTERN (insn));
11252 dest = SET_DEST (PATTERN (insn));
11255 || !CC_REGNO_P (REGNO (dest))
11256 || GET_CODE (src) != COMPARE)
11259 /* s390_swap_cmp will try to find the conditional
11260 jump when passing NULL_RTX as condition. */
11262 op0 = &XEXP (src, 0);
11263 op1 = &XEXP (src, 1);
11268 if (!REG_P (*op0) || !REG_P (*op1))
11271 if (GET_MODE_CLASS (GET_MODE (*op0)) != MODE_INT)
11274 /* Swap the COMPARE arguments and its mask if there is a
11275 conflicting access in the previous insn. */
11276 prev_insn = prev_active_insn (insn);
11277 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
11278 && reg_referenced_p (*op1, PATTERN (prev_insn)))
11279 s390_swap_cmp (cond, op0, op1, insn);
11281 /* Check if there is a conflict with the next insn. If there
11282 was no conflict with the previous insn, then swap the
11283 COMPARE arguments and its mask. If we already swapped
11284 the operands, or if swapping them would cause a conflict
11285 with the previous insn, issue a NOP after the COMPARE in
order to separate the two instructions. */
11287 next_insn = next_active_insn (insn);
11288 if (next_insn != NULL_RTX && INSN_P (next_insn)
11289 && s390_non_addr_reg_read_p (*op1, next_insn))
11291 if (prev_insn != NULL_RTX && INSN_P (prev_insn)
11292 && s390_non_addr_reg_read_p (*op0, prev_insn))
11294 if (REGNO (*op1) == 0)
11295 emit_insn_after (gen_nop1 (), insn);
11297 emit_insn_after (gen_nop (), insn);
11298 insn_added_p = true;
11301 s390_swap_cmp (cond, op0, op1, insn);
11303 return insn_added_p;
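/* Illustration of the swap (editor's sketch, pseudo-assembly): in

     ar   %r2,%r5             %r2 is written here
     crj  %r3,%r2,>=,label    %r2 is read complemented as second
                              operand

   the compare is rewritten as

     crj  %r2,%r3,<=,label

   so the freshly written register is no longer the complemented
   operand. */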
11306 /* Perform machine-dependent processing. */
11311 bool pool_overflow = false;
11313 /* Make sure all splits have been performed; splits after
11314 machine_dependent_reorg might confuse insn length counts. */
11315 split_all_insns_noflow ();
11317 /* Install the main literal pool and the associated base
11318 register load insns.
In addition, there are two problematic situations we need
to handle:
11323 - the literal pool might be > 4096 bytes in size, so that
11324 some of its elements cannot be directly accessed
11326 - a branch target might be > 64K away from the branch, so that
11327 it is not possible to use a PC-relative instruction.
11329 To fix those, we split the single literal pool into multiple
11330 pool chunks, reloading the pool base register at various
11331 points throughout the function to ensure it always points to
the pool chunk the following code expects, and/or replace
11333 PC-relative branches by absolute branches.
11335 However, the two problems are interdependent: splitting the
11336 literal pool can move a branch further away from its target,
11337 causing the 64K limit to overflow, and on the other hand,
11338 replacing a PC-relative branch by an absolute branch means
11339 we need to put the branch target address into the literal
11340 pool, possibly causing it to overflow.
11342 So, we loop trying to fix up both problems until we manage
11343 to satisfy both conditions at the same time. Note that the
11344 loop is guaranteed to terminate as every pass of the loop
11345 strictly decreases the total number of PC-relative branches
11346 in the function. (This is not completely true as there
11347 might be branch-over-pool insns introduced by chunkify_start.
11348 Those never need to be split however.) */
11352 struct constant_pool *pool = NULL;
11354 /* Collect the literal pool. */
11355 if (!pool_overflow)
11357 pool = s390_mainpool_start ();
11359 pool_overflow = true;
11362 /* If literal pool overflowed, start to chunkify it. */
11364 pool = s390_chunkify_start ();
11366 /* Split out-of-range branches. If this has created new
11367 literal pool entries, cancel current chunk list and
11368 recompute it. zSeries machines have large branch
11369 instructions, so we never need to split a branch. */
11370 if (!TARGET_CPU_ZARCH && s390_split_branches ())
11373 s390_chunkify_cancel (pool);
11375 s390_mainpool_cancel (pool);
11380 /* If we made it up to here, both conditions are satisfied.
11381 Finish up literal pool related changes. */
11383 s390_chunkify_finish (pool);
11385 s390_mainpool_finish (pool);
11387 /* We're done splitting branches. */
11388 cfun->machine->split_branches_pending_p = false;
11392 /* Generate out-of-pool execute target insns. */
11393 if (TARGET_CPU_ZARCH)
11395 rtx_insn *insn, *target;
11398 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11400 label = s390_execute_label (insn);
11404 gcc_assert (label != const0_rtx);
11406 target = emit_label (XEXP (label, 0));
11407 INSN_ADDRESSES_NEW (target, -1);
11409 target = emit_insn (s390_execute_target (insn));
11410 INSN_ADDRESSES_NEW (target, -1);
11414 /* Try to optimize prologue and epilogue further. */
11415 s390_optimize_prologue ();
11417 /* Walk over the insns and do some >=z10 specific changes. */
11418 if (s390_tune == PROCESSOR_2097_Z10
11419 || s390_tune == PROCESSOR_2817_Z196
11420 || s390_tune == PROCESSOR_2827_ZEC12)
11423 bool insn_added_p = false;
11425 /* The insn lengths and addresses have to be up to date for the
11426 following manipulations. */
11427 shorten_branches (get_insns ());
11429 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11431 if (!INSN_P (insn) || INSN_CODE (insn) <= 0)
11435 insn_added_p |= s390_fix_long_loop_prediction (insn);
11437 if ((GET_CODE (PATTERN (insn)) == PARALLEL
11438 || GET_CODE (PATTERN (insn)) == SET)
11439 && s390_tune == PROCESSOR_2097_Z10)
11440 insn_added_p |= s390_z10_optimize_cmp (insn);
11443 /* Adjust branches if we added new instructions. */
11445 shorten_branches (get_insns ());
11449 /* Return true if INSN is a fp load insn writing register REGNO. */
11451 s390_fpload_toreg (rtx insn, unsigned int regno)
11454 enum attr_type flag = s390_safe_attr_type (insn);
11456 if (flag != TYPE_FLOADSF && flag != TYPE_FLOADDF)
11459 set = single_set (insn);
11461 if (set == NULL_RTX)
11464 if (!REG_P (SET_DEST (set)) || !MEM_P (SET_SRC (set)))
11467 if (REGNO (SET_DEST (set)) != regno)
11473 /* This value describes the distance to be avoided between an
arithmetic fp instruction and an fp load writing the same register.
Z10_EARLYLOAD_DISTANCE - 1 as well as Z10_EARLYLOAD_DISTANCE + 1 are
fine, but the exact value has to be avoided. Otherwise the FP
11477 pipeline will throw an exception causing a major penalty. */
11478 #define Z10_EARLYLOAD_DISTANCE 7
11480 /* Rearrange the ready list in order to avoid the situation described
11481 for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
11482 moved to the very end of the ready list. */
11484 s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
11486 unsigned int regno;
11487 int nready = *nready_p;
11492 enum attr_type flag;
11495 /* Skip DISTANCE - 1 active insns. */
11496 for (insn = last_scheduled_insn, distance = Z10_EARLYLOAD_DISTANCE - 1;
11497 distance > 0 && insn != NULL_RTX;
11498 distance--, insn = prev_active_insn (insn))
11499 if (CALL_P (insn) || JUMP_P (insn))
11502 if (insn == NULL_RTX)
11505 set = single_set (insn);
11507 if (set == NULL_RTX || !REG_P (SET_DEST (set))
11508 || GET_MODE_CLASS (GET_MODE (SET_DEST (set))) != MODE_FLOAT)
11511 flag = s390_safe_attr_type (insn);
11513 if (flag == TYPE_FLOADSF || flag == TYPE_FLOADDF)
11516 regno = REGNO (SET_DEST (set));
11519 while (!s390_fpload_toreg (ready[i], regno) && i > 0)
11526 memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
11531 /* The s390_sched_state variable tracks the state of the current or
11532 the last instruction group.
11534 0,1,2 number of instructions scheduled in the current group
11535 3 the last group is complete - normal insns
11536 4 the last group was a cracked/expanded insn */
11538 static int s390_sched_state;
#define S390_OOO_SCHED_STATE_NORMAL  3
#define S390_OOO_SCHED_STATE_CRACKED 4

#define S390_OOO_SCHED_ATTR_MASK_CRACKED    0x1
#define S390_OOO_SCHED_ATTR_MASK_EXPANDED   0x2
#define S390_OOO_SCHED_ATTR_MASK_ENDGROUP   0x4
#define S390_OOO_SCHED_ATTR_MASK_GROUPALONE 0x8

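/* The attribute mask bits combine by ORing; e.g. a hypothetical insn
   that is both cracked and ends a group yields

     S390_OOO_SCHED_ATTR_MASK_CRACKED
     | S390_OOO_SCHED_ATTR_MASK_ENDGROUP   == 0x1 | 0x4 == 0x5

   from s390_get_sched_attrmask below.  */
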
static unsigned int
s390_get_sched_attrmask (rtx insn)
{
  unsigned int mask = 0;

  if (get_attr_ooo_cracked (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_CRACKED;
  if (get_attr_ooo_expanded (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_EXPANDED;
  if (get_attr_ooo_endgroup (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_ENDGROUP;
  if (get_attr_ooo_groupalone (insn))
    mask |= S390_OOO_SCHED_ATTR_MASK_GROUPALONE;
  return mask;
}

/* Return the scheduling score for INSN.  The higher the score the
   better.  The score is calculated from the OOO scheduling attributes
   of INSN and the scheduling state s390_sched_state.  */
static int
s390_sched_score (rtx insn)
{
  unsigned int mask = s390_get_sched_attrmask (insn);
  int score = 0;

  switch (s390_sched_state)
    {
    case 0:
      /* Try to put insns into the first slot which would otherwise
         break a group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        score += 5;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        score += 10;
    case 1:
      /* Prefer not cracked insns while trying to put together a
         group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
        score += 10;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) == 0)
        score += 5;
      break;
    case 2:
      /* Prefer not cracked insns while trying to put together a
         group.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) == 0)
        score += 10;
      /* Prefer endgroup insns in the last slot.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0)
        score += 10;
      break;
    case S390_OOO_SCHED_STATE_NORMAL:
      /* Prefer not cracked insns if the last was not cracked.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) == 0
          && (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) == 0)
        score += 5;
      if ((mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        score += 10;
      break;
    case S390_OOO_SCHED_STATE_CRACKED:
      /* Try to keep cracked insns together to prevent them from
         interrupting groups.  */
      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        score += 5;
      break;
    }
  return score;
}

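/* A worked example of the scoring, assuming the increments above and
   hypothetical insns: in state 2 (two insns already in the current
   group) a plain insn with no OOO attributes scores 10, an endgroup
   insn scores 10 + 10 == 20, and a cracked insn scores 0, so the
   scheduler prefers to close the group with an endgroup insn.  */
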
/* This function is called via hook TARGET_SCHED_REORDER before
   issuing one insn from list READY which contains *NREADYP entries.
   For target z10 it reorders load instructions to avoid early load
   conflicts in the floating point pipeline.  */
static int
s390_sched_reorder (FILE *file, int verbose,
                    rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
{
  if (s390_tune == PROCESSOR_2097_Z10)
    if (reload_completed && *nreadyp > 1)
      s390_z10_prevent_earlyload_conflicts (ready, nreadyp);

  if (s390_tune == PROCESSOR_2827_ZEC12
      && reload_completed
      && *nreadyp > 1)
    {
      int i;
      int last_index = *nreadyp - 1;
      int max_index = -1;
      int max_score = -1;
      rtx_insn *tmp;

      /* Just move the insn with the highest score to the top (the
         end) of the list.  A full sort is not needed since a conflict
         in the hazard recognition cannot happen.  So the top insn in
         the ready list will always be taken.  */
      for (i = last_index; i >= 0; i--)
        {
          int score;

          if (recog_memoized (ready[i]) < 0)
            continue;

          score = s390_sched_score (ready[i]);
          if (score > max_score)
            {
              max_score = score;
              max_index = i;
            }
        }

      if (max_index != -1)
        {
          if (max_index != last_index)
            {
              tmp = ready[max_index];
              ready[max_index] = ready[last_index];
              ready[last_index] = tmp;

              if (verbose > 5)
                fprintf (file,
                         "move insn %d to the top of list\n",
                         INSN_UID (ready[last_index]));
            }
          else if (verbose > 5)
            fprintf (file,
                     "best insn %d already on top\n",
                     INSN_UID (ready[last_index]));
        }

      if (verbose > 5)
        {
          fprintf (file, "ready list ooo attributes - sched state: %d\n",
                   s390_sched_state);

          for (i = last_index; i >= 0; i--)
            {
              if (recog_memoized (ready[i]) < 0)
                continue;
              fprintf (file, "insn %d score: %d: ", INSN_UID (ready[i]),
                       s390_sched_score (ready[i]));
#define PRINT_OOO_ATTR(ATTR) fprintf (file, "%s ", get_attr_##ATTR (ready[i]) ? #ATTR : "!" #ATTR);
              PRINT_OOO_ATTR (ooo_cracked);
              PRINT_OOO_ATTR (ooo_expanded);
              PRINT_OOO_ATTR (ooo_endgroup);
              PRINT_OOO_ATTR (ooo_groupalone);
#undef PRINT_OOO_ATTR
              fprintf (file, "\n");
            }
        }
    }

  return s390_issue_rate ();
}

/* This function is called via hook TARGET_SCHED_VARIABLE_ISSUE after
   the scheduler has issued INSN.  It stores the last issued insn into
   last_scheduled_insn in order to make it available for
   s390_sched_reorder.  */
static int
s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
{
  last_scheduled_insn = insn;

  if (s390_tune == PROCESSOR_2827_ZEC12
      && reload_completed
      && recog_memoized (insn) >= 0)
    {
      unsigned int mask = s390_get_sched_attrmask (insn);

      if ((mask & S390_OOO_SCHED_ATTR_MASK_CRACKED) != 0
          || (mask & S390_OOO_SCHED_ATTR_MASK_EXPANDED) != 0)
        s390_sched_state = S390_OOO_SCHED_STATE_CRACKED;
      else if ((mask & S390_OOO_SCHED_ATTR_MASK_ENDGROUP) != 0
               || (mask & S390_OOO_SCHED_ATTR_MASK_GROUPALONE) != 0)
        s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
      else
        {
          /* Only normal insns are left (mask == 0).  */
          switch (s390_sched_state)
            {
            case 0:
            case 1:
            case 2:
            case S390_OOO_SCHED_STATE_NORMAL:
              if (s390_sched_state == S390_OOO_SCHED_STATE_NORMAL)
                s390_sched_state = 1;
              else
                s390_sched_state++;
              break;
            case S390_OOO_SCHED_STATE_CRACKED:
              s390_sched_state = S390_OOO_SCHED_STATE_NORMAL;
              break;
            }
        }

      if (verbose > 5)
        {
          fprintf (file, "insn %d: ", INSN_UID (insn));
#define PRINT_OOO_ATTR(ATTR) \
          fprintf (file, "%s ", get_attr_##ATTR (insn) ? #ATTR : "");
          PRINT_OOO_ATTR (ooo_cracked);
          PRINT_OOO_ATTR (ooo_expanded);
          PRINT_OOO_ATTR (ooo_endgroup);
          PRINT_OOO_ATTR (ooo_groupalone);
#undef PRINT_OOO_ATTR
          fprintf (file, "\n");
          fprintf (file, "sched state: %d\n", s390_sched_state);
        }
    }

  if (GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    return more - 1;
  else
    return more;
}

static void
s390_sched_init (FILE *file ATTRIBUTE_UNUSED,
                 int verbose ATTRIBUTE_UNUSED,
                 int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL;
  s390_sched_state = 0;
}

/* This function checks the whole of insn X for memory references.  The
   function always returns zero because the framework it is called
   from would stop recursively analyzing the insn upon a return value
   other than zero.  The real result of this function is updating
   counter variable MEM_COUNT.  */
static int
check_dpu (rtx *x, unsigned *mem_count)
{
  if (*x != NULL_RTX && MEM_P (*x))
    (*mem_count)++;
  return 0;
}

/* This target hook implementation for TARGET_LOOP_UNROLL_ADJUST
   calculates the number of times struct loop *LOOP should be unrolled
   when tuning for cpus with a built-in stride prefetcher.
   The loop is analyzed for memory accesses by calling check_dpu for
   each rtx of the loop.  Depending on the loop_depth and the amount of
   memory accesses a new number <= nunroll is returned to improve the
   behaviour of the hardware prefetch unit.  */
static unsigned
s390_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
{
  basic_block *bbs;
  rtx_insn *insn;
  unsigned i;
  unsigned mem_count = 0;

  if (s390_tune != PROCESSOR_2097_Z10
      && s390_tune != PROCESSOR_2817_Z196
      && s390_tune != PROCESSOR_2827_ZEC12)
    return nunroll;

  /* Count the number of memory references within the loop body.  */
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (insn = BB_HEAD (bbs[i]); insn != BB_END (bbs[i]);
           insn = NEXT_INSN (insn))
        if (INSN_P (insn) && INSN_CODE (insn) != -1)
          for_each_rtx_in_insn (&insn, (rtx_function) check_dpu, &mem_count);
    }
  free (bbs);

  /* Prevent division by zero; no adjustment of nunroll is needed in
     this case.  */
  if (mem_count == 0)
    return nunroll;

  switch (loop_depth (loop))
    {
    case 1:
      return MIN (nunroll, 28 / mem_count);
    case 2:
      return MIN (nunroll, 22 / mem_count);
    default:
      return MIN (nunroll, 16 / mem_count);
    }
}

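/* A worked example of the capping above: a hypothetical depth-1 loop
   body containing 4 memory references yields MIN (nunroll, 28 / 4),
   i.e. an unroll factor of at most 7, keeping the number of parallel
   access streams seen by the stride prefetcher small.  */
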
static void
s390_option_override (void)
{
  unsigned int i;
  cl_deferred_option *opt;
  vec<cl_deferred_option> *v =
    (vec<cl_deferred_option> *) s390_deferred_options;

  if (v)
    FOR_EACH_VEC_ELT (*v, i, opt)
      {
        switch (opt->opt_index)
          {
          case OPT_mhotpatch:
            s390_hotpatch_trampoline_halfwords = (opt->value) ?
              s390_hotpatch_trampoline_halfwords_default : -1;
            break;
          case OPT_mhotpatch_:
            {
              int val;

              val = integral_argument (opt->arg);
              if (val == -1)
                {
                  /* argument is not a plain number */
                  error ("argument to %qs should be a non-negative integer",
                         "-mhotpatch=");
                  break;
                }
              else if (val > s390_hotpatch_trampoline_halfwords_max)
                {
                  error ("argument to %qs is too large (max. %d)",
                         "-mhotpatch=", s390_hotpatch_trampoline_halfwords_max);
                  break;
                }
              s390_hotpatch_trampoline_halfwords = val;
              break;
            }
          default:
            gcc_unreachable ();
          }
      }

  /* Set up function hooks.  */
  init_machine_status = s390_init_machine_status;

  /* Architecture mode defaults according to ABI.  */
  if (!(target_flags_explicit & MASK_ZARCH))
    {
      if (TARGET_64BIT)
        target_flags |= MASK_ZARCH;
      else
        target_flags &= ~MASK_ZARCH;
    }

  /* Set the march default in case it hasn't been specified on
     the cmdline.  */
  if (s390_arch == PROCESSOR_max)
    {
      s390_arch_string = TARGET_ZARCH ? "z900" : "g5";
      s390_arch = TARGET_ZARCH ? PROCESSOR_2064_Z900 : PROCESSOR_9672_G5;
      s390_arch_flags = processor_flags_table[(int) s390_arch];
    }

  /* Determine processor to tune for.  */
  if (s390_tune == PROCESSOR_max)
    {
      s390_tune = s390_arch;
      s390_tune_flags = s390_arch_flags;
    }

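  /* For example, compiling with -m64 and neither -march nor -mtune
     given thus enables z/Architecture mode, defaults the architecture
     to z900, and tunes for that same processor.  */
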
  /* Sanity checks.  */
  if (TARGET_ZARCH && !TARGET_CPU_ZARCH)
    error ("z/Architecture mode not supported on %s", s390_arch_string);
  if (TARGET_64BIT && !TARGET_ZARCH)
    error ("64-bit ABI not supported in ESA/390 mode");

  /* Use hardware DFP if available and not explicitly disabled by
     user.  E.g. with -m31 -march=z10 -mzarch   */
  if (!(target_flags_explicit & MASK_HARD_DFP) && TARGET_DFP)
    target_flags |= MASK_HARD_DFP;

  /* Enable hardware transactions if available and not explicitly
     disabled by user.  E.g. with -m31 -march=zEC12 -mzarch   */
  if (!(target_flags_explicit & MASK_OPT_HTM) && TARGET_CPU_HTM && TARGET_ZARCH)
    target_flags |= MASK_OPT_HTM;

  if (TARGET_HARD_DFP && !TARGET_DFP)
    {
      if (target_flags_explicit & MASK_HARD_DFP)
        {
          if (!TARGET_CPU_DFP)
            error ("hardware decimal floating point instructions"
                   " not available on %s", s390_arch_string);
          if (!TARGET_ZARCH)
            error ("hardware decimal floating point instructions"
                   " not available in ESA/390 mode");
        }
      else
        target_flags &= ~MASK_HARD_DFP;
    }

  if ((target_flags_explicit & MASK_SOFT_FLOAT) && TARGET_SOFT_FLOAT)
    {
      if ((target_flags_explicit & MASK_HARD_DFP) && TARGET_HARD_DFP)
        error ("-mhard-dfp can%'t be used in conjunction with -msoft-float");

      target_flags &= ~MASK_HARD_DFP;
    }

  /* Set processor cost function.  */
  switch (s390_tune)
    {
    case PROCESSOR_2084_Z990:
      s390_cost = &z990_cost;
      break;
    case PROCESSOR_2094_Z9_109:
      s390_cost = &z9_109_cost;
      break;
    case PROCESSOR_2097_Z10:
      s390_cost = &z10_cost;
      break;
    case PROCESSOR_2817_Z196:
      s390_cost = &z196_cost;
      break;
    case PROCESSOR_2827_ZEC12:
      s390_cost = &zEC12_cost;
      break;
    default:
      s390_cost = &z900_cost;
    }

  if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
    error ("-mbackchain -mpacked-stack -mhard-float are not supported "
           "in combination");

  if (s390_stack_size)
    {
      if (s390_stack_guard >= s390_stack_size)
        error ("stack size must be greater than the stack guard value");
      else if (s390_stack_size > 1 << 16)
        error ("stack size must not be greater than 64k");
    }
  else if (s390_stack_guard)
    error ("-mstack-guard implies use of -mstack-size");

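  /* For example (hypothetical option combinations): -mstack-size=32768
     -mstack-guard=4096 is accepted, whereas -mstack-size=4096
     -mstack-guard=4096 (guard not below the size) and
     -mstack-size=131072 (more than 64k) are both rejected with the
     errors above.  */
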
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (s390_tune == PROCESSOR_2097_Z10
      || s390_tune == PROCESSOR_2817_Z196
      || s390_tune == PROCESSOR_2827_ZEC12)
    {
      maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
    }

  maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* values for loop prefetching */
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* s390 has more than 2 levels and the size is much larger.  Since
     we are always running virtualized assume that we only get a small
     part of the caches above l1.  */
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* This cannot reside in s390_option_optimization_table since HAVE_prefetch
     requires the arch flags to be evaluated already.  Since prefetching
     is beneficial on s390, we enable it if available.  */
  if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch && optimize >= 3)
    flag_prefetch_loop_arrays = 1;

  /* Use the alternative scheduling-pressure algorithm by default.  */
  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  if (TARGET_TPF)
    {
      /* Don't emit DWARF3/4 unless specifically selected.  The TPF
         debuggers do not yet support DWARF 3/4.  */
      if (!global_options_set.x_dwarf_strict)
        dwarf_strict = 1;
      if (!global_options_set.x_dwarf_version)
        dwarf_version = 2;
    }

  /* Register a target-specific optimization-and-lowering pass
     to run immediately before prologue and epilogue generation.

     Registering the pass must be done at start up.  It's
     convenient to do it here.  */
  opt_pass *new_pass = new pass_s390_early_mach (g);
  struct register_pass_info insert_pass_s390_early_mach =
    {
      new_pass,                 /* pass */
      "pro_and_epilogue",       /* reference_pass_name */
      1,                        /* ref_pass_instance_number */
      PASS_POS_INSERT_BEFORE    /* po_op */
    };
  register_pass (&insert_pass_s390_early_mach);
}

/* Initialize GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER s390_assemble_integer

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""

#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE s390_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO s390_encode_section_info

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P s390_scalar_mode_supported_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM s390_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS s390_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS s390_legitimize_address

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY s390_return_in_memory

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS s390_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN s390_expand_builtin

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA s390_output_addr_const_extra

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK s390_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY s390_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE s390_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD s390_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE s390_sched_variable_issue
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER s390_sched_reorder
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT s390_sched_init

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P s390_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS s390_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST s390_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST s390_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST s390_memory_move_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG s390_reorg

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE s390_valid_pointer_mode

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST s390_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START s390_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR s390_gimplify_va_arg

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE s390_promote_function_mode
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE s390_pass_by_reference

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL s390_function_ok_for_sibcall
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG s390_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE s390_function_arg_advance
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE s390_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE s390_libcall_value

#undef TARGET_KEEP_LEAF_WHEN_PROFILED
#define TARGET_KEEP_LEAF_WHEN_PROFILED s390_keep_leaf_when_profiled

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS s390_fixed_condition_code_regs

#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL s390_output_dwarf_dtprel
#endif

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE s390_mangle_type
#endif
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS s390_preferred_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD s390_secondary_reload

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE s390_libgcc_cmp_return_mode

#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE s390_libgcc_shift_count_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P s390_legitimate_address_p

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P s390_legitimate_constant_p

#undef TARGET_LRA_P
#define TARGET_LRA_P s390_lra_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE s390_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE s390_conditional_register_usage

#undef TARGET_LOOP_UNROLL_ADJUST
#define TARGET_LOOP_UNROLL_ADJUST s390_loop_unroll_adjust

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE s390_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT s390_trampoline_init

#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE s390_unwind_word_mode

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON s390_canonicalize_comparison

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK s390_hard_regno_scratch_ok

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE s390_attribute_table

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P s390_can_inline_p

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE s300_set_up_by_prologue

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-s390.h"