1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
#include "config.h"
#include "system.h"
26 #include "coretypes.h"
#include "tm.h"
#include "rtl.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
43 /* Information about a subreg of a hard register. */
struct subreg_info
{
46 /* Offset of first hard register involved in the subreg. */
  int offset;
48 /* Number of hard registers involved in the subreg. */
  int nregs;
50 /* Whether this subreg can be represented as a hard reg with the new
   mode. */
  bool representable_p;
};
55 /* Forward declarations */
56 static void set_of_1 (rtx, rtx, void *);
57 static bool covers_regno_p (rtx, unsigned int);
58 static bool covers_regno_no_parallel_p (rtx, unsigned int);
59 static int rtx_referenced_p_1 (rtx *, void *);
60 static int computed_jump_p_1 (rtx);
61 static void parms_set (rtx, rtx, void *);
62 static void subreg_get_info (unsigned int, enum machine_mode,
63 unsigned int, enum machine_mode,
64 struct subreg_info *);
66 static unsigned HOST_WIDE_INT cached_nonzero_bits (rtx, enum machine_mode,
67 rtx, enum machine_mode,
68 unsigned HOST_WIDE_INT);
69 static unsigned HOST_WIDE_INT nonzero_bits1 (rtx, enum machine_mode, rtx,
71 unsigned HOST_WIDE_INT);
72 static unsigned int cached_num_sign_bit_copies (rtx, enum machine_mode, rtx,
75 static unsigned int num_sign_bit_copies1 (rtx, enum machine_mode, rtx,
76 enum machine_mode, unsigned int);
78 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
79 -1 if a code has no such operand. */
80 static int non_rtx_starting_operands[NUM_RTX_CODE];
82 /* Bit flags that specify the machine subtype we are compiling for.
83 Bits are tested using macros TARGET_... defined in the tm.h file
84 and set by `-m...' switches. Must be defined in rtlanal.c. */

int target_flags;
88 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
89 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
90 SIGN_EXTEND then while narrowing we also have to enforce the
91 representation and sign-extend the value to mode DESTINATION_REP.
93 If the value is already sign-extended to DESTINATION_REP mode we
94 can just switch to DESTINATION mode on it. For each pair of
95 integral modes SOURCE and DESTINATION, when truncating from SOURCE
96 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
97 contains the number of high-order bits in SOURCE that have to be
98 copies of the sign-bit so that we can do this mode-switch to
   DESTINATION. */

static unsigned int
102 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
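/* A worked example (illustrative; the target hook value is an assumption):
   if TARGET_MODE_REP_EXTENDED (QImode, HImode) is SIGN_EXTEND, then
   narrowing from HImode to QImode must keep the value sign-extended in
   the HImode representation, so num_sign_bit_copies_in_rep[HImode][QImode]
   is GET_MODE_BITSIZE (HImode) - GET_MODE_BITSIZE (QImode) = 8: the 8
   high-order bits of the HImode value must already be sign-bit copies
   before we may simply switch the value to QImode. */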
104 /* Return 1 if the value of X is unstable
105 (would be different at a different point in the program).
106 The frame pointer, arg pointer, etc. are considered stable
107 (within one function) and so is anything marked `unchanging'. */
110 rtx_unstable_p (rtx x)
112 RTX_CODE code = GET_CODE (x);
119 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
130 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
131 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
132 /* The arg pointer varies if it is not a fixed register. */
133 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
135 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
136 /* ??? When call-clobbered, the value is stable modulo the restore
137 that must happen after a call. This currently screws up local-alloc
138 into believing that the restore is not needed. */
139 if (x == pic_offset_table_rtx)
145 if (MEM_VOLATILE_P (x))
154 fmt = GET_RTX_FORMAT (code);
155 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
158 if (rtx_unstable_p (XEXP (x, i)))
161 else if (fmt[i] == 'E')
164 for (j = 0; j < XVECLEN (x, i); j++)
165 if (rtx_unstable_p (XVECEXP (x, i, j)))
172 /* Return 1 if X has a value that can vary even between two
173 executions of the program. 0 means X can be compared reliably
174 against certain constants or near-constants.
175 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
176 zero, we are slightly more conservative.
177 The frame pointer and the arg pointer are considered constant. */
180 rtx_varies_p (rtx x, int for_alias)
193 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
204 /* Note that we have to test for the actual rtx used for the frame
205 and arg pointers and not just the register number in case we have
206 eliminated the frame and/or arg pointer and are using it
208 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
209 /* The arg pointer varies if it is not a fixed register. */
210 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
212 if (x == pic_offset_table_rtx
213 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
214 /* ??? When call-clobbered, the value is stable modulo the restore
215 that must happen after a call. This currently screws up
216 local-alloc into believing that the restore is not needed, so we
217 must return 0 only if we are called from alias analysis. */
225 /* The operand 0 of a LO_SUM is considered constant
226 (in fact it is related specifically to operand 1)
227 during alias analysis. */
228 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
229 || rtx_varies_p (XEXP (x, 1), for_alias);
232 if (MEM_VOLATILE_P (x))
241 fmt = GET_RTX_FORMAT (code);
242 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
245 if (rtx_varies_p (XEXP (x, i), for_alias))
248 else if (fmt[i] == 'E')
251 for (j = 0; j < XVECLEN (x, i); j++)
252 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
259 /* Return nonzero if the use of X as an address in a MEM can cause a trap.
260 MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
261 whether nonzero is returned for unaligned memory accesses on strict
262 alignment machines. */
265 rtx_addr_can_trap_p_1 (rtx x, enum machine_mode mode, bool unaligned_mems)
267 enum rtx_code code = GET_CODE (x);
272 return SYMBOL_REF_WEAK (x);
278 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
279 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
280 || x == stack_pointer_rtx
281 /* The arg pointer varies if it is not a fixed register. */
282 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
284 /* All of the virtual frame registers are stack references. */
285 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
286 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
291 return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
294 /* An address is assumed not to trap if:
295 - it is an address that can't trap plus a constant integer,
296 with the proper remainder modulo the mode size if we are
297 considering unaligned memory references. */
298 if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems)
299 && GET_CODE (XEXP (x, 1)) == CONST_INT)
301 HOST_WIDE_INT offset;
303 if (!STRICT_ALIGNMENT
305 || GET_MODE_SIZE (mode) == 0)
308 offset = INTVAL (XEXP (x, 1));
310 #ifdef SPARC_STACK_BOUNDARY_HACK
311 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
312 the real alignment of %sp. However, when it does this, the
313 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
314 if (SPARC_STACK_BOUNDARY_HACK
315 && (XEXP (x, 0) == stack_pointer_rtx
316 || XEXP (x, 0) == hard_frame_pointer_rtx))
317 offset -= STACK_POINTER_OFFSET;
320 return offset % GET_MODE_SIZE (mode) != 0;
323 /* - or it is the pic register plus a constant. */
324 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
331 return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems);
338 return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
344 /* If it isn't one of the cases above, it can cause a trap. */
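/* For instance (illustrative, assuming a STRICT_ALIGNMENT target with
   4-byte SImode): with UNALIGNED_MEMS set, the address
   (plus (reg/f:P sp) (const_int 2)) can trap for an SImode access, since
   2 % GET_MODE_SIZE (SImode) != 0, whereas
   (plus (reg/f:P sp) (const_int 4)) cannot. */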
348 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
351 rtx_addr_can_trap_p (rtx x)
353 return rtx_addr_can_trap_p_1 (x, VOIDmode, false);
356 /* Return true if X is an address that is known to not be zero. */
359 nonzero_address_p (rtx x)
361 enum rtx_code code = GET_CODE (x);
366 return !SYMBOL_REF_WEAK (x);
372 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
373 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
374 || x == stack_pointer_rtx
375 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
377 /* All of the virtual frame registers are stack references. */
378 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
379 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
384 return nonzero_address_p (XEXP (x, 0));
387 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
388 return nonzero_address_p (XEXP (x, 0));
389 /* Handle PIC references. */
390 else if (XEXP (x, 0) == pic_offset_table_rtx
391 && CONSTANT_P (XEXP (x, 1)))
396 /* Similar to the above; allow positive offsets. Further, since
397 auto-inc is only allowed in memories, the register must be a
399 if (GET_CODE (XEXP (x, 1)) == CONST_INT
400 && INTVAL (XEXP (x, 1)) > 0)
402 return nonzero_address_p (XEXP (x, 0));
405 /* Similarly. Further, the offset is always positive. */
412 return nonzero_address_p (XEXP (x, 0));
415 return nonzero_address_p (XEXP (x, 1));
421 /* If it isn't one of the cases above, might be zero. */
425 /* Return 1 if X refers to a memory location whose address
426 cannot be compared reliably with constant addresses,
427 or if X refers to a BLKmode memory object.
428 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
429 zero, we are slightly more conservative. */
432 rtx_addr_varies_p (rtx x, int for_alias)
443 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
445 fmt = GET_RTX_FORMAT (code);
446 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
449 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
452 else if (fmt[i] == 'E')
455 for (j = 0; j < XVECLEN (x, i); j++)
456 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
462 /* Return the value of the integer term in X, if one is apparent; otherwise return 0.
464 Only obvious integer terms are detected.
465 This is used in cse.c with the `related_value' field. */
468 get_integer_term (rtx x)
470 if (GET_CODE (x) == CONST)
473 if (GET_CODE (x) == MINUS
474 && GET_CODE (XEXP (x, 1)) == CONST_INT)
475 return - INTVAL (XEXP (x, 1));
476 if (GET_CODE (x) == PLUS
477 && GET_CODE (XEXP (x, 1)) == CONST_INT)
478 return INTVAL (XEXP (x, 1));
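/* For example (illustrative): applied to
   (const (plus (symbol_ref "x") (const_int 4))) this returns 4, and
   applied to (const (minus (symbol_ref "x") (const_int 4))) it returns
   -4; anything without an obvious integer term yields 0. */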
482 /* If X is a constant, return the value sans apparent integer term; otherwise return 0.
484 Only obvious integer terms are detected. */
487 get_related_value (rtx x)
489 if (GET_CODE (x) != CONST)
492 if (GET_CODE (x) == PLUS
493 && GET_CODE (XEXP (x, 1)) == CONST_INT)
495 else if (GET_CODE (x) == MINUS
496 && GET_CODE (XEXP (x, 1)) == CONST_INT)
501 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
502 to somewhere in the same object or object_block as SYMBOL. */
505 offset_within_block_p (rtx symbol, HOST_WIDE_INT offset)
509 if (GET_CODE (symbol) != SYMBOL_REF)
517 if (CONSTANT_POOL_ADDRESS_P (symbol)
518 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
521 decl = SYMBOL_REF_DECL (symbol);
522 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
526 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
527 && SYMBOL_REF_BLOCK (symbol)
528 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
529 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
530 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
536 /* Split X into a base and a constant offset, storing them in *BASE_OUT
537 and *OFFSET_OUT respectively. */
540 split_const (rtx x, rtx *base_out, rtx *offset_out)
542 if (GET_CODE (x) == CONST)
545 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
547 *base_out = XEXP (x, 0);
548 *offset_out = XEXP (x, 1);
553 *offset_out = const0_rtx;
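/* For example (illustrative): applied to
   (const (plus (symbol_ref "s") (const_int 8))), split_const stores
   (symbol_ref "s") in *BASE_OUT and (const_int 8) in *OFFSET_OUT;
   applied to (reg 1), it stores (reg 1) and const0_rtx. */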
556 /* Return the number of places FIND appears within X. If COUNT_DEST is
557 zero, we do not count occurrences inside the destination of a SET. */
560 count_occurrences (rtx x, rtx find, int count_dest)
564 const char *format_ptr;
585 count = count_occurrences (XEXP (x, 0), find, count_dest);
587 count += count_occurrences (XEXP (x, 1), find, count_dest);
591 if (MEM_P (find) && rtx_equal_p (x, find))
596 if (SET_DEST (x) == find && ! count_dest)
597 return count_occurrences (SET_SRC (x), find, count_dest);
604 format_ptr = GET_RTX_FORMAT (code);
607 for (i = 0; i < GET_RTX_LENGTH (code); i++)
609 switch (*format_ptr++)
612 count += count_occurrences (XEXP (x, i), find, count_dest);
616 for (j = 0; j < XVECLEN (x, i); j++)
617 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
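/* A minimal usage sketch (hypothetical caller): to count how many times
   a register REG is used inside an insn pattern while ignoring stores
   into it, one would call count_occurrences (PATTERN (insn), reg, 0). */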
625 /* Nonzero if register REG appears somewhere within IN.
626 Also works if REG is not a register; in this case it checks
627 for a subexpression of IN that is Lisp "equal" to REG. */
630 reg_mentioned_p (rtx reg, rtx in)
642 if (GET_CODE (in) == LABEL_REF)
643 return reg == XEXP (in, 0);
645 code = GET_CODE (in);
649 /* Compare registers by number. */
651 return REG_P (reg) && REGNO (in) == REGNO (reg);
653 /* These codes have no constituent expressions
663 /* These are kept unique for a given value. */
670 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
673 fmt = GET_RTX_FORMAT (code);
675 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
680 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
681 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
684 else if (fmt[i] == 'e'
685 && reg_mentioned_p (reg, XEXP (in, i)))
691 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
692 no CODE_LABEL insn. */
695 no_labels_between_p (rtx beg, rtx end)
700 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
706 /* Nonzero if register REG is used in an insn between
707 FROM_INSN and TO_INSN (exclusive of those two). */
710 reg_used_between_p (rtx reg, rtx from_insn, rtx to_insn)
714 if (from_insn == to_insn)
717 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
719 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
720 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
725 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
726 is entirely replaced by a new value and the only use is as a SET_DEST,
727 we do not consider it a reference. */
730 reg_referenced_p (rtx x, rtx body)
734 switch (GET_CODE (body))
737 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
740 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
741 of a REG that occupies all of the REG, the insn references X if
742 it is mentioned in the destination. */
743 if (GET_CODE (SET_DEST (body)) != CC0
744 && GET_CODE (SET_DEST (body)) != PC
745 && !REG_P (SET_DEST (body))
746 && ! (GET_CODE (SET_DEST (body)) == SUBREG
747 && REG_P (SUBREG_REG (SET_DEST (body)))
748 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
749 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
750 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
751 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
752 && reg_overlap_mentioned_p (x, SET_DEST (body)))
757 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
758 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
765 return reg_overlap_mentioned_p (x, body);
768 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
771 return reg_overlap_mentioned_p (x, XEXP (body, 0));
774 case UNSPEC_VOLATILE:
775 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
776 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
781 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
782 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
787 if (MEM_P (XEXP (body, 0)))
788 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
793 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
795 return reg_referenced_p (x, COND_EXEC_CODE (body));
802 /* Nonzero if register REG is set or clobbered in an insn between
803 FROM_INSN and TO_INSN (exclusive of those two). */
806 reg_set_between_p (rtx reg, rtx from_insn, rtx to_insn)
810 if (from_insn == to_insn)
813 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
814 if (INSN_P (insn) && reg_set_p (reg, insn))
819 /* Internals of reg_set_between_p: return nonzero if REG is set in INSN. */
821 reg_set_p (rtx reg, rtx insn)
823 /* We can be passed an insn or part of one. If we are passed an insn,
824 check if a side-effect of the insn clobbers REG. */
826 && (FIND_REG_INC_NOTE (insn, reg)
829 && REGNO (reg) < FIRST_PSEUDO_REGISTER
830 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
831 GET_MODE (reg), REGNO (reg)))
833 || find_reg_fusage (insn, CLOBBER, reg)))))
836 return set_of (reg, insn) != NULL_RTX;
839 /* Similar to reg_set_between_p, but check all registers in X. Return 0
840 only if none of them are modified between START and END. Return 1 if
841 X contains a MEM; this routine does use memory aliasing. */
844 modified_between_p (rtx x, rtx start, rtx end)
846 enum rtx_code code = GET_CODE (x);
869 if (modified_between_p (XEXP (x, 0), start, end))
871 if (MEM_READONLY_P (x))
873 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
874 if (memory_modified_in_insn_p (x, insn))
880 return reg_set_between_p (x, start, end);
886 fmt = GET_RTX_FORMAT (code);
887 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
889 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
892 else if (fmt[i] == 'E')
893 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
894 if (modified_between_p (XVECEXP (x, i, j), start, end))
901 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
902 of them are modified in INSN. Return 1 if X contains a MEM; this routine
903 does use memory aliasing. */
906 modified_in_p (rtx x, rtx insn)
908 enum rtx_code code = GET_CODE (x);
927 if (modified_in_p (XEXP (x, 0), insn))
929 if (MEM_READONLY_P (x))
931 if (memory_modified_in_insn_p (x, insn))
937 return reg_set_p (x, insn);
943 fmt = GET_RTX_FORMAT (code);
944 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
946 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
949 else if (fmt[i] == 'E')
950 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
951 if (modified_in_p (XVECEXP (x, i, j), insn))
958 /* Helper function for set_of. */
966 set_of_1 (rtx x, rtx pat, void *data1)
968 struct set_of_data *data = (struct set_of_data *) (data1);
969 if (rtx_equal_p (x, data->pat)
970 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
974 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
975 (either directly or via STRICT_LOW_PART and similar modifiers). */
977 set_of (rtx pat, rtx insn)
979 struct set_of_data data;
980 data.found = NULL_RTX;
982 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
986 /* Given an INSN, return a SET expression if this insn has only a single SET.
987 It may also have CLOBBERs, USEs, or SETs whose output
988 will not be used, which we ignore. */
991 single_set_2 (rtx insn, rtx pat)
994 int set_verified = 1;
997 if (GET_CODE (pat) == PARALLEL)
999 for (i = 0; i < XVECLEN (pat, 0); i++)
1001 rtx sub = XVECEXP (pat, 0, i);
1002 switch (GET_CODE (sub))
1009 /* We can consider insns having multiple sets, where all
1010 but one are dead as single set insns. In common case
1011 only single set is present in the pattern so we want
1012 to avoid checking for REG_UNUSED notes unless necessary.
1014 When we reach set first time, we just expect this is
1015 the single set we are looking for and only when more
1016 sets are found in the insn, we check them. */
1019 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1020 && !side_effects_p (set))
1026 set = sub, set_verified = 0;
1027 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1028 || side_effects_p (sub))
1040 /* Given an INSN, return nonzero if it has more than one SET, else return
1044 multiple_sets (rtx insn)
1049 /* INSN must be an insn. */
1050 if (! INSN_P (insn))
1053 /* Only a PARALLEL can have multiple SETs. */
1054 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1056 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1057 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1059 /* If we have already found a SET, then return now. */
1067 /* Either zero or one SET. */
1071 /* Return nonzero if the destination of SET equals the source
1072 and there are no side effects. */
1075 set_noop_p (rtx set)
1077 rtx src = SET_SRC (set);
1078 rtx dst = SET_DEST (set);
1080 if (dst == pc_rtx && src == pc_rtx)
1083 if (MEM_P (dst) && MEM_P (src))
1084 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1086 if (GET_CODE (dst) == ZERO_EXTRACT)
1087 return rtx_equal_p (XEXP (dst, 0), src)
1088 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1089 && !side_effects_p (src);
1091 if (GET_CODE (dst) == STRICT_LOW_PART)
1092 dst = XEXP (dst, 0);
1094 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1096 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1098 src = SUBREG_REG (src);
1099 dst = SUBREG_REG (dst);
1102 return (REG_P (src) && REG_P (dst)
1103 && REGNO (src) == REGNO (dst));
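/* For example (illustrative): (set (reg:SI 5) (reg:SI 5)) is a no-op,
   while (set (subreg:HI (reg:SI 5) 0) (subreg:HI (reg:SI 5) 2)) is not,
   because the two SUBREG_BYTEs differ. */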
1106 /* Return nonzero if an insn consists only of SETs, each of which only sets a value to itself. */
1110 noop_move_p (rtx insn)
1112 rtx pat = PATTERN (insn);
1114 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1117 /* Insns carrying these notes are useful later on. */
1118 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1121 /* For now treat an insn with a REG_RETVAL note as a
1122 special insn which should not be considered a no-op. */
1123 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
1126 if (GET_CODE (pat) == SET && set_noop_p (pat))
1129 if (GET_CODE (pat) == PARALLEL)
1132 /* If nothing but SETs of registers to themselves,
1133 this insn can also be deleted. */
1134 for (i = 0; i < XVECLEN (pat, 0); i++)
1136 rtx tem = XVECEXP (pat, 0, i);
1138 if (GET_CODE (tem) == USE
1139 || GET_CODE (tem) == CLOBBER)
1142 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1152 /* Return the last thing that X was assigned from before *PINSN. If VALID_TO
1153 is not NULL_RTX then verify that the object is not modified up to VALID_TO.
1154 If the object was modified, if we hit a partial assignment to X, or hit a
1155 CODE_LABEL first, return X. If we found an assignment, update *PINSN to
1156 point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to be the source. */
1160 find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
1164 for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
1168 rtx set = single_set (p);
1169 rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
1171 if (set && rtx_equal_p (x, SET_DEST (set)))
1173 rtx src = SET_SRC (set);
1175 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
1176 src = XEXP (note, 0);
1178 if ((valid_to == NULL_RTX
1179 || ! modified_between_p (src, PREV_INSN (p), valid_to))
1180 /* Reject hard registers because we don't usually want
1181 to use them; we'd rather use a pseudo. */
1183 && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
1190 /* If set in a non-simple way, we don't have a value. */
1191 if (reg_set_p (x, p))
1198 /* Return nonzero if register in range [REGNO, ENDREGNO)
1199 appears either explicitly or implicitly in X
1200 other than being stored into.
1202 References contained within the substructure at LOC do not count.
1203 LOC may be zero, meaning don't ignore anything. */
1206 refers_to_regno_p (unsigned int regno, unsigned int endregno, rtx x,
1210 unsigned int x_regno;
1215 /* The contents of a REG_NONNEG note are always zero, so we must come here
1216 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1220 code = GET_CODE (x);
1225 x_regno = REGNO (x);
1227 /* If we are modifying the stack, frame, or argument pointer, it will
1228 clobber a virtual register. In fact, we could be more precise,
1229 but it isn't worth it. */
1230 if ((x_regno == STACK_POINTER_REGNUM
1231 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1232 || x_regno == ARG_POINTER_REGNUM
1234 || x_regno == FRAME_POINTER_REGNUM)
1235 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1238 return endregno > x_regno && regno < END_REGNO (x);
1241 /* If this is a SUBREG of a hard reg, we can see exactly which
1242 registers are being modified. Otherwise, handle normally. */
1243 if (REG_P (SUBREG_REG (x))
1244 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1246 unsigned int inner_regno = subreg_regno (x);
1247 unsigned int inner_endregno
1248 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1249 ? subreg_nregs (x) : 1);
1251 return endregno > inner_regno && regno < inner_endregno;
1257 if (&SET_DEST (x) != loc
1258 /* Note setting a SUBREG counts as referring to the REG it is in for
1259 a pseudo but not for hard registers since we can
1260 treat each word individually. */
1261 && ((GET_CODE (SET_DEST (x)) == SUBREG
1262 && loc != &SUBREG_REG (SET_DEST (x))
1263 && REG_P (SUBREG_REG (SET_DEST (x)))
1264 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1265 && refers_to_regno_p (regno, endregno,
1266 SUBREG_REG (SET_DEST (x)), loc))
1267 || (!REG_P (SET_DEST (x))
1268 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1271 if (code == CLOBBER || loc == &SET_SRC (x))
1280 /* X does not match, so try its subexpressions. */
1282 fmt = GET_RTX_FORMAT (code);
1283 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1285 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1293 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1296 else if (fmt[i] == 'E')
1299 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1300 if (loc != &XVECEXP (x, i, j)
1301 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1308 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1309 we check if any register number in X conflicts with the relevant register
1310 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1311 contains a MEM (we don't bother checking for memory addresses that can't
1312 conflict because we expect this to be a rare case). */
1315 reg_overlap_mentioned_p (rtx x, rtx in)
1317 unsigned int regno, endregno;
1319 /* If either argument is a constant, then modifying X can not
1320 affect IN. Here we look at IN; we can profitably combine the
1321 CONSTANT_P (x) test with the switch statement below. */
1322 if (CONSTANT_P (in))
1326 switch (GET_CODE (x))
1328 case STRICT_LOW_PART:
1331 /* Overly conservative. */
1336 regno = REGNO (SUBREG_REG (x));
1337 if (regno < FIRST_PSEUDO_REGISTER)
1338 regno = subreg_regno (x);
1339 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1340 ? subreg_nregs (x) : 1);
1345 endregno = END_REGNO (x);
1347 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1357 fmt = GET_RTX_FORMAT (GET_CODE (in));
1358 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1361 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1364 else if (fmt[i] == 'E')
1367 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1368 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1378 return reg_mentioned_p (x, in);
1384 /* If any register in here refers to it we return true. */
1385 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1386 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1387 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1393 gcc_assert (CONSTANT_P (x));
1398 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1399 (X would be the pattern of an insn).
1400 FUN receives two arguments:
1401 the REG, MEM, CC0 or PC being stored in or clobbered,
1402 the SET or CLOBBER rtx that does the store.
1404 If the item being stored in or clobbered is a SUBREG of a hard register,
1405 the SUBREG will be passed. */
1408 note_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data)
1412 if (GET_CODE (x) == COND_EXEC)
1413 x = COND_EXEC_CODE (x);
1415 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1417 rtx dest = SET_DEST (x);
1419 while ((GET_CODE (dest) == SUBREG
1420 && (!REG_P (SUBREG_REG (dest))
1421 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1422 || GET_CODE (dest) == ZERO_EXTRACT
1423 || GET_CODE (dest) == STRICT_LOW_PART)
1424 dest = XEXP (dest, 0);
1426 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1427 each of whose first operand is a register. */
1428 if (GET_CODE (dest) == PARALLEL)
1430 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1431 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1432 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1435 (*fun) (dest, x, data);
1438 else if (GET_CODE (x) == PARALLEL)
1439 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1440 note_stores (XVECEXP (x, 0, i), fun, data);
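/* A minimal usage sketch (hypothetical helper, relying only on the
   signature above): count the hard registers stored into by a pattern.

     static void
     count_hard_reg_stores (rtx dest, rtx setter ATTRIBUTE_UNUSED,
                            void *data)
     {
       int *count = (int *) data;
       if (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
         (*count)++;
     }

     int n = 0;
     note_stores (PATTERN (insn), count_hard_reg_stores, &n);  */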
1443 /* Like note_stores, but call FUN for each expression that is being
1444 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1445 FUN for each expression, not any interior subexpressions. FUN receives a
1446 pointer to the expression and the DATA passed to this function.
1448 Note that this is not quite the same test as that done in reg_referenced_p
1449 since that considers something as being referenced if it is being
1450 partially set, while we do not. */
1453 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1458 switch (GET_CODE (body))
1461 (*fun) (&COND_EXEC_TEST (body), data);
1462 note_uses (&COND_EXEC_CODE (body), fun, data);
1466 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1467 note_uses (&XVECEXP (body, 0, i), fun, data);
1471 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1472 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1476 (*fun) (&XEXP (body, 0), data);
1480 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1481 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1485 (*fun) (&TRAP_CONDITION (body), data);
1489 (*fun) (&XEXP (body, 0), data);
1493 case UNSPEC_VOLATILE:
1494 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1495 (*fun) (&XVECEXP (body, 0, i), data);
1499 if (MEM_P (XEXP (body, 0)))
1500 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1505 rtx dest = SET_DEST (body);
1507 /* For SETs we process everything in the source, plus registers mentioned
1508 in the memory expression of the store and the operands of a ZERO_EXTRACT. */
1509 (*fun) (&SET_SRC (body), data);
1511 if (GET_CODE (dest) == ZERO_EXTRACT)
1513 (*fun) (&XEXP (dest, 1), data);
1514 (*fun) (&XEXP (dest, 2), data);
1517 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1518 dest = XEXP (dest, 0);
1521 (*fun) (&XEXP (dest, 0), data);
1526 /* All the other possibilities never store. */
1527 (*fun) (pbody, data);
1532 /* Return nonzero if X's old contents don't survive after INSN.
1533 This will be true if X is (cc0) or if X is a register and
1534 X dies in INSN or because INSN entirely sets X.
1536 "Entirely set" means set directly and not through a SUBREG, or
1537 ZERO_EXTRACT, so no trace of the old contents remains.
1538 Likewise, REG_INC does not count.
1540 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1541 but for this use that makes no difference, since regs don't overlap
1542 during their lifetimes. Therefore, this function may be used
1543 at any time after deaths have been computed.
1545 If REG is a hard reg that occupies multiple machine registers, this
1546 function will only return 1 if each of those registers will be replaced
1550 dead_or_set_p (rtx insn, rtx x)
1552 unsigned int regno, end_regno;
1555 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1556 if (GET_CODE (x) == CC0)
1559 gcc_assert (REG_P (x));
1562 end_regno = END_REGNO (x);
1563 for (i = regno; i < end_regno; i++)
1564 if (! dead_or_set_regno_p (insn, i))
1570 /* Return TRUE iff DEST is a register or subreg of a register and
1571 doesn't change the number of words of the inner register, and any
1572 part of the register is TEST_REGNO. */
1575 covers_regno_no_parallel_p (rtx dest, unsigned int test_regno)
1577 unsigned int regno, endregno;
1579 if (GET_CODE (dest) == SUBREG
1580 && (((GET_MODE_SIZE (GET_MODE (dest))
1581 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1582 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1583 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1584 dest = SUBREG_REG (dest);
1589 regno = REGNO (dest);
1590 endregno = END_REGNO (dest);
1591 return (test_regno >= regno && test_regno < endregno);
1594 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1595 any member matches the covers_regno_no_parallel_p criteria. */
1598 covers_regno_p (rtx dest, unsigned int test_regno)
1600 if (GET_CODE (dest) == PARALLEL)
1602 /* Some targets place small structures in registers for return
1603 values of functions, and those registers are wrapped in
1604 PARALLELs that we may see as the destination of a SET. */
1607 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1609 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1610 if (inner != NULL_RTX
1611 && covers_regno_no_parallel_p (inner, test_regno))
1618 return covers_regno_no_parallel_p (dest, test_regno);
1621 /* Utility function for dead_or_set_p to check an individual register. */
1624 dead_or_set_regno_p (rtx insn, unsigned int test_regno)
1628 /* See if there is a death note for something that includes TEST_REGNO. */
1629 if (find_regno_note (insn, REG_DEAD, test_regno))
1633 && find_regno_fusage (insn, CLOBBER, test_regno))
1636 pattern = PATTERN (insn);
1638 if (GET_CODE (pattern) == COND_EXEC)
1639 pattern = COND_EXEC_CODE (pattern);
1641 if (GET_CODE (pattern) == SET)
1642 return covers_regno_p (SET_DEST (pattern), test_regno);
1643 else if (GET_CODE (pattern) == PARALLEL)
1647 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1649 rtx body = XVECEXP (pattern, 0, i);
1651 if (GET_CODE (body) == COND_EXEC)
1652 body = COND_EXEC_CODE (body);
1654 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1655 && covers_regno_p (SET_DEST (body), test_regno))
1663 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1664 If DATUM is nonzero, look for one whose datum is DATUM. */
1667 find_reg_note (rtx insn, enum reg_note kind, rtx datum)
1673 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1674 if (! INSN_P (insn))
1678 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1679 if (REG_NOTE_KIND (link) == kind)
1684 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1685 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1690 /* Return the reg-note of kind KIND in insn INSN which applies to register
1691 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1692 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1693 it might be the case that the note overlaps REGNO. */
1696 find_regno_note (rtx insn, enum reg_note kind, unsigned int regno)
1700 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1701 if (! INSN_P (insn))
1704 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1705 if (REG_NOTE_KIND (link) == kind
1706 /* Verify that it is a register, so that scratch and MEM won't cause a problem here. */
1708 && REG_P (XEXP (link, 0))
1709 && REGNO (XEXP (link, 0)) <= regno
1710 && END_REGNO (XEXP (link, 0)) > regno)
1715 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and has such a note in it. */
1719 find_reg_equal_equiv_note (rtx insn)
1726 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1727 if (REG_NOTE_KIND (link) == REG_EQUAL
1728 || REG_NOTE_KIND (link) == REG_EQUIV)
1730 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
1731 insns that have multiple sets. Checking single_set to
1732 make sure of this is not the proper check, as explained
1733 in the comment in set_unique_reg_note.
1735 This should be changed into an assert. */
1736 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1743 /* Check whether INSN is a single_set whose source is known to be
1744 equivalent to a constant. Return that constant if so, otherwise return null. */
1748 find_constant_src (rtx insn)
1752 set = single_set (insn);
1755 x = avoid_constant_pool_reference (SET_SRC (set));
1760 note = find_reg_equal_equiv_note (insn);
1761 if (note && CONSTANT_P (XEXP (note, 0)))
1762 return XEXP (note, 0);
1767 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1768 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1771 find_reg_fusage (rtx insn, enum rtx_code code, rtx datum)
1773 /* If it's not a CALL_INSN, it can't possibly have a
1774 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1784 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1786 link = XEXP (link, 1))
1787 if (GET_CODE (XEXP (link, 0)) == code
1788 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1793 unsigned int regno = REGNO (datum);
1795 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1796 to pseudo registers, so don't bother checking. */
1798 if (regno < FIRST_PSEUDO_REGISTER)
1800 unsigned int end_regno = END_HARD_REGNO (datum);
1803 for (i = regno; i < end_regno; i++)
1804 if (find_regno_fusage (insn, code, i))
1812 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1813 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1816 find_regno_fusage (rtx insn, enum rtx_code code, unsigned int regno)
1820 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1821 to pseudo registers, so don't bother checking. */
1823 if (regno >= FIRST_PSEUDO_REGISTER
1827 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1831 if (GET_CODE (op = XEXP (link, 0)) == code
1832 && REG_P (reg = XEXP (op, 0))
1833 && REGNO (reg) <= regno
1834 && END_HARD_REGNO (reg) > regno)
1841 /* Return true if INSN is a call to a pure function. */
1844 pure_call_p (rtx insn)
1848 if (!CALL_P (insn) || ! CONST_OR_PURE_CALL_P (insn))
1851 /* Look for the note that differentiates const and pure functions. */
1852 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1856 if (GET_CODE (u = XEXP (link, 0)) == USE
1857 && MEM_P (m = XEXP (u, 0)) && GET_MODE (m) == BLKmode
1858 && GET_CODE (XEXP (m, 0)) == SCRATCH)
1865 /* Remove register note NOTE from the REG_NOTES of INSN. */
1868 remove_note (rtx insn, rtx note)
1872 if (note == NULL_RTX)
1875 if (REG_NOTES (insn) == note)
1876 REG_NOTES (insn) = XEXP (note, 1);
1878 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1879 if (XEXP (link, 1) == note)
1881 XEXP (link, 1) = XEXP (note, 1);
1885 switch (REG_NOTE_KIND (note))
1889 df_notes_rescan (insn);
1896 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes. */
1899 remove_reg_equal_equiv_notes (rtx insn)
1903 loc = &REG_NOTES (insn);
1906 enum reg_note kind = REG_NOTE_KIND (*loc);
1907 if (kind == REG_EQUAL || kind == REG_EQUIV)
1908 *loc = XEXP (*loc, 1);
1910 loc = &XEXP (*loc, 1);
1914 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1915 return 1 if it is found. A simple equality test is used to determine if NODE matches. */
1919 in_expr_list_p (rtx listp, rtx node)
1923 for (x = listp; x; x = XEXP (x, 1))
1924 if (node == XEXP (x, 0))
1930 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1931 remove that entry from the list if it is found.
1933 A simple equality test is used to determine if NODE matches. */
1936 remove_node_from_expr_list (rtx node, rtx *listp)
1939 rtx prev = NULL_RTX;
1943 if (node == XEXP (temp, 0))
1945 /* Splice the node out of the list. */
1947 XEXP (prev, 1) = XEXP (temp, 1);
1949 *listp = XEXP (temp, 1);
1955 temp = XEXP (temp, 1);
1959 /* Nonzero if X contains any volatile instructions. These are instructions
1960 which may cause unpredictable machine state instructions, and thus no
1961 instructions should be moved or combined across them. This includes
1962 only volatile asms and UNSPEC_VOLATILE instructions. */
1965 volatile_insn_p (rtx x)
1969 code = GET_CODE (x);
1989 case UNSPEC_VOLATILE:
1990 /* case TRAP_IF: This isn't clear yet. */
1995 if (MEM_VOLATILE_P (x))
2002 /* Recursively scan the operands of this expression. */
2005 const char *fmt = GET_RTX_FORMAT (code);
2008 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2012 if (volatile_insn_p (XEXP (x, i)))
2015 else if (fmt[i] == 'E')
2018 for (j = 0; j < XVECLEN (x, i); j++)
2019 if (volatile_insn_p (XVECEXP (x, i, j)))
2027 /* Nonzero if X contains any volatile memory references,
2028 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
2031 volatile_refs_p (rtx x)
2035 code = GET_CODE (x);
2053 case UNSPEC_VOLATILE:
2059 if (MEM_VOLATILE_P (x))
2066 /* Recursively scan the operands of this expression. */
2069 const char *fmt = GET_RTX_FORMAT (code);
2072 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2076 if (volatile_refs_p (XEXP (x, i)))
2079 else if (fmt[i] == 'E')
2082 for (j = 0; j < XVECLEN (x, i); j++)
2083 if (volatile_refs_p (XVECEXP (x, i, j)))
2091 /* Similar to above, except that it also rejects register pre- and post-incrementing. */
2095 side_effects_p (rtx x)
2099 code = GET_CODE (x);
2117 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2118 when some combination can't be done. If we see one, don't think
2119 that we can simplify the expression. */
2120 return (GET_MODE (x) != VOIDmode);
2129 case UNSPEC_VOLATILE:
2130 /* case TRAP_IF: This isn't clear yet. */
2136 if (MEM_VOLATILE_P (x))
2143 /* Recursively scan the operands of this expression. */
2146 const char *fmt = GET_RTX_FORMAT (code);
2149 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2153 if (side_effects_p (XEXP (x, i)))
2156 else if (fmt[i] == 'E')
2159 for (j = 0; j < XVECLEN (x, i); j++)
2160 if (side_effects_p (XVECEXP (x, i, j)))
2168 enum may_trap_p_flags
{
2170 MTP_UNALIGNED_MEMS = 1,
  MTP_AFTER_MOVE = 2
};
2173 /* Return nonzero if evaluating rtx X might cause a trap.
2174 (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for
2175 unaligned memory accesses on strict alignment machines. If
2176 (FLAGS & MTP_AFTER_MOVE) is true, returns nonzero even in case the expression
2177 cannot trap at its current location, but it might become trapping if moved elsewhere. */
2181 may_trap_p_1 (rtx x, unsigned flags)
2186 bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0;
2190 code = GET_CODE (x);
2193 /* Handle these cases quickly. */
2207 case UNSPEC_VOLATILE:
2212 return MEM_VOLATILE_P (x);
2214 /* Memory ref can trap unless it's a static var or a stack slot. */
2216 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2217 reference; moving it out of condition might cause its address to become invalid. */
2219 !(flags & MTP_AFTER_MOVE)
2221 && (!STRICT_ALIGNMENT || !unaligned_mems))
2224 rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems);
2226 /* Division by a non-constant might trap. */
2231 if (HONOR_SNANS (GET_MODE (x)))
2233 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2234 return flag_trapping_math;
2235 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2240 /* An EXPR_LIST is used to represent a function call. This
2241 certainly may trap. */
2250 /* Some floating point comparisons may trap. */
2251 if (!flag_trapping_math)
2253 /* ??? There is no machine independent way to check for tests that trap
2254 when COMPARE is used, though many targets do make this distinction.
2255 For instance, sparc uses CCFPE for compares which generate exceptions
2256 and CCFP for compares which do not generate exceptions. */
2257 if (HONOR_NANS (GET_MODE (x)))
2259 /* But often the compare has some CC mode, so check operand modes as well. */
2261 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2262 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2268 if (HONOR_SNANS (GET_MODE (x)))
2270 /* Often comparison is CC mode, so check operand modes. */
2271 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2272 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2277 /* Conversion of floating point might trap. */
2278 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2285 /* These operations don't trap even with floating point. */
2289 /* Any floating arithmetic may trap. */
2290 if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
2291 && flag_trapping_math)
2295 fmt = GET_RTX_FORMAT (code);
2296 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2300 if (may_trap_p_1 (XEXP (x, i), flags))
2303 else if (fmt[i] == 'E')
2306 for (j = 0; j < XVECLEN (x, i); j++)
2307 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2314 /* Return nonzero if evaluating rtx X might cause a trap. */
2319 return may_trap_p_1 (x, 0);
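/* For instance (illustrative): (div:SI (reg:SI 1) (reg:SI 2)) may trap
   because the divisor is not a constant, while
   (plus:SI (reg:SI 1) (reg:SI 2)) never traps; a (mem ...) is decided
   by rtx_addr_can_trap_p_1 on its address. */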
2322 /* Return nonzero if evaluating rtx X might cause a trap, when the expression
2323 is moved from its current location by some optimization. */
2326 may_trap_after_code_motion_p (rtx x)
2328 return may_trap_p_1 (x, MTP_AFTER_MOVE);
2331 /* Same as above, but additionally return nonzero if evaluating rtx X might
2332 cause a fault. We define a fault for the purpose of this function as an
2333 erroneous execution condition that cannot be encountered during the normal
2334 execution of a valid program; the typical example is an unaligned memory
2335 access on a strict alignment machine. The compiler guarantees that it
2336 doesn't generate code that will fault from a valid program, but this
2337 guarantee doesn't mean anything for individual instructions. Consider
2338 the following example:
2340 struct S { int d; union { char *cp; int *ip; }; };
2342 int foo(struct S *s)
{
  if (s->d == 1)
    return *s->ip;
  else
    return *s->cp;
}
2350 on a strict alignment machine. In a valid program, foo will never be
2351 invoked on a structure for which d is equal to 1 and the underlying
2352 unique field of the union is not aligned on a 4-byte boundary, but the
2353 expression *s->ip might cause a fault if considered individually.
2355 At the RTL level, potentially problematic expressions will almost always
2356 verify may_trap_p; for example, the above dereference can be emitted as
2357 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2358 However, suppose that foo is inlined in a caller that causes s->cp to
2359 point to a local character variable and guarantees that s->d is not set
2360 to 1; foo may have been effectively translated into pseudo-RTL as:
if ((reg:SI) == 1)
2363 (set (reg:SI) (mem:SI (%fp - 7)))
else
2365 (set (reg:QI) (mem:QI (%fp - 7)))
2367 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2368 memory reference to a stack slot, but it will certainly cause a fault
2369 on a strict alignment machine. */
2372 may_trap_or_fault_p (rtx x)
2374 return may_trap_p_1 (x, MTP_UNALIGNED_MEMS);
2377 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2378 i.e., an inequality. */
2381 inequality_comparisons_p (rtx x)
2385 enum rtx_code code = GET_CODE (x);
2415 len = GET_RTX_LENGTH (code);
2416 fmt = GET_RTX_FORMAT (code);
2418 for (i = 0; i < len; i++)
2422 if (inequality_comparisons_p (XEXP (x, i)))
2425 else if (fmt[i] == 'E')
2428 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2429 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2437 /* Replace any occurrence of FROM in X with TO. The function does
2438 not enter into CONST_DOUBLE for the replace.
2440 Note that copying is not done so X must not be shared unless all copies
2441 are to be modified. */
2444 replace_rtx (rtx x, rtx from, rtx to)
2449 /* The following prevents loops from occurring when we change a MEM in
2450 a CONST_DOUBLE onto the same CONST_DOUBLE. */
2451 if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
2457 /* Allow this function to make replacements in EXPR_LISTs. */
2461 if (GET_CODE (x) == SUBREG)
2463 rtx new = replace_rtx (SUBREG_REG (x), from, to);
2465 if (GET_CODE (new) == CONST_INT)
2467 x = simplify_subreg (GET_MODE (x), new,
2468 GET_MODE (SUBREG_REG (x)),
2473 SUBREG_REG (x) = new;
2477 else if (GET_CODE (x) == ZERO_EXTEND)
2479 rtx new = replace_rtx (XEXP (x, 0), from, to);
2481 if (GET_CODE (new) == CONST_INT)
2483 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2484 new, GET_MODE (XEXP (x, 0)));
2493 fmt = GET_RTX_FORMAT (GET_CODE (x));
2494 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2497 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2498 else if (fmt[i] == 'E')
2499 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2500 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2506 /* Replace occurrences of the old label in *X with the new one.
2507 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2510 replace_label (rtx *x, void *data)
2513 rtx old_label = ((replace_label_data *) data)->r1;
2514 rtx new_label = ((replace_label_data *) data)->r2;
2515 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2520 if (GET_CODE (l) == SYMBOL_REF
2521 && CONSTANT_POOL_ADDRESS_P (l))
2523 rtx c = get_pool_constant (l);
2524 if (rtx_referenced_p (old_label, c))
2527 replace_label_data *d = (replace_label_data *) data;
2529 /* Create a copy of constant C; replace the label inside
2530 but do not update LABEL_NUSES because uses in constant pool are not counted. */
2532 new_c = copy_rtx (c);
2533 d->update_label_nuses = false;
2534 for_each_rtx (&new_c, replace_label, data);
2535 d->update_label_nuses = update_label_nuses;
2537 /* Add the new constant NEW_C to constant pool and replace
2538 the old reference to constant by new reference. */
2539 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2540 *x = replace_rtx (l, l, new_l);
2545 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2546 field. This is not handled by for_each_rtx because it doesn't
2547 handle unprinted ('0') fields. */
2548 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2549 JUMP_LABEL (l) = new_label;
2551 if ((GET_CODE (l) == LABEL_REF
2552 || GET_CODE (l) == INSN_LIST)
2553 && XEXP (l, 0) == old_label)
2555 XEXP (l, 0) = new_label;
2556 if (update_label_nuses)
2558 ++LABEL_NUSES (new_label);
2559 --LABEL_NUSES (old_label);
2567 /* When *BODY is equal to X or X is directly referenced by *BODY
2568 return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero
2569 too, otherwise FOR_EACH_RTX continues traversing *BODY. */
2572 rtx_referenced_p_1 (rtx *body, void *x)
2576 if (*body == NULL_RTX)
2577 return y == NULL_RTX;
2579 /* Return true if a label_ref *BODY refers to label Y. */
2580 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2581 return XEXP (*body, 0) == y;
2583 /* If *BODY is a reference to a pool constant, traverse the constant. */
2584 if (GET_CODE (*body) == SYMBOL_REF
2585 && CONSTANT_POOL_ADDRESS_P (*body))
2586 return rtx_referenced_p (y, get_pool_constant (*body));
2588 /* By default, compare the RTL expressions. */
2589 return rtx_equal_p (*body, y);
2592 /* Return true if X is referenced in BODY. */
2595 rtx_referenced_p (rtx x, rtx body)
2597 return for_each_rtx (&body, rtx_referenced_p_1, x);
2600 /* If INSN is a tablejump return true and store the label (before jump table) to
2601 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
2604 tablejump_p (rtx insn, rtx *labelp, rtx *tablep)
2609 && (label = JUMP_LABEL (insn)) != NULL_RTX
2610 && (table = next_active_insn (label)) != NULL_RTX
2612 && (GET_CODE (PATTERN (table)) == ADDR_VEC
2613 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
2624 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2625 constant that is not in the constant pool and not in the condition
2626 of an IF_THEN_ELSE. */
2629 computed_jump_p_1 (rtx x)
2631 enum rtx_code code = GET_CODE (x);
2650 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2651 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2654 return (computed_jump_p_1 (XEXP (x, 1))
2655 || computed_jump_p_1 (XEXP (x, 2)));
2661 fmt = GET_RTX_FORMAT (code);
2662 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2665 && computed_jump_p_1 (XEXP (x, i)))
2668 else if (fmt[i] == 'E')
2669 for (j = 0; j < XVECLEN (x, i); j++)
2670 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2677 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2679 Tablejumps and casesi insns are not considered indirect jumps;
2680 we can recognize them by a (use (label_ref)). */
2683 computed_jump_p (rtx insn)
2688 rtx pat = PATTERN (insn);
2690 if (find_reg_note (insn, REG_LABEL, NULL_RTX))
2692 else if (GET_CODE (pat) == PARALLEL)
2694 int len = XVECLEN (pat, 0);
2695 int has_use_labelref = 0;
2697 for (i = len - 1; i >= 0; i--)
2698 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2699 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2701 has_use_labelref = 1;
2703 if (! has_use_labelref)
2704 for (i = len - 1; i >= 0; i--)
2705 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2706 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2707 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2710 else if (GET_CODE (pat) == SET
2711 && SET_DEST (pat) == pc_rtx
2712 && computed_jump_p_1 (SET_SRC (pat)))
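/* For instance (illustrative): (set (pc) (reg:SI 3)) is a computed jump,
   whereas a tablejump's PARALLEL is not, because it carries a
   (use (label_ref ...)) alongside the SET of the pc. */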
2718 /* Optimized loop of for_each_rtx, trying to avoid useless recursive
2719 calls. Processes the subexpressions of EXP and passes them to F. */
2721 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2724 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2727 for (; format[n] != '\0'; n++)
2734 result = (*f) (x, data);
2736 /* Do not traverse sub-expressions. */
2738 else if (result != 0)
2739 /* Stop the traversal. */
2743 /* There are no sub-expressions. */
2746 i = non_rtx_starting_operands[GET_CODE (*x)];
2749 result = for_each_rtx_1 (*x, i, f, data);
2757 if (XVEC (exp, n) == 0)
2759 for (j = 0; j < XVECLEN (exp, n); ++j)
2762 x = &XVECEXP (exp, n, j);
2763 result = (*f) (x, data);
2765 /* Do not traverse sub-expressions. */
2767 else if (result != 0)
2768 /* Stop the traversal. */
2772 /* There are no sub-expressions. */
2775 i = non_rtx_starting_operands[GET_CODE (*x)];
2778 result = for_each_rtx_1 (*x, i, f, data);
2786 /* Nothing to do. */
2794 /* Traverse X via depth-first search, calling F for each
2795 sub-expression (including X itself). F is also passed the DATA.
2796 If F returns -1, do not traverse sub-expressions, but continue
2797 traversing the rest of the tree. If F ever returns any other
2798 nonzero value, stop the traversal, and return the value returned
2799 by F. Otherwise, return 0. This function does not traverse inside
2800 tree structure that contains RTX_EXPRs, or into sub-expressions
2801 whose format code is `0' since it is not known whether or not those
2802 codes are actually RTL.
2804 This routine is very general, and could (should?) be used to
2805 implement many of the other routines in this file. */
2808 for_each_rtx (rtx *x, rtx_function f, void *data)
2814 result = (*f) (x, data);
2816 /* Do not traverse sub-expressions. */
2818 else if (result != 0)
2819 /* Stop the traversal. */
2823 /* There are no sub-expressions. */
2826 i = non_rtx_starting_operands[GET_CODE (*x)];
2830 return for_each_rtx_1 (*x, i, f, data);
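/* A minimal usage sketch (hypothetical helper, relying only on the
   signature above): count every MEM contained in an expression.
   Returning 0 lets the traversal continue into sub-expressions.

     static int
     count_mems_1 (rtx *x, void *data)
     {
       int *count = (int *) data;
       if (MEM_P (*x))
         (*count)++;
       return 0;
     }

     int n = 0;
     for_each_rtx (&pat, count_mems_1, &n);  */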
2834 /* Searches X for any reference to REGNO, returning the rtx of the
2835 reference found if any. Otherwise, returns NULL_RTX. */
2838 regno_use_in (unsigned int regno, rtx x)
2844 if (REG_P (x) && REGNO (x) == regno)
2847 fmt = GET_RTX_FORMAT (GET_CODE (x));
2848 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2852 if ((tem = regno_use_in (regno, XEXP (x, i))))
2855 else if (fmt[i] == 'E')
2856 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2857 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
2864 /* Return a value indicating whether OP, an operand of a commutative
2865 operation, is preferred as the first or second operand. The higher
2866 the value, the stronger the preference for being the first operand.
2867 We use negative values to indicate a preference for the second operand
2868 and positive values for the first operand. */
2871 commutative_operand_precedence (rtx op)
2873 enum rtx_code code = GET_CODE (op);
2875 /* Constants always go in the second operand position. Prefer "nice" constants. */
2876 if (code == CONST_INT)
2878 if (code == CONST_DOUBLE)
2880 op = avoid_constant_pool_reference (op);
2881 code = GET_CODE (op);
2883 switch (GET_RTX_CLASS (code))
2886 if (code == CONST_INT)
2888 if (code == CONST_DOUBLE)
2893 /* SUBREGs of objects should come second. */
2894 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
2900 /* Complex expressions should be the first, so decrease priority of objects. */
2904 case RTX_COMM_ARITH:
2905 /* Prefer operands that are themselves commutative to be first.
2906 This helps to make things linear. In particular,
2907 (and (and (reg) (reg)) (not (reg))) is canonical. */
2911 /* If only one operand is a binary expression, it will be the first
2912 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
2913 is canonical, although it will usually be further simplified. */
2917 /* Then prefer NEG and NOT. */
2918 if (code == NEG || code == NOT)
2926 /* Return 1 iff it is necessary to swap the operands of a commutative
2927 operation in order to canonicalize the expression. */
2930 swap_commutative_operands_p (rtx x, rtx y)
2932 return (commutative_operand_precedence (x)
2933 < commutative_operand_precedence (y));
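/* For example (illustrative): swap_commutative_operands_p of a CONST_INT
   and a REG returns 1, since constants have the lowest precedence and
   belong second; canonicalization thus yields (plus (reg) (const_int N))
   rather than (plus (const_int N) (reg)). */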
2936 /* Return 1 if X is an autoincrement side effect and the register is
2937 not the stack pointer. */
2941 switch (GET_CODE (x))
2949 /* There are no REG_INC notes for SP. */
2950 if (XEXP (x, 0) != stack_pointer_rtx)
2958 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
2960 loc_mentioned_in_p (rtx *loc, rtx in)
2969 code = GET_CODE (in);
2970 fmt = GET_RTX_FORMAT (code);
2971 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2973 if (loc == &in->u.fld[i].rt_rtx)
2977 if (loc_mentioned_in_p (loc, XEXP (in, i)))
2980 else if (fmt[i] == 'E')
2981 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
2982 if (loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
2988 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
2989 and SUBREG_BYTE, return the bit offset where the subreg begins
2990 (counting from the least significant bit of the operand). */
2993 subreg_lsb_1 (enum machine_mode outer_mode,
2994 enum machine_mode inner_mode,
2995 unsigned int subreg_byte)
2997 unsigned int bitpos;
3001 /* A paradoxical subreg begins at bit position 0. */
3002 if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
3005 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
3006 /* If the subreg crosses a word boundary ensure that
3007 it also begins and ends on a word boundary. */
3008 gcc_assert (!((subreg_byte % UNITS_PER_WORD
3009 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
3010 && (subreg_byte % UNITS_PER_WORD
3011 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
3013 if (WORDS_BIG_ENDIAN)
3014 word = (GET_MODE_SIZE (inner_mode)
3015 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
3017 word = subreg_byte / UNITS_PER_WORD;
3018 bitpos = word * BITS_PER_WORD;
3020 if (BYTES_BIG_ENDIAN)
3021 byte = (GET_MODE_SIZE (inner_mode)
3022 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
3024 byte = subreg_byte % UNITS_PER_WORD;
3025 bitpos += byte * BITS_PER_UNIT;
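
/* Editorial worked example: for (subreg:SI (reg:DI) 4) on a
   little-endian 32-bit target (UNITS_PER_WORD == 4), word == 1 and
   byte == 0, so the subreg begins at bit 32 of the DImode value.  On
   a big-endian target the same SUBREG_BYTE selects the low-order
   word, giving bit position 0.  */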
3030 /* Given a subreg X, return the bit offset where the subreg begins
3031 (counting from the least significant bit of the reg). */
3036 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3040 /* Fill in information about a subreg of a hard register.
3041 xregno - A regno of an inner hard subreg_reg (or what will become one).
3042 xmode - The mode of xregno.
3043 offset - The byte offset.
3044 ymode - The mode of a top level SUBREG (or what may become one).
3045 info - Pointer to structure to fill in. */
3047 subreg_get_info (unsigned int xregno, enum machine_mode xmode,
3048 unsigned int offset, enum machine_mode ymode,
3049 struct subreg_info *info)
3051 int nregs_xmode, nregs_ymode;
3052 int mode_multiple, nregs_multiple;
3053 int offset_adj, y_offset, y_offset_adj;
3054 int regsize_xmode, regsize_ymode;
3057 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3061 /* If there are holes in a non-scalar mode in registers, we expect
3062 that it is made up of its units concatenated together. */
3063 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3065 enum machine_mode xmode_unit;
3067 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3068 if (GET_MODE_INNER (xmode) == VOIDmode)
3071 xmode_unit = GET_MODE_INNER (xmode);
3072 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3073 gcc_assert (nregs_xmode
3074 == (GET_MODE_NUNITS (xmode)
3075 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3076 gcc_assert (hard_regno_nregs[xregno][xmode]
3077 == (hard_regno_nregs[xregno][xmode_unit]
3078 * GET_MODE_NUNITS (xmode)));
3080 /* You can only ask for a SUBREG of a value with holes in the middle
3081 if you don't cross the holes. (Such a SUBREG should be done by
3082 picking a different register class, or doing it in memory if
3083 necessary.) An example of a value with holes is XCmode on 32-bit
3084 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3085 3 for each part, but in memory it's two 128-bit parts.
3086 Padding is assumed to be at the end (not necessarily the 'high part') of each unit.  */
3088 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3089 < GET_MODE_NUNITS (xmode))
3090 && (offset / GET_MODE_SIZE (xmode_unit)
3091 != ((offset + GET_MODE_SIZE (ymode) - 1)
3092 / GET_MODE_SIZE (xmode_unit))))
3094 info->representable_p = false;
3099 nregs_xmode = hard_regno_nregs[xregno][xmode];
3101 nregs_ymode = hard_regno_nregs[xregno][ymode];
3103 /* Paradoxical subregs are otherwise valid. */
3106 && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode))
3108 info->representable_p = true;
3109 /* If this is a big endian paradoxical subreg, which uses more
3110 actual hard registers than the original register, we must
3111 return a negative offset so that we find the proper highpart of the register.  */
3113 if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3114 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
3115 info->offset = nregs_xmode - nregs_ymode;
3118 info->nregs = nregs_ymode;
3122 /* If registers store different numbers of bits in the different
3123 modes, we cannot generally form this subreg. */
3124 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3125 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3126 && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
3127 && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
3129 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3130 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3131 if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
3133 info->representable_p = false;
3135 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3136 info->offset = offset / regsize_xmode;
3139 if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
3141 info->representable_p = false;
3143 = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
3144 info->offset = offset / regsize_xmode;
3149 /* Lowpart subregs are otherwise valid. */
3150 if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
3152 info->representable_p = true;
3155 if (offset == 0 || nregs_xmode == nregs_ymode)
3158 info->nregs = nregs_ymode;
3163 /* This should always pass, otherwise we don't know how to verify
3164 the constraint. These conditions may be relaxed but
3165 subreg_regno_offset would need to be redesigned. */
3166 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3167 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3169 /* The XMODE value can be seen as a vector of NREGS_XMODE
3170 values.  The subreg must represent the lowpart of a given field.
3171 Compute what field it is. */
3172 offset_adj = offset;
3173 offset_adj -= subreg_lowpart_offset (ymode,
3174 mode_for_size (GET_MODE_BITSIZE (xmode)
3178 /* Size of ymode must not be greater than the size of xmode. */
3179 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3180 gcc_assert (mode_multiple != 0);
3182 y_offset = offset / GET_MODE_SIZE (ymode);
3183 y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
3184 nregs_multiple = nregs_xmode / nregs_ymode;
3186 gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
3187 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3191 info->representable_p = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
3194 info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
3195 info->nregs = nregs_ymode;
3198 /* This function returns the regno offset of a subreg expression.
3199 xregno - A regno of an inner hard subreg_reg (or what will become one).
3200 xmode - The mode of xregno.
3201 offset - The byte offset.
3202 ymode - The mode of a top level SUBREG (or what may become one).
3203 RETURN - The regno offset which would be used. */
3205 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
3206 unsigned int offset, enum machine_mode ymode)
3208 struct subreg_info info;
3209 subreg_get_info (xregno, xmode, offset, ymode, &info);
3213 /* This function returns true when the offset is representable via
3214 subreg_offset in the given regno.
3215 xregno - A regno of an inner hard subreg_reg (or what will become one).
3216 xmode - The mode of xregno.
3217 offset - The byte offset.
3218 ymode - The mode of a top level SUBREG (or what may become one).
3219 RETURN - Whether the offset is representable. */
3221 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
3222 unsigned int offset, enum machine_mode ymode)
3224 struct subreg_info info;
3225 subreg_get_info (xregno, xmode, offset, ymode, &info);
3226 return info.representable_p;
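
/* Editorial worked example: take a DImode value living in two 4-byte
   hard registers and the little-endian subreg (subreg:SI (reg:DI) 4).
   subreg_get_info computes nregs_ymode == 1 and y_offset == 1, and
   reports representable_p == true, offset == 1, nregs == 1: the
   subreg is exactly the second hard register of the pair.  */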
3229 /* Return the final regno that a subreg expression refers to. */
3231 subreg_regno (rtx x)
3234 rtx subreg = SUBREG_REG (x);
3235 int regno = REGNO (subreg);
3237 ret = regno + subreg_regno_offset (regno,
3245 /* Return the number of registers that a subreg expression refers to.  */
3248 subreg_nregs (rtx x)
3250 struct subreg_info info;
3251 rtx subreg = SUBREG_REG (x);
3252 int regno = REGNO (subreg);
3254 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
3259 struct parms_set_data
3265 /* Helper function for noticing stores to parameter registers. */
3267 parms_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3269 struct parms_set_data *d = data;
3270 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3271 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3273 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3278 /* Look backward for first parameter to be loaded.
3279 Note that loads of all parameters will not necessarily be
3280 found if CSE has eliminated some of them (e.g., an argument
3281 to the outer function is passed down as a parameter).
3282 Do not skip BOUNDARY. */
3284 find_first_parameter_load (rtx call_insn, rtx boundary)
3286 struct parms_set_data parm;
3287 rtx p, before, first_set;
3289 /* Since different machines initialize their parameter registers
3290 in different orders, assume nothing. Collect the set of all
3291 parameter registers. */
3292 CLEAR_HARD_REG_SET (parm.regs);
3294 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3295 if (GET_CODE (XEXP (p, 0)) == USE
3296 && REG_P (XEXP (XEXP (p, 0), 0)))
3298 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3300 /* We only care about registers which can hold function arguments.  */
3302 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3305 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3309 first_set = call_insn;
3311 /* Search backward for the first set of a register in this set. */
3312 while (parm.nregs && before != boundary)
3314 before = PREV_INSN (before);
3316 /* It is possible that some loads got CSEed from one call to
3317 another. Stop in that case. */
3318 if (CALL_P (before))
3321 /* Our caller must either ensure that we will find all sets
3322 (in case the code has not been optimized yet), or take care of
3323 possible labels by setting BOUNDARY to a preceding CODE_LABEL.  */
3325 if (LABEL_P (before))
3327 gcc_assert (before == boundary);
3331 if (INSN_P (before))
3333 int nregs_old = parm.nregs;
3334 note_stores (PATTERN (before), parms_set, &parm);
3335 /* If we found something that did not set a parameter reg,
3336 we're done. Do not keep going, as that might result
3337 in hoisting an insn before the setting of a pseudo
3338 that is used by the hoisted insn. */
3339 if (nregs_old != parm.nregs)
3348 /* Return true if we should avoid inserting code between INSN and the
3349 preceding call instruction.  */
3352 keep_with_call_p (rtx insn)
3356 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3358 if (REG_P (SET_DEST (set))
3359 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3360 && fixed_regs[REGNO (SET_DEST (set))]
3361 && general_operand (SET_SRC (set), VOIDmode))
3363 if (REG_P (SET_SRC (set))
3364 && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
3365 && REG_P (SET_DEST (set))
3366 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3368 /* There may be a stack pop just after the call and before the store
3369 of the return register. Search for the actual store when deciding
3370 if we can break or not. */
3371 if (SET_DEST (set) == stack_pointer_rtx)
3373 rtx i2 = next_nonnote_insn (insn);
3374 if (i2 && keep_with_call_p (i2))
3381 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3382 to non-complex jumps. That is, direct unconditional, conditional,
3383 and tablejumps, but not computed jumps or returns. It also does
3384 not apply to the fallthru case of a conditional jump. */
3387 label_is_jump_target_p (rtx label, rtx jump_insn)
3389 rtx tmp = JUMP_LABEL (jump_insn);
3394 if (tablejump_p (jump_insn, NULL, &tmp))
3396 rtvec vec = XVEC (PATTERN (tmp),
3397 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3398 int i, veclen = GET_NUM_ELEM (vec);
3400 for (i = 0; i < veclen; ++i)
3401 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3409 /* Return an estimate of the cost of computing rtx X.
3410 One use is in cse, to decide which expression to keep in the hash table.
3411 Another is in rtl generation, to pick the cheapest way to multiply.
3412 Other uses like the latter are expected in the future. */
3415 rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED)
3425 /* Compute the default costs of certain things.
3426 Note that targetm.rtx_costs can override the defaults. */
3428 code = GET_CODE (x);
3432 total = COSTS_N_INSNS (5);
3438 total = COSTS_N_INSNS (7);
3441 /* Used in combine.c as a marker. */
3445 total = COSTS_N_INSNS (1);
3455 /* If we can't tie these modes, make this expensive. The larger
3456 the mode, the more expensive it is. */
3457 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3458 return COSTS_N_INSNS (2
3459 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
3463 if (targetm.rtx_costs (x, code, outer_code, &total))
3468 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3469 which is already in total. */
3471 fmt = GET_RTX_FORMAT (code);
3472 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3474 total += rtx_cost (XEXP (x, i), code);
3475 else if (fmt[i] == 'E')
3476 for (j = 0; j < XVECLEN (x, i); j++)
3477 total += rtx_cost (XVECEXP (x, i, j), code);
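
/* Editorial sketch (hypothetical helper): the usual pattern is to
   compare the costs of two candidate forms of one computation, e.g.
   a (mult ...) against an equivalent shift-and-add sequence, and keep
   whichever form rtx_cost rates cheaper.  */

static bool
cheaper_rtx_p (rtx a, rtx b, enum rtx_code outer_code)
{
  return rtx_cost (a, outer_code) < rtx_cost (b, outer_code);
}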
3482 /* Return cost of address expression X.
3483 Expect that X is a properly formed address reference.  */
3486 address_cost (rtx x, enum machine_mode mode)
3488 /* We may be asked for the cost of various unusual addresses, such as the
3489 operands of a push instruction.  It is not worthwhile to complicate
3490 the target hook for such cases.  */
3492 if (!memory_address_p (mode, x))
3495 return targetm.address_cost (x);
3498 /* If the target doesn't override, compute the cost as with arithmetic. */
3501 default_address_cost (rtx x)
3503 return rtx_cost (x, MEM);
3507 unsigned HOST_WIDE_INT
3508 nonzero_bits (rtx x, enum machine_mode mode)
3510 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3514 num_sign_bit_copies (rtx x, enum machine_mode mode)
3516 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
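
/* Editorial worked example of how the two results relate: if
   nonzero_bits reports 12 for an SImode value -- only bits 2 and 3
   can be set -- then bits 31..4 must be zero, so the fallback at the
   end of num_sign_bit_copies1 (below) can deduce
   32 - floor_log2 (12) - 1 == 28 sign-bit copies.  */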
3519 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3520 It avoids exponential behavior in nonzero_bits1 when X has
3521 identical subexpressions on the first or the second level. */
3523 static unsigned HOST_WIDE_INT
3524 cached_nonzero_bits (rtx x, enum machine_mode mode, rtx known_x,
3525 enum machine_mode known_mode,
3526 unsigned HOST_WIDE_INT known_ret)
3528 if (x == known_x && mode == known_mode)
3531 /* Try to find identical subexpressions. If found call
3532 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3533 precomputed value for the subexpression as KNOWN_RET. */
3535 if (ARITHMETIC_P (x))
3537 rtx x0 = XEXP (x, 0);
3538 rtx x1 = XEXP (x, 1);
3540 /* Check the first level. */
3542 return nonzero_bits1 (x, mode, x0, mode,
3543 cached_nonzero_bits (x0, mode, known_x,
3544 known_mode, known_ret));
3546 /* Check the second level. */
3547 if (ARITHMETIC_P (x0)
3548 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3549 return nonzero_bits1 (x, mode, x1, mode,
3550 cached_nonzero_bits (x1, mode, known_x,
3551 known_mode, known_ret));
3553 if (ARITHMETIC_P (x1)
3554 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3555 return nonzero_bits1 (x, mode, x0, mode,
3556 cached_nonzero_bits (x0, mode, known_x,
3557 known_mode, known_ret));
3560 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
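
/* Editorial note: the sharing checks above matter for rtxes such as
   (plus:SI (mult:SI (reg A) (reg B)) (reg B)), where (reg B) occurs
   twice.  Computing its nonzero bits once and threading the result
   through KNOWN_X/KNOWN_RET keeps repeated operands from being
   re-walked at every level, which is what would otherwise make the
   recursion exponential.  */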
3563 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3564 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3565 is less useful. We can't allow both, because that results in exponential
3566 run time recursion. There is a nullstone testcase that triggered
3567 this. This macro avoids accidental uses of num_sign_bit_copies. */
3568 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3570 /* Given an expression, X, compute which bits in X can be nonzero.
3571 We don't care about bits outside of those defined in MODE.
3573 For most X this is simply GET_MODE_MASK (MODE), but if X is
3574 an arithmetic operation, we can do better. */
3576 static unsigned HOST_WIDE_INT
3577 nonzero_bits1 (rtx x, enum machine_mode mode, rtx known_x,
3578 enum machine_mode known_mode,
3579 unsigned HOST_WIDE_INT known_ret)
3581 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3582 unsigned HOST_WIDE_INT inner_nz;
3584 unsigned int mode_width = GET_MODE_BITSIZE (mode);
3586 /* For floating-point values, assume all bits are needed. */
3587 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
3590 /* If X is wider than MODE, use its mode instead. */
3591 if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
3593 mode = GET_MODE (x);
3594 nonzero = GET_MODE_MASK (mode);
3595 mode_width = GET_MODE_BITSIZE (mode);
3598 if (mode_width > HOST_BITS_PER_WIDE_INT)
3599 /* Our only callers in this case look for single bit values. So
3600 just return the mode mask. Those tests will then be false. */
3603 #ifndef WORD_REGISTER_OPERATIONS
3604 /* If MODE is wider than X, but both are a single word for both the host
3605 and target machines, we can compute this from which bits of the
3606 object might be nonzero in its own mode, taking into account the fact
3607 that on many CISC machines, accessing an object in a wider mode
3608 causes the high-order bits to become undefined. So they are
3609 not known to be zero. */
3611 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3612 && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
3613 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3614 && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
3616 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3617 known_x, known_mode, known_ret);
3618 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3623 code = GET_CODE (x);
3627 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3628 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3629 all the bits above ptr_mode are known to be zero. */
3630 if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3632 nonzero &= GET_MODE_MASK (ptr_mode);
3635 /* Include declared information about alignment of pointers. */
3636 /* ??? We don't properly preserve REG_POINTER changes across
3637 pointer-to-integer casts, so we can't trust it except for
3638 things that we know must be pointers. See execute/960116-1.c. */
3639 if ((x == stack_pointer_rtx
3640 || x == frame_pointer_rtx
3641 || x == arg_pointer_rtx)
3642 && REGNO_POINTER_ALIGN (REGNO (x)))
3644 unsigned HOST_WIDE_INT alignment
3645 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3647 #ifdef PUSH_ROUNDING
3648 /* If PUSH_ROUNDING is defined, it is possible for the
3649 stack to be momentarily aligned only to that amount,
3650 so we pick the least alignment. */
3651 if (x == stack_pointer_rtx && PUSH_ARGS)
3652 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
3656 nonzero &= ~(alignment - 1);
3660 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
3661 rtx new = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
3662 known_mode, known_ret,
3666 nonzero_for_hook &= cached_nonzero_bits (new, mode, known_x,
3667 known_mode, known_ret);
3669 return nonzero_for_hook;
3673 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
3674 /* If X is negative in MODE, sign-extend the value. */
3675 if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
3676 && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
3677 return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
3683 #ifdef LOAD_EXTEND_OP
3684 /* In many, if not most, RISC machines, reading a byte from memory
3685 zeros the rest of the register. Noticing that fact saves a lot
3686 of extra zero-extends. */
3687 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
3688 nonzero &= GET_MODE_MASK (GET_MODE (x));
3693 case UNEQ: case LTGT:
3694 case GT: case GTU: case UNGT:
3695 case LT: case LTU: case UNLT:
3696 case GE: case GEU: case UNGE:
3697 case LE: case LEU: case UNLE:
3698 case UNORDERED: case ORDERED:
3699 /* If this produces an integer result, we know which bits are set.
3700 Code here used to clear bits outside the mode of X, but that is wrong.  */
3702 /* Mind that MODE is the mode the caller wants to look at this
3703 operation in, and not the actual operation mode. We can wind
3704 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
3705 that describes the results of a vector compare. */
3706 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3707 && mode_width <= HOST_BITS_PER_WIDE_INT)
3708 nonzero = STORE_FLAG_VALUE;
3713 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3714 and num_sign_bit_copies. */
3715 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3716 == GET_MODE_BITSIZE (GET_MODE (x)))
3720 if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
3721 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
3726 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3727 and num_sign_bit_copies. */
3728 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3729 == GET_MODE_BITSIZE (GET_MODE (x)))
3735 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
3736 known_x, known_mode, known_ret)
3737 & GET_MODE_MASK (mode));
3741 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3742 known_x, known_mode, known_ret);
3743 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3744 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3748 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
3749 Otherwise, show all the bits in the outer mode but not the inner may be nonzero.  */
3751 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
3752 known_x, known_mode, known_ret);
3753 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3755 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3757 & (((HOST_WIDE_INT) 1
3758 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
3759 inner_nz |= (GET_MODE_MASK (mode)
3760 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
3763 nonzero &= inner_nz;
3767 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3768 known_x, known_mode, known_ret)
3769 & cached_nonzero_bits (XEXP (x, 1), mode,
3770 known_x, known_mode, known_ret);
3774 case UMIN: case UMAX: case SMIN: case SMAX:
3776 unsigned HOST_WIDE_INT nonzero0 =
3777 cached_nonzero_bits (XEXP (x, 0), mode,
3778 known_x, known_mode, known_ret);
3780 /* Don't call nonzero_bits for the second time if it cannot change anything.  */
3782 if ((nonzero & nonzero0) != nonzero)
3784 | cached_nonzero_bits (XEXP (x, 1), mode,
3785 known_x, known_mode, known_ret);
3789 case PLUS: case MINUS:
3791 case DIV: case UDIV:
3792 case MOD: case UMOD:
3793 /* We can apply the rules of arithmetic to compute the number of
3794 high- and low-order zero bits of these operations. We start by
3795 computing the width (position of the highest-order nonzero bit)
3796 and the number of low-order zero bits for each value. */
3798 unsigned HOST_WIDE_INT nz0 =
3799 cached_nonzero_bits (XEXP (x, 0), mode,
3800 known_x, known_mode, known_ret);
3801 unsigned HOST_WIDE_INT nz1 =
3802 cached_nonzero_bits (XEXP (x, 1), mode,
3803 known_x, known_mode, known_ret);
3804 int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
3805 int width0 = floor_log2 (nz0) + 1;
3806 int width1 = floor_log2 (nz1) + 1;
3807 int low0 = floor_log2 (nz0 & -nz0);
3808 int low1 = floor_log2 (nz1 & -nz1);
3809 HOST_WIDE_INT op0_maybe_minusp
3810 = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
3811 HOST_WIDE_INT op1_maybe_minusp
3812 = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
3813 unsigned int result_width = mode_width;
3819 result_width = MAX (width0, width1) + 1;
3820 result_low = MIN (low0, low1);
3823 result_low = MIN (low0, low1);
3826 result_width = width0 + width1;
3827 result_low = low0 + low1;
3832 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3833 result_width = width0;
3838 result_width = width0;
3843 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3844 result_width = MIN (width0, width1);
3845 result_low = MIN (low0, low1);
3850 result_width = MIN (width0, width1);
3851 result_low = MIN (low0, low1);
3857 if (result_width < mode_width)
3858 nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
3861 nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
3863 #ifdef POINTERS_EXTEND_UNSIGNED
3864 /* If pointers extend unsigned and this is an addition or subtraction
3865 to a pointer in Pmode, all the bits above ptr_mode are known to be zero.  */
3867 if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
3868 && (code == PLUS || code == MINUS)
3869 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
3870 nonzero &= GET_MODE_MASK (ptr_mode);
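
/* Editorial worked example for the MULT rule above: with
   nz0 == 0x0c (width 4, two low zero bits) and nz1 == 0x06 (width 3,
   one low zero bit), result_width == 7 and result_low == 3, so the
   product's nonzero bits are confined to the mask 0x78.  */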
3876 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3877 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3878 nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
3882 /* If this is a SUBREG formed for a promoted variable that has
3883 been zero-extended, we know that at least the high-order bits
3884 are zero, though others might be too. */
3886 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
3887 nonzero = GET_MODE_MASK (GET_MODE (x))
3888 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
3889 known_x, known_mode, known_ret);
3891 /* If the inner mode is a single word for both the host and target
3892 machines, we can compute this from which bits of the inner
3893 object might be nonzero. */
3894 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
3895 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
3896 <= HOST_BITS_PER_WIDE_INT))
3898 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
3899 known_x, known_mode, known_ret);
3901 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
3902 /* If this is a typical RISC machine, we only have to worry
3903 about the way loads are extended. */
3904 if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
3906 & (((unsigned HOST_WIDE_INT) 1
3907 << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
3909 : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
3910 || !MEM_P (SUBREG_REG (x)))
3913 /* On many CISC machines, accessing an object in a wider mode
3914 causes the high-order bits to become undefined. So they are
3915 not known to be zero. */
3916 if (GET_MODE_SIZE (GET_MODE (x))
3917 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
3918 nonzero |= (GET_MODE_MASK (GET_MODE (x))
3919 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
3928 /* The nonzero bits are in two classes: any bits within MODE
3929 that aren't in GET_MODE (x) are always significant. The rest of the
3930 nonzero bits are those that are significant in the operand of
3931 the shift when shifted the appropriate number of bits. This
3932 shows that high-order bits are cleared by the right shift and
3933 low-order bits by left shifts. */
3934 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3935 && INTVAL (XEXP (x, 1)) >= 0
3936 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3938 enum machine_mode inner_mode = GET_MODE (x);
3939 unsigned int width = GET_MODE_BITSIZE (inner_mode);
3940 int count = INTVAL (XEXP (x, 1));
3941 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
3942 unsigned HOST_WIDE_INT op_nonzero =
3943 cached_nonzero_bits (XEXP (x, 0), mode,
3944 known_x, known_mode, known_ret);
3945 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
3946 unsigned HOST_WIDE_INT outer = 0;
3948 if (mode_width > width)
3949 outer = (op_nonzero & nonzero & ~mode_mask);
3951 if (code == LSHIFTRT)
3953 else if (code == ASHIFTRT)
3957 /* If the sign bit may have been nonzero before the shift, we
3958 need to mark all the places it could have been copied to
3959 by the shift as possibly nonzero. */
3960 if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
3961 inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
3963 else if (code == ASHIFT)
3966 inner = ((inner << (count % width)
3967 | (inner >> (width - (count % width)))) & mode_mask);
3969 nonzero &= (outer | inner);
3975 /* This is at most the number of bits in the mode. */
3976 nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
3980 /* If CLZ has a known value at zero, then the nonzero bits are
3981 that value, plus the number of bits in the mode minus one. */
3982 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3983 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3989 /* If CTZ has a known value at zero, then the nonzero bits are
3990 that value, plus the number of bits in the mode minus one. */
3991 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3992 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
4003 unsigned HOST_WIDE_INT nonzero_true =
4004 cached_nonzero_bits (XEXP (x, 1), mode,
4005 known_x, known_mode, known_ret);
4007 /* Don't call nonzero_bits for the second time if it cannot change anything.  */
4009 if ((nonzero & nonzero_true) != nonzero)
4010 nonzero &= nonzero_true
4011 | cached_nonzero_bits (XEXP (x, 2), mode,
4012 known_x, known_mode, known_ret);
4023 /* See the macro definition above. */
4024 #undef cached_num_sign_bit_copies
4027 /* The function cached_num_sign_bit_copies is a wrapper around
4028 num_sign_bit_copies1. It avoids exponential behavior in
4029 num_sign_bit_copies1 when X has identical subexpressions on the
4030 first or the second level. */
4033 cached_num_sign_bit_copies (rtx x, enum machine_mode mode, rtx known_x,
4034 enum machine_mode known_mode,
4035 unsigned int known_ret)
4037 if (x == known_x && mode == known_mode)
4040 /* Try to find identical subexpressions. If found call
4041 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4042 the precomputed value for the subexpression as KNOWN_RET. */
4044 if (ARITHMETIC_P (x))
4046 rtx x0 = XEXP (x, 0);
4047 rtx x1 = XEXP (x, 1);
4049 /* Check the first level. */
4052 num_sign_bit_copies1 (x, mode, x0, mode,
4053 cached_num_sign_bit_copies (x0, mode, known_x,
4057 /* Check the second level. */
4058 if (ARITHMETIC_P (x0)
4059 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4061 num_sign_bit_copies1 (x, mode, x1, mode,
4062 cached_num_sign_bit_copies (x1, mode, known_x,
4066 if (ARITHMETIC_P (x1)
4067 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4069 num_sign_bit_copies1 (x, mode, x0, mode,
4070 cached_num_sign_bit_copies (x0, mode, known_x,
4075 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4078 /* Return the number of bits at the high-order end of X that are known to
4079 be equal to the sign bit. X will be used in mode MODE; if MODE is
4080 VOIDmode, X will be used in its own mode. The returned value will always
4081 be between 1 and the number of bits in MODE. */
4084 num_sign_bit_copies1 (rtx x, enum machine_mode mode, rtx known_x,
4085 enum machine_mode known_mode,
4086 unsigned int known_ret)
4088 enum rtx_code code = GET_CODE (x);
4089 unsigned int bitwidth = GET_MODE_BITSIZE (mode);
4090 int num0, num1, result;
4091 unsigned HOST_WIDE_INT nonzero;
4093 /* If we weren't given a mode, use the mode of X. If the mode is still
4094 VOIDmode, we don't know anything.  Likewise if one of the modes is floating-point.  */
4097 if (mode == VOIDmode)
4098 mode = GET_MODE (x);
4100 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
4103 /* For a smaller object, just ignore the high bits. */
4104 if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
4106 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
4107 known_x, known_mode, known_ret);
4109 num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
4112 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
4114 #ifndef WORD_REGISTER_OPERATIONS
4115 /* If this machine does not do all register operations on the entire
4116 register and MODE is wider than the mode of X, we can say nothing
4117 at all about the high-order bits. */
4120 /* Likewise on machines that do, if the mode of the object is smaller
4121 than a word and loads of that size don't sign extend, we can say
4122 nothing about the high order bits. */
4123 if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
4124 #ifdef LOAD_EXTEND_OP
4125 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
4136 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4137 /* If pointers extend signed and this is a pointer in Pmode, say that
4138 all the bits above ptr_mode are known to be sign bit copies. */
4139 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
4141 return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
4145 unsigned int copies_for_hook = 1, copies = 1;
4146 rtx new = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
4147 known_mode, known_ret,
4151 copies = cached_num_sign_bit_copies (new, mode, known_x,
4152 known_mode, known_ret);
4154 if (copies > 1 || copies_for_hook > 1)
4155 return MAX (copies, copies_for_hook);
4157 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4162 #ifdef LOAD_EXTEND_OP
4163 /* Some RISC machines sign-extend all loads of smaller than a word. */
4164 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
4165 return MAX (1, ((int) bitwidth
4166 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
4171 /* If the constant is negative, take its 1's complement and remask.
4172 Then see how many zero bits we have. */
4173 nonzero = INTVAL (x) & GET_MODE_MASK (mode);
4174 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4175 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4176 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4178 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
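
/* Editorial worked example: for (const_int -4) in SImode, the masked
   value is 0xfffffffc; its sign bit is set, so we complement to get 3
   and return 32 - floor_log2 (3) - 1 == 30: bits 31..2 all replicate
   the sign bit.  */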
4181 /* If this is a SUBREG for a promoted object that is sign-extended
4182 and we are looking at it in a wider mode, we know that at least the
4183 high-order bits are sign bit copies.  */
4185 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4187 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4188 known_x, known_mode, known_ret);
4189 return MAX ((int) bitwidth
4190 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
4194 /* For a smaller object, just ignore the high bits. */
4195 if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
4197 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4198 known_x, known_mode, known_ret);
4199 return MAX (1, (num0
4200 - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
4204 #ifdef WORD_REGISTER_OPERATIONS
4205 #ifdef LOAD_EXTEND_OP
4206 /* For paradoxical SUBREGs on machines where all register operations
4207 affect the entire register, just look inside. Note that we are
4208 passing MODE to the recursive call, so the number of sign bit copies
4209 will remain relative to that mode, not the inner mode. */
4211 /* This works only if loads sign extend. Otherwise, if we get a
4212 reload for the inner part, it may be loaded from the stack, and
4213 then we lose all sign bit copies that existed before the store to the stack.  */
4216 if ((GET_MODE_SIZE (GET_MODE (x))
4217 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4218 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4219 && MEM_P (SUBREG_REG (x)))
4220 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4221 known_x, known_mode, known_ret);
4227 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4228 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4232 return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4233 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4234 known_x, known_mode, known_ret));
4237 /* For a smaller object, just ignore the high bits. */
4238 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4239 known_x, known_mode, known_ret);
4240 return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4244 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4245 known_x, known_mode, known_ret);
4247 case ROTATE: case ROTATERT:
4248 /* If we are rotating left by a number of bits less than the number
4249 of sign bit copies, we can just subtract that amount from the number.  */
4251 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4252 && INTVAL (XEXP (x, 1)) >= 0
4253 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4255 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4256 known_x, known_mode, known_ret);
4257 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4258 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4263 /* In general, this subtracts one sign bit copy. But if the value
4264 is known to be positive, the number of sign bit copies is the
4265 same as that of the input. Finally, if the input has just one bit
4266 that might be nonzero, all the bits are copies of the sign bit. */
4267 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4268 known_x, known_mode, known_ret);
4269 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4270 return num0 > 1 ? num0 - 1 : 1;
4272 nonzero = nonzero_bits (XEXP (x, 0), mode);
4277 && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4282 case IOR: case AND: case XOR:
4283 case SMIN: case SMAX: case UMIN: case UMAX:
4284 /* Logical operations will preserve the number of sign-bit copies.
4285 MIN and MAX operations always return one of the operands. */
4286 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4287 known_x, known_mode, known_ret);
4288 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4289 known_x, known_mode, known_ret);
4290 return MIN (num0, num1);
4292 case PLUS: case MINUS:
4293 /* For addition and subtraction, we can have a 1-bit carry. However,
4294 if we are subtracting 1 from a positive number, there will not
4295 be such a carry. Furthermore, if the positive number is known to
4296 be 0 or 1, we know the result is either -1 or 0. */
4298 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4299 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4301 nonzero = nonzero_bits (XEXP (x, 0), mode);
4302 if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4303 return (nonzero == 1 || nonzero == 0 ? bitwidth
4304 : bitwidth - floor_log2 (nonzero) - 1);
4307 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4308 known_x, known_mode, known_ret);
4309 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4310 known_x, known_mode, known_ret);
4311 result = MAX (1, MIN (num0, num1) - 1);
4313 #ifdef POINTERS_EXTEND_UNSIGNED
4314 /* If pointers extend signed and this is an addition or subtraction
4315 to a pointer in Pmode, all the bits above ptr_mode are known to be sign bit copies.  */
4317 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4318 && (code == PLUS || code == MINUS)
4319 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
4320 result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
4321 - GET_MODE_BITSIZE (ptr_mode) + 1),
4327 /* The number of bits of the product is the sum of the number of
4328 bits of both terms.  However, unless one of the terms is known
4329 to be positive, we must allow for an additional bit since negating
4330 a negative number can remove one sign bit copy. */
4332 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4333 known_x, known_mode, known_ret);
4334 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4335 known_x, known_mode, known_ret);
4337 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4339 && (bitwidth > HOST_BITS_PER_WIDE_INT
4340 || (((nonzero_bits (XEXP (x, 0), mode)
4341 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4342 && ((nonzero_bits (XEXP (x, 1), mode)
4343 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
4346 return MAX (1, result);
4349 /* The result must be <= the first operand. If the first operand
4350 has the high bit set, we know nothing about the number of sign bit copies.  */
4352 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4354 else if ((nonzero_bits (XEXP (x, 0), mode)
4355 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4358 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4359 known_x, known_mode, known_ret);
4362 /* The result must be <= the second operand. */
4363 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4364 known_x, known_mode, known_ret);
4367 /* Similar to unsigned division, except that we have to worry about
4368 the case where the divisor is negative, in which case we have to add 1.  */
4370 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4371 known_x, known_mode, known_ret);
4373 && (bitwidth > HOST_BITS_PER_WIDE_INT
4374 || (nonzero_bits (XEXP (x, 1), mode)
4375 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4381 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4382 known_x, known_mode, known_ret);
4384 && (bitwidth > HOST_BITS_PER_WIDE_INT
4385 || (nonzero_bits (XEXP (x, 1), mode)
4386 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4392 /* Shifts by a constant add to the number of bits equal to the sign bit.  */
4394 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4395 known_x, known_mode, known_ret);
4396 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4397 && INTVAL (XEXP (x, 1)) > 0)
4398 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4403 /* Left shifts destroy copies. */
4404 if (GET_CODE (XEXP (x, 1)) != CONST_INT
4405 || INTVAL (XEXP (x, 1)) < 0
4406 || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
4409 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4410 known_x, known_mode, known_ret);
4411 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4414 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4415 known_x, known_mode, known_ret);
4416 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4417 known_x, known_mode, known_ret);
4418 return MIN (num0, num1);
4420 case EQ: case NE: case GE: case GT: case LE: case LT:
4421 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4422 case GEU: case GTU: case LEU: case LTU:
4423 case UNORDERED: case ORDERED:
4424 /* If the constant is negative, take its 1's complement and remask.
4425 Then see how many zero bits we have. */
4426 nonzero = STORE_FLAG_VALUE;
4427 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4428 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4429 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4431 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4437 /* If we haven't been able to figure it out by one of the above rules,
4438 see if some of the high-order bits are known to be zero. If so,
4439 count those bits and return one less than that amount. If we can't
4440 safely compute the mask for this mode, always return BITWIDTH. */
4442 bitwidth = GET_MODE_BITSIZE (mode);
4443 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4446 nonzero = nonzero_bits (x, mode);
4447 return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
4448 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4451 /* Calculate the rtx_cost of a single instruction. A return value of
4452 zero indicates an instruction pattern without a known cost. */
4455 insn_rtx_cost (rtx pat)
4460 /* Extract the single set rtx from the instruction pattern.
4461 We can't use single_set since we only have the pattern. */
4462 if (GET_CODE (pat) == SET)
4464 else if (GET_CODE (pat) == PARALLEL)
4467 for (i = 0; i < XVECLEN (pat, 0); i++)
4469 rtx x = XVECEXP (pat, 0, i);
4470 if (GET_CODE (x) == SET)
4483 cost = rtx_cost (SET_SRC (set), SET);
4484 return cost > 0 ? cost : COSTS_N_INSNS (1);
4487 /* Given an insn INSN and condition COND, return the condition in a
4488 canonical form to simplify testing by callers. Specifically:
4490 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4491 (2) Both operands will be machine operands; (cc0) will have been replaced.
4492 (3) If an operand is a constant, it will be the second operand.
4493 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4494 for GE, GEU, and LEU.
4496 If the condition cannot be understood, or is an inequality floating-point
4497 comparison which needs to be reversed, 0 will be returned.
4499 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4501 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4502 insn used in locating the condition was found. If a replacement test
4503 of the condition is desired, it should be placed in front of that
4504 insn and we will be sure that the inputs are still valid.
4506 If WANT_REG is nonzero, we wish the condition to be relative to that
4507 register, if possible. Therefore, do not canonicalize the condition
4508 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4509 to be a compare to a CC mode register.
4511 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST and at INSN.  */
4515 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4516 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4523 int reverse_code = 0;
4524 enum machine_mode mode;
4525 basic_block bb = BLOCK_FOR_INSN (insn);
4527 code = GET_CODE (cond);
4528 mode = GET_MODE (cond);
4529 op0 = XEXP (cond, 0);
4530 op1 = XEXP (cond, 1);
4533 code = reversed_comparison_code (cond, insn);
4534 if (code == UNKNOWN)
4540 /* If we are comparing a register with zero, see if the register is set
4541 in the previous insn to a COMPARE or a comparison operation. Perform
4542 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args in cse.c does.  */
4545 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4546 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4547 && op1 == CONST0_RTX (GET_MODE (op0))
4550 /* Set nonzero when we find something of interest. */
4554 /* If comparison with cc0, import the actual comparison from the compare insn.  */
4558 if ((prev = prev_nonnote_insn (prev)) == 0
4559 || !NONJUMP_INSN_P (prev)
4560 || (set = single_set (prev)) == 0
4561 || SET_DEST (set) != cc0_rtx)
4564 op0 = SET_SRC (set);
4565 op1 = CONST0_RTX (GET_MODE (op0));
4571 /* If this is a COMPARE, pick up the two things being compared. */
4572 if (GET_CODE (op0) == COMPARE)
4574 op1 = XEXP (op0, 1);
4575 op0 = XEXP (op0, 0);
4578 else if (!REG_P (op0))
4581 /* Go back to the previous insn. Stop if it is not an INSN. We also
4582 stop if it isn't a single set or if it has a REG_INC note because
4583 we don't want to bother dealing with it. */
4585 if ((prev = prev_nonnote_insn (prev)) == 0
4586 || !NONJUMP_INSN_P (prev)
4587 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4588 /* In cfglayout mode, there do not have to be labels at the
4589 beginning of a block, or jumps at the end, so the previous
4590 conditions would not stop us when we reach bb boundary. */
4591 || BLOCK_FOR_INSN (prev) != bb)
4594 set = set_of (op0, prev);
4597 && (GET_CODE (set) != SET
4598 || !rtx_equal_p (SET_DEST (set), op0)))
4601 /* If this is setting OP0, get what it sets it to if it looks like a comparison.  */
4605 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4606 #ifdef FLOAT_STORE_FLAG_VALUE
4607 REAL_VALUE_TYPE fsfv;
4610 /* ??? We may not combine comparisons done in a CCmode with
4611 comparisons not done in a CCmode. This is to aid targets
4612 like Alpha that have an IEEE compliant EQ instruction, and
4613 a non-IEEE compliant BEQ instruction. The use of CCmode is
4614 actually artificial, simply to prevent the combination, but
4615 should not affect other platforms.
4617 However, we must allow VOIDmode comparisons to match either
4618 CCmode or non-CCmode comparison, because some ports have
4619 modeless comparisons inside branch patterns.
4621 ??? This mode check should perhaps look more like the mode check
4622 in simplify_comparison in combine. */
4624 if ((GET_CODE (SET_SRC (set)) == COMPARE
4627 && GET_MODE_CLASS (inner_mode) == MODE_INT
4628 && (GET_MODE_BITSIZE (inner_mode)
4629 <= HOST_BITS_PER_WIDE_INT)
4630 && (STORE_FLAG_VALUE
4631 & ((HOST_WIDE_INT) 1
4632 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4633 #ifdef FLOAT_STORE_FLAG_VALUE
4635 && SCALAR_FLOAT_MODE_P (inner_mode)
4636 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4637 REAL_VALUE_NEGATIVE (fsfv)))
4640 && COMPARISON_P (SET_SRC (set))))
4641 && (((GET_MODE_CLASS (mode) == MODE_CC)
4642 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4643 || mode == VOIDmode || inner_mode == VOIDmode))
4645 else if (((code == EQ
4647 && (GET_MODE_BITSIZE (inner_mode)
4648 <= HOST_BITS_PER_WIDE_INT)
4649 && GET_MODE_CLASS (inner_mode) == MODE_INT
4650 && (STORE_FLAG_VALUE
4651 & ((HOST_WIDE_INT) 1
4652 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4653 #ifdef FLOAT_STORE_FLAG_VALUE
4655 && SCALAR_FLOAT_MODE_P (inner_mode)
4656 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4657 REAL_VALUE_NEGATIVE (fsfv)))
4660 && COMPARISON_P (SET_SRC (set))
4661 && (((GET_MODE_CLASS (mode) == MODE_CC)
4662 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4663 || mode == VOIDmode || inner_mode == VOIDmode))
4673 else if (reg_set_p (op0, prev))
4674 /* If this sets OP0, but not directly, we have to give up. */
4679 /* If the caller is expecting the condition to be valid at INSN,
4680 make sure X doesn't change before INSN. */
4681 if (valid_at_insn_p)
4682 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
4684 if (COMPARISON_P (x))
4685 code = GET_CODE (x);
4688 code = reversed_comparison_code (x, prev);
4689 if (code == UNKNOWN)
4694 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
4700 /* If constant is first, put it last. */
4701 if (CONSTANT_P (op0))
4702 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
4704 /* If OP0 is the result of a comparison, we weren't able to find what
4705 was really being compared, so fail. */
4707 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
4710 /* Canonicalize any ordered comparison with integers involving equality
4711 if we can do computations in the relevant mode and we do not overflow.  */
4714 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
4715 && GET_CODE (op1) == CONST_INT
4716 && GET_MODE (op0) != VOIDmode
4717 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
4719 HOST_WIDE_INT const_val = INTVAL (op1);
4720 unsigned HOST_WIDE_INT uconst_val = const_val;
4721 unsigned HOST_WIDE_INT max_val
4722 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
4727 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
4728 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
4731 /* When cross-compiling, const_val might be sign-extended from
4732 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
4734 if ((HOST_WIDE_INT) (const_val & max_val)
4735 != (((HOST_WIDE_INT) 1
4736 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
4737 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
4741 if (uconst_val < max_val)
4742 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
4746 if (uconst_val != 0)
4747 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
4755 /* Never return CC0; return zero instead. */
4759 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
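
/* Editorial worked example of rule (4) above:
   (le (reg:SI X) (const_int 4)) is canonicalized to
   (lt (reg:SI X) (const_int 5)), and likewise (leu ... 4) becomes
   (ltu ... 5), as long as bumping the constant cannot overflow the
   mode of the first operand.  */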
4762 /* Given a jump insn JUMP, return the condition that will cause it to branch
4763 to its JUMP_LABEL. If the condition cannot be understood, or is an
4764 inequality floating-point comparison which needs to be reversed, 0 will be returned.
4767 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4768 insn used in locating the condition was found. If a replacement test
4769 of the condition is desired, it should be placed in front of that
4770 insn and we will be sure that the inputs are still valid. If EARLIEST
4771 is null, the returned condition will be valid at INSN.
4773 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
4774 compare to a CC mode register.
4776 VALID_AT_INSN_P is the same as for canonicalize_condition. */
4779 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
4785 /* If this is not a standard conditional jump, we can't parse it. */
4787 || ! any_condjump_p (jump))
4789 set = pc_set (jump);
4791 cond = XEXP (SET_SRC (set), 0);
4793 /* If this branches to JUMP_LABEL when the condition is false, reverse the condition.  */
4796 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
4797 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
4799 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
4800 allow_cc_mode, valid_at_insn_p);
4803 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
4804 TARGET_MODE_REP_EXTENDED.
4806 Note that we assume that the property of
4807 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
4808 narrower than mode B. I.e., if A is a mode narrower than B then in
4809 order to be able to operate on it in mode B, mode A needs to
4810 satisfy the requirements set by the representation of mode B. */
4813 init_num_sign_bit_copies_in_rep (void)
4815 enum machine_mode mode, in_mode;
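
  /* Editorial note on the loops below: the outer loop deliberately
     advances with GET_MODE_WIDER_MODE (mode) rather than in_mode; the
     inner loop only exits once mode == in_mode, so widening MODE there
     steps IN_MODE to the next wider integral mode.  */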
4817 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
4818 in_mode = GET_MODE_WIDER_MODE (mode))
4819 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
4820 mode = GET_MODE_WIDER_MODE (mode))
4822 enum machine_mode i;
4824 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
4825 extends to the next widest mode. */
4826 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
4827 || GET_MODE_WIDER_MODE (mode) == in_mode);
4829 /* We are in in_mode. Count how many bits outside of mode
4830 have to be copies of the sign-bit. */
4831 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
4833 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
4835 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
4836 /* We can only check sign-bit copies starting from the
4837 top-bit. In order to be able to check the bits we
4838 have already seen we pretend that subsequent bits
4839 have to be sign-bit copies too. */
4840 || num_sign_bit_copies_in_rep [in_mode][mode])
4841 num_sign_bit_copies_in_rep [in_mode][mode]
4842 += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
4847 /* Suppose that truncation from the machine mode of X to MODE is not a
4848 no-op. See if there is anything special about X so that we can
4849 assume it already contains a truncated value of MODE. */
4852 truncated_to_mode (enum machine_mode mode, rtx x)
4854 /* This register has already been used in MODE without explicit truncation.  */
4856 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
4859 /* See if we already satisfy the requirements of MODE. If yes we
4860 can just switch to MODE. */
4861 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
4862 && (num_sign_bit_copies (x, GET_MODE (x))
4863 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
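
/* Editorial worked example: on a target whose TARGET_MODE_REP_EXTENDED
   hook says SImode values are kept sign-extended in DImode registers,
   num_sign_bit_copies_in_rep[DImode][SImode] is 32, so any DImode X
   with at least 33 sign-bit copies already satisfies SImode's
   representation and the truncation is a no-op.  */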
4869 /* Initialize non_rtx_starting_operands, which is used to speed up for_each_rtx.  */
4875 for (i = 0; i < NUM_RTX_CODE; i++)
4877 const char *format = GET_RTX_FORMAT (i);
4878 const char *first = strpbrk (format, "eEV");
4879 non_rtx_starting_operands[i] = first ? first - format : -1;
4882 init_num_sign_bit_copies_in_rep ();
4885 /* Check whether this is a constant pool constant. */
4887 constant_pool_constant_p (rtx x)
4889 x = avoid_constant_pool_reference (x);
4890 return GET_CODE (x) == CONST_DOUBLE;