/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
#include "rtl-iter.h"
#include "hard-reg-set.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
                                          const_rtx, machine_mode,
                                          unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
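
/* The function above is a small-buffer optimization: the worklist lives in
   a fixed stack array until it overflows, at which point its contents
   migrate once into a growable heap vector.  Below is a hedged, standalone
   toy (compilable outside GCC; all names are illustrative and not part of
   this file) showing the same pattern in miniature.  */

#if 0
#include <cstring>
#include <vector>
#include <cassert>

struct toy_worklist
{
  static const size_t LOCAL_ELEMS = 16;
  int stack[LOCAL_ELEMS];
  std::vector<int> heap;

  /* Store X at index I and return the (possibly moved) base pointer,
     just as add_single_to_queue returns the new base of ARRAY.  */
  int *add_single (int *base, size_t i, int x)
  {
    if (base == stack)
      {
        if (i < LOCAL_ELEMS)
          {
            base[i] = x;
            return base;
          }
        /* First overflow: move everything to the heap in one memcpy.  */
        assert (i == LOCAL_ELEMS);
        if (heap.size () <= i)
          heap.resize (i + 1);
        base = heap.data ();
        memcpy (base, stack, sizeof (stack));
        base[LOCAL_ELEMS] = x;
        return base;
      }
    /* Already on the heap: overwrite in place, or append (which may
       reallocate, hence the returned base pointer).  */
    if (i < heap.size ())
      {
        base[i] = x;
        return base;
      }
    assert (i == heap.size ());
    heap.push_back (x);
    return heap.data ();
  }
};
#endif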
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         handling below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
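
/* These instantiations back the FOR_EACH_SUBRTX family of walkers declared
   in rtl-iter.h.  A hedged sketch of the client-side idiom follows (the
   helper name is hypothetical, not part of this file): walk X, counting
   registers while skipping addresses inside MEMs via skip_subrtxes.  */

#if 0
static unsigned int
sketch_count_nonmem_regs (rtx x)
{
  unsigned int count = 0;
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx sub = *iter;
      if (MEM_P (sub))
        iter.skip_subrtxes ();  /* Do not look inside the address.  */
      else if (REG_P (sub))
        count++;
    }
  return count;
}
#endif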
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Compute an approximation for the offset between the register
   FROM and TO for the current function, as it was at the start
   of the function.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
    {
      const int from;
      const int to;
    } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET
     before the reload pass.  We need to give at least
     an estimate for the resulting frame size.  */
  if (! reload_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
        return offset1;
      else if (from == STACK_POINTER_REGNUM)
        return -offset1;
      else
        return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
      if (table[i].from == from)
        {
          if (table[i].to == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].to)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return offset1 - offset2;
                }
            }
        }
      else if (table[i].to == from)
        {
          if (table[i].from == to)
            {
              INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                          offset1);
              return - offset1;
            }
          for (j = 0; j < ARRAY_SIZE (table); j++)
            {
              if (table[j].to == to
                  && table[j].from == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 + offset2;
                }
              if (table[j].from == to
                  && table[j].to == table[i].from)
                {
                  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
                                              offset1);
                  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
                                              offset2);
                  return - offset1 - offset2;
                }
            }
        }

  /* If the requested register combination was not found,
     try a simpler combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
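
/* The pairwise search above relies on elimination offsets composing
   transitively: if FROM->X and TO->X are both known, FROM->TO is their
   difference; if FROM->X and X->TO are known, FROM->TO is their sum.
   A hedged, standalone toy illustration (made-up numbers, not GCC code):  */

#if 0
#include <stdio.h>

/* anchor_offset[r] is each register's address relative to one common
   anchor, so the FROM->TO offset is a simple difference.  */
static const long anchor_offset[3] = { 0, 16, 48 };    /* sp, fp, ap */

static long
toy_register_offset (int from, int to)
{
  /* offset (FROM -> TO) = offset (FROM -> anchor) - offset (TO -> anchor).  */
  return anchor_offset[from] - anchor_offset[to];
}

int
main (void)
{
  printf ("fp -> sp: %ld\n", toy_register_offset (1, 0));    /* 16 */
  printf ("ap -> fp: %ld\n", toy_register_offset (2, 1));    /* 32 */
  return 0;
}
#endif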
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  gcc_checking_assert (mode == BLKmode || known_size_p (size));

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
        {
          tree decl;
          poly_int64 decl_size;

          if (maybe_lt (offset, 0))
            return 1;
          if (!known_size_p (size))
            return maybe_ne (offset, 0);

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            {
              if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
                decl_size = -1;
            }
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (!known_size_p (decl_size) || known_eq (decl_size, 0)
                  ? maybe_ne (offset, 0)
                  : maybe_gt (offset + size, decl_size));
        }

      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        {
#ifdef RED_ZONE_SIZE
          poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
          poly_int64 red_zone_size = 0;
#endif
          poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
          poly_int64 low_bound, high_bound;

          if (!known_size_p (size))
            return 1;

          if (x == frame_pointer_rtx)
            {
              if (FRAME_GROWS_DOWNWARD)
                {
                  high_bound = targetm.starting_frame_offset ();
                  low_bound = high_bound - get_frame_size ();
                }
              else
                {
                  low_bound = targetm.starting_frame_offset ();
                  high_bound = low_bound + get_frame_size ();
                }
            }
          else if (x == hard_frame_pointer_rtx)
            {
              poly_int64 sp_offset
                = get_initial_register_offset (STACK_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound = sp_offset - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = sp_offset + red_zone_size + stack_boundary;
              low_bound = ap_offset
                          + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                          - crtl->args.size
#endif
                          - stack_boundary;
#endif
            }
          else if (x == stack_pointer_rtx)
            {
              poly_int64 ap_offset
                = get_initial_register_offset (ARG_POINTER_REGNUM,
                                               STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
              low_bound = - red_zone_size - stack_boundary;
              high_bound = ap_offset
                           + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
                           + crtl->args.size
#endif
                           + stack_boundary;
#else
              high_bound = red_zone_size + stack_boundary;
              low_bound = ap_offset
                          + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
                          - crtl->args.size
#endif
                          - stack_boundary;
#endif
            }
          else
            {
              /* We assume that accesses are safe to at least the
                 next stack boundary.
                 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + stack_boundary;
              low_bound = FIRST_PARM_OFFSET (current_function_decl)
                          - crtl->args.size - stack_boundary;
#else
              low_bound = FIRST_PARM_OFFSET (current_function_decl)
                          - stack_boundary;
              high_bound = FIRST_PARM_OFFSET (current_function_decl)
                           + crtl->args.size + stack_boundary;
#endif
            }

          if (known_ge (offset, low_bound)
              && known_le (offset, high_bound - size))
            return 0;
          return 1;
        }
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      {
        poly_int64 const_x1;

        /* An address is assumed not to trap if:
           - it is the pic register plus a const unspec without offset.  */
        if (XEXP (x, 0) == pic_offset_table_rtx
            && GET_CODE (XEXP (x, 1)) == CONST
            && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
            && known_eq (offset, 0))
          return 0;

        /* - or it is an address that can't trap plus a constant integer.  */
        if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
            && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
                                       size, mode, unaligned_mems))
          return 0;

        return 1;
      }

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}
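
/* A hedged sketch of the predicate in use (the helper is hypothetical,
   not part of this file): a code motion pass would refuse to hoist a
   load whose address might fault.  */

#if 0
static bool
sketch_safe_to_hoist_load_p (rtx mem)
{
  gcc_assert (MEM_P (mem));
  /* Constant-pool and in-bounds stack addresses come back 0 from
     rtx_addr_can_trap_p; anything else is suspect.  */
  return !rtx_addr_can_trap_p (XEXP (mem, 0));
}
#endif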
/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}
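
/* A hedged sketch of these two splitters in use (the helper is
   hypothetical, illustrative only): given an address such as
   (const (plus (symbol_ref "x") (const_int 8))), split_const recovers the
   symbol and the displacement; for an rtx with no apparent term the offset
   comes back as const0_rtx (or 0 for strip_offset).  */

#if 0
static void
sketch_print_base_and_offset (rtx addr)
{
  rtx base, offset;
  split_const (addr, &base, &offset);
  if (GET_CODE (base) == SYMBOL_REF)
    fprintf (stderr, "%s + " HOST_WIDE_INT_PRINT_DEC "\n",
             XSTR (base, 0), INTVAL (offset));
}
#endif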
/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && !read_modify_subreg_p (SET_DEST (body)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case CLOBBER_HIGH:
      gcc_assert (REG_P (XEXP (body, 0)));
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Return true if REG is set or clobbered inside INSN.  */

bool
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
        {
          rtx mem = *iter;
          if (mem
              && MEM_P (mem)
              && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
            {
              if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
                return true;
              iter.skip_subrtxes ();
            }
        }
    }

  return set_of (reg, insn) != NULL_RTX;
}
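
/* A hedged sketch (hypothetical helper, not part of this file) of how the
   *_between_p predicates combine in practice: REG can be forward-substituted
   from DEF to USE only if nothing in between writes it or reads it.  */

#if 0
static bool
sketch_reg_live_range_clean_p (rtx reg, const rtx_insn *def,
                               const rtx_insn *use)
{
  return !reg_set_between_p (reg, def, use)
         && !reg_used_between_p (reg, def, use);
}
#endif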
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

bool
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  switch (code)
    {
    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

bool
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return false;
  poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
  /* The inner and outer modes of a subreg must be ordered, so that we
     can tell whether they're paradoxical or partial.  */
  gcc_checking_assert (ordered_p (isize, osize));
  return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (GET_CODE (pat) == CLOBBER_HIGH
          && REGNO (data->pat) == REGNO (XEXP (pat, 0))
          && reg_is_clobbered_by_clobber_high (data->pat, XEXP (pat, 0)))
      || (GET_CODE (pat) != CLOBBER_HIGH && !MEM_P (x)
          && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
static void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
            case CLOBBER_HIGH:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (set && !set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
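
/* A hedged sketch of the canonical client idiom: most passes call the
   single_set wrapper (declared in rtl.h) and transform an insn only if it
   reduces to one live SET.  The helper below is hypothetical.  */

#if 0
static bool
sketch_is_simple_reg_copy (rtx_insn *insn)
{
  rtx set = single_set (insn);
  return set != NULL_RTX
         && REG_P (SET_DEST (set))
         && REG_P (SET_SRC (set));
}
#endif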
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
      poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
          return 0;
      return
        REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
        && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                                  offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const rtx_insn *insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER
              || GET_CODE (tem) == CLOBBER_HIGH)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

bool
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return false;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return true;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return true;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return false;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return true;
        }
    }

  return false;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx*) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET
      || GET_CODE (x) == CLOBBER
      || GET_CODE (x) == CLOBBER_HIGH)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
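
/* A hedged sketch of the note_stores callback protocol (the helpers are
   hypothetical): count how many REG destinations an insn pattern writes.
   The callback receives the stored-to rtx, the enclosing SET/CLOBBER, and
   the opaque DATA pointer, exactly as documented above.  */

#if 0
static void
sketch_count_reg_stores_1 (rtx dest, const_rtx pat ATTRIBUTE_UNUSED,
                           void *data)
{
  if (REG_P (dest))
    ++*(unsigned int *) data;
}

static unsigned int
sketch_count_reg_stores (const rtx_insn *insn)
{
  unsigned int count = 0;
  note_stores (PATTERN (insn), sketch_count_reg_stores_1, &count);
  return count;
}
#endif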
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const rtx_insn *insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register, is a
   complete rather than read-modify-write destination, and contains
   register TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
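
/* A minimal hedged sketch of find_reg_note in use (hypothetical helper):
   prefer the REG_EQUAL annotation when present, else fall back to the
   SET source.  */

#if 0
static rtx
sketch_known_value (rtx_insn *insn, rtx set)
{
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  return note ? XEXP (note, 0) : SET_SRC (set);
}
#endif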
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note; return 0 otherwise.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */

void
add_args_size_note (rtx_insn *insn, poly_int64 value)
{
  gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
  add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Duplicate NOTE and return the copy.  */

rtx
duplicate_reg_note (rtx note)
{
  reg_note kind = REG_NOTE_KIND (note);

  if (GET_CODE (note) == INT_LIST)
    return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
  else if (GET_CODE (note) == EXPR_LIST)
    return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
  else
    return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx_insn *insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
   Return true if any note has been removed.  */

bool
remove_reg_equal_equiv_notes (rtx_insn *insn)
{
  rtx *loc;
  bool ret = false;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        {
          *loc = XEXP (*loc, 1);
          ret = true;
        }
      else
        loc = &XEXP (*loc, 1);
    }
  return ret;
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no instructions
   or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
2704 /* Similar to above, except that it also rejects register pre- and post-incrementing. */
2708 side_effects_p (const_rtx x)
2710 const RTX_CODE code = GET_CODE (x);
2727 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2728 when some combination can't be done. If we see one, don't think
2729 that we can simplify the expression. */
2730 return (GET_MODE (x) != VOIDmode);
2739 case UNSPEC_VOLATILE:
2745 if (MEM_VOLATILE_P (x))
2752 /* Recursively scan the operands of this expression. */
2755 const char *fmt = GET_RTX_FORMAT (code);
2758 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2762 if (side_effects_p (XEXP (x, i)))
2765 else if (fmt[i] == 'E')
2768 for (j = 0; j < XVECLEN (x, i); j++)
2769 if (side_effects_p (XVECEXP (x, i, j)))
2777 /* Return nonzero if evaluating rtx X might cause a trap.
2778 FLAGS controls how to consider MEMs. A nonzero value means the context
2779 of the access may have changed from the original, such that the
2780 address may have become invalid. */
2783 may_trap_p_1 (const_rtx x, unsigned flags)
2789 /* We make no distinction currently, but this function is part of
2790 the internal target-hooks ABI so we keep the parameter as
2791 "unsigned flags". */
2792 bool code_changed = flags != 0;
2796 code = GET_CODE (x);
2799 /* Handle these cases quickly. */
2811 return targetm.unspec_may_trap_p (x, flags);
2813 case UNSPEC_VOLATILE:
2819 return MEM_VOLATILE_P (x);
2821 /* Memory ref can trap unless it's a static var or a stack slot. */
2823 /* Recognize specific pattern of stack checking probes. */
2824 if (flag_stack_check
2825 && MEM_VOLATILE_P (x)
2826 && XEXP (x, 0) == stack_pointer_rtx)
2828 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2829 reference; moving it out of context, such as when moving code
2830 during optimization, might cause its address to become invalid. */
2831 code_changed
2832 || !MEM_NOTRAP_P (x))
2834 poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2835 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2836 GET_MODE (x), code_changed);
2841 /* Division by a non-constant might trap. */
2846 if (HONOR_SNANS (x))
2848 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2849 return flag_trapping_math;
2850 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2855 /* An EXPR_LIST is used to represent a function call. This
2856 certainly may trap. */
2865 /* Some floating point comparisons may trap. */
2866 if (!flag_trapping_math)
2868 /* ??? There is no machine independent way to check for tests that trap
2869 when COMPARE is used, though many targets do make this distinction.
2870 For instance, sparc uses CCFPE for compares which generate exceptions
2871 and CCFP for compares which do not generate exceptions. */
2874 /* But often the compare has some CC mode, so check operand modes as well. */
2876 if (HONOR_NANS (XEXP (x, 0))
2877 || HONOR_NANS (XEXP (x, 1)))
2883 if (HONOR_SNANS (x))
2885 /* Often the comparison is in CC mode, so check operand modes. */
2886 if (HONOR_SNANS (XEXP (x, 0))
2887 || HONOR_SNANS (XEXP (x, 1)))
2892 /* Conversion of floating point might trap. */
2893 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2900 /* These operations don't trap even with floating point. */
2904 /* Any floating arithmetic may trap. */
2905 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2909 fmt = GET_RTX_FORMAT (code);
2910 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2914 if (may_trap_p_1 (XEXP (x, i), flags))
2917 else if (fmt[i] == 'E')
2920 for (j = 0; j < XVECLEN (x, i); j++)
2921 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2928 /* Return nonzero if evaluating rtx X might cause a trap. */
2931 may_trap_p (const_rtx x)
2933 return may_trap_p_1 (x, 0);
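/* For example (editorial note): (div:SI (reg:SI A) (reg:SI B)) satisfies
   may_trap_p, because integer division by a non-constant may trap, while
   (plus:SI (reg:SI A) (reg:SI B)) does not.  */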
2936 /* Same as above, but additionally return nonzero if evaluating rtx X might
2937 cause a fault. We define a fault for the purpose of this function as an
2938 erroneous execution condition that cannot be encountered during the normal
2939 execution of a valid program; the typical example is an unaligned memory
2940 access on a strict alignment machine. The compiler guarantees that it
2941 doesn't generate code that will fault from a valid program, but this
2942 guarantee doesn't mean anything for individual instructions. Consider
2943 the following example:
2945 struct S { int d; union { char *cp; int *ip; }; };
2947 int foo(struct S *s)
2948 {
2949 if (s->d == 1)
2950 return *s->ip;
2951 else
2952 return *s->cp;
2953 }
2955 on a strict alignment machine. In a valid program, foo will never be
2956 invoked on a structure for which d is equal to 1 and the underlying
2957 unique field of the union not aligned on a 4-byte boundary, but the
2958 expression *s->ip might cause a fault if considered individually.
2960 At the RTL level, potentially problematic expressions will almost always
2961 verify may_trap_p; for example, the above dereference can be emitted as
2962 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2963 However, suppose that foo is inlined in a caller that causes s->cp to
2964 point to a local character variable and guarantees that s->d is not set
2965 to 1; foo may have been effectively translated into pseudo-RTL as:
2967 if ((reg:SI) == 1)
2968 (set (reg:SI) (mem:SI (%fp - 7)))
2969 else
2970 (set (reg:QI) (mem:QI (%fp - 7)))
2972 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2973 memory reference to a stack slot, but it will certainly cause a fault
2974 on a strict alignment machine. */
2977 may_trap_or_fault_p (const_rtx x)
2979 return may_trap_p_1 (x, 1);
2982 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2983 i.e., an inequality. */
2986 inequality_comparisons_p (const_rtx x)
2990 const enum rtx_code code = GET_CODE (x);
3018 len = GET_RTX_LENGTH (code);
3019 fmt = GET_RTX_FORMAT (code);
3021 for (i = 0; i < len; i++)
3025 if (inequality_comparisons_p (XEXP (x, i)))
3028 else if (fmt[i] == 'E')
3031 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3032 if (inequality_comparisons_p (XVECEXP (x, i, j)))
3040 /* Replace any occurrence of FROM in X with TO. The function does
3041 not enter into CONST_DOUBLE for the replace.
3043 Note that copying is not done, so X must not be shared unless all copies are to be modified.
3046 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3047 those pointer-equal ones. */
3050 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3058 /* Allow this function to make replacements in EXPR_LISTs. */
3065 && REGNO (x) == REGNO (from))
3067 gcc_assert (GET_MODE (x) == GET_MODE (from));
3070 else if (GET_CODE (x) == SUBREG)
3072 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3074 if (CONST_INT_P (new_rtx))
3076 x = simplify_subreg (GET_MODE (x), new_rtx,
3077 GET_MODE (SUBREG_REG (x)),
3082 SUBREG_REG (x) = new_rtx;
3086 else if (GET_CODE (x) == ZERO_EXTEND)
3088 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3090 if (CONST_INT_P (new_rtx))
3092 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3093 new_rtx, GET_MODE (XEXP (x, 0)));
3097 XEXP (x, 0) = new_rtx;
3102 fmt = GET_RTX_FORMAT (GET_CODE (x));
3103 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3106 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3107 else if (fmt[i] == 'E')
3108 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3109 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3110 from, to, all_regs);
3116 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3117 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3120 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3122 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3124 if (JUMP_TABLE_DATA_P (x))
3127 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3128 int len = GET_NUM_ELEM (vec);
3129 for (int i = 0; i < len; ++i)
3131 rtx ref = RTVEC_ELT (vec, i);
3132 if (XEXP (ref, 0) == old_label)
3134 XEXP (ref, 0) = new_label;
3135 if (update_label_nuses)
3137 ++LABEL_NUSES (new_label);
3138 --LABEL_NUSES (old_label);
3145 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3146 field. This is not handled by the iterator because it doesn't
3147 handle unprinted ('0') fields. */
3148 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3149 JUMP_LABEL (x) = new_label;
3151 subrtx_ptr_iterator::array_type array;
3152 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3157 if (GET_CODE (x) == SYMBOL_REF
3158 && CONSTANT_POOL_ADDRESS_P (x))
3160 rtx c = get_pool_constant (x);
3161 if (rtx_referenced_p (old_label, c))
3163 /* Create a copy of constant C; replace the label inside
3164 but do not update LABEL_NUSES because uses in the constant pool are not counted. */
3166 rtx new_c = copy_rtx (c);
3167 replace_label (&new_c, old_label, new_label, false);
3169 /* Add the new constant NEW_C to constant pool and replace
3170 the old reference to constant by new reference. */
3171 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3172 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3176 if ((GET_CODE (x) == LABEL_REF
3177 || GET_CODE (x) == INSN_LIST)
3178 && XEXP (x, 0) == old_label)
3180 XEXP (x, 0) = new_label;
3181 if (update_label_nuses)
3183 ++LABEL_NUSES (new_label);
3184 --LABEL_NUSES (old_label);
3192 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3193 rtx_insn *new_label, bool update_label_nuses)
3195 rtx insn_as_rtx = insn;
3196 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3197 gcc_checking_assert (insn_as_rtx == insn);
3200 /* Return true if X is referenced in BODY. */
3203 rtx_referenced_p (const_rtx x, const_rtx body)
3205 subrtx_iterator::array_type array;
3206 FOR_EACH_SUBRTX (iter, array, body, ALL)
3207 if (const_rtx y = *iter)
3209 /* Check if a label_ref Y refers to label X. */
3210 if (GET_CODE (y) == LABEL_REF
3212 && label_ref_label (y) == x)
3215 if (rtx_equal_p (x, y))
3218 /* If Y is a reference to a pool constant, traverse the constant. */
3219 if (GET_CODE (y) == SYMBOL_REF
3220 && CONSTANT_POOL_ADDRESS_P (y))
3221 iter.substitute (get_pool_constant (y));
3226 /* If INSN is a tablejump, return true and store the label (the one before the
3227 jump table) in *LABELP and the jump table in *TABLEP. LABELP and TABLEP may be NULL. */
3230 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3231 rtx_jump_table_data **tablep)
3236 rtx target = JUMP_LABEL (insn);
3237 if (target == NULL_RTX || ANY_RETURN_P (target))
3240 rtx_insn *label = as_a<rtx_insn *> (target);
3241 rtx_insn *table = next_insn (label);
3242 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3248 *tablep = as_a <rtx_jump_table_data *> (table);
3252 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3253 constant that is not in the constant pool and not in the condition
3254 of an IF_THEN_ELSE. */
3257 computed_jump_p_1 (const_rtx x)
3259 const enum rtx_code code = GET_CODE (x);
3276 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3277 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3280 return (computed_jump_p_1 (XEXP (x, 1))
3281 || computed_jump_p_1 (XEXP (x, 2)));
3287 fmt = GET_RTX_FORMAT (code);
3288 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3291 && computed_jump_p_1 (XEXP (x, i)))
3294 else if (fmt[i] == 'E')
3295 for (j = 0; j < XVECLEN (x, i); j++)
3296 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3303 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3305 Tablejumps and casesi insns are not considered indirect jumps;
3306 we can recognize them by a (use (label_ref)). */
3309 computed_jump_p (const rtx_insn *insn)
3314 rtx pat = PATTERN (insn);
3316 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3317 if (JUMP_LABEL (insn) != NULL)
3320 if (GET_CODE (pat) == PARALLEL)
3322 int len = XVECLEN (pat, 0);
3323 int has_use_labelref = 0;
3325 for (i = len - 1; i >= 0; i--)
3326 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3327 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3330 has_use_labelref = 1;
3334 if (! has_use_labelref)
3335 for (i = len - 1; i >= 0; i--)
3336 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3337 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3338 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3341 else if (GET_CODE (pat) == SET
3342 && SET_DEST (pat) == pc_rtx
3343 && computed_jump_p_1 (SET_SRC (pat)))
3351 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3352 the equivalent add insn and pass the result to FN, using DATA as the final argument. */
3356 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3358 rtx x = XEXP (mem, 0);
3359 switch (GET_CODE (x))
3364 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3365 rtx r1 = XEXP (x, 0);
3366 rtx c = gen_int_mode (size, GET_MODE (r1));
3367 return fn (mem, x, r1, r1, c, data);
3373 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3374 rtx r1 = XEXP (x, 0);
3375 rtx c = gen_int_mode (-size, GET_MODE (r1));
3376 return fn (mem, x, r1, r1, c, data);
3382 rtx r1 = XEXP (x, 0);
3383 rtx add = XEXP (x, 1);
3384 return fn (mem, x, r1, add, NULL, data);
3392 /* Traverse X looking for MEMs that have autoinc addresses.
3393 For each such autoinc operation found, call FN, passing it
3394 the innermost enclosing MEM, the operation itself, the RTX modified
3395 by the operation, two RTXs (the second may be NULL) that, once
3396 added, represent the value to be held by the modified RTX
3397 afterwards, and DATA. FN is to return 0 to continue the
3398 traversal or any other value to have it returned to the caller of
3399 for_each_inc_dec. */
3402 for_each_inc_dec (rtx x,
3403 for_each_inc_dec_fn fn,
3406 subrtx_var_iterator::array_type array;
3407 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3412 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3414 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3417 iter.skip_subrtxes ();
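/* An illustrative callback for for_each_inc_dec (an editorial sketch, not
   part of this file), assuming the for_each_inc_dec_fn signature from
   rtl.h: it simply counts the autoinc operations seen, treating DATA as
   a pointer to int.  Returning 0 continues the traversal.  */

static int
count_autoinc_1 (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                 rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                 rtx srcoff ATTRIBUTE_UNUSED, void *data)
{
  /* One more autoinc operation found.  */
  ++*(int *) data;
  return 0;
}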
3424 /* Searches X for any reference to REGNO, returning the rtx of the
3425 reference found if any. Otherwise, returns NULL_RTX. */
3428 regno_use_in (unsigned int regno, rtx x)
3434 if (REG_P (x) && REGNO (x) == regno)
3437 fmt = GET_RTX_FORMAT (GET_CODE (x));
3438 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3442 if ((tem = regno_use_in (regno, XEXP (x, i))))
3445 else if (fmt[i] == 'E')
3446 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3447 if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
3454 /* Return a value indicating whether OP, an operand of a commutative
3455 operation, is preferred as the first or second operand. The more
3456 positive the value, the stronger the preference for being the first operand. */
3460 commutative_operand_precedence (rtx op)
3462 enum rtx_code code = GET_CODE (op);
3464 /* Constants always become the second operand. Prefer "nice" constants. */
3465 if (code == CONST_INT)
3467 if (code == CONST_WIDE_INT)
3469 if (code == CONST_POLY_INT)
3471 if (code == CONST_DOUBLE)
3473 if (code == CONST_FIXED)
3475 op = avoid_constant_pool_reference (op);
3476 code = GET_CODE (op);
3478 switch (GET_RTX_CLASS (code))
3481 if (code == CONST_INT)
3483 if (code == CONST_WIDE_INT)
3485 if (code == CONST_POLY_INT)
3487 if (code == CONST_DOUBLE)
3489 if (code == CONST_FIXED)
3494 /* SUBREGs of objects should come second. */
3495 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3500 /* Complex expressions should come first, so decrease the priority
3501 of objects. Prefer pointer objects over non-pointer objects. */
3502 if ((REG_P (op) && REG_POINTER (op))
3503 || (MEM_P (op) && MEM_POINTER (op)))
3507 case RTX_COMM_ARITH:
3508 /* Prefer operands that are themselves commutative to be first.
3509 This helps to make things linear. In particular,
3510 (and (and (reg) (reg)) (not (reg))) is canonical. */
3514 /* If only one operand is a binary expression, it will be the first
3515 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3516 is canonical, although it will usually be further simplified. */
3520 /* Then prefer NEG and NOT. */
3521 if (code == NEG || code == NOT)
3530 /* Return 1 iff it is necessary to swap operands of a commutative operation
3531 in order to canonicalize the expression. */
3534 swap_commutative_operands_p (rtx x, rtx y)
3536 return (commutative_operand_precedence (x)
3537 < commutative_operand_precedence (y));
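/* Illustrative use (editorial): callers typically canonicalize a
   commutative pair up front, e.g.

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);

   so that constants and other low-precedence operands end up second.  */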
3540 /* Return 1 if X is an autoincrement side effect and the register is
3541 not the stack pointer. */
3543 auto_inc_p (const_rtx x)
3545 switch (GET_CODE (x))
3553 /* There are no REG_INC notes for SP. */
3554 if (XEXP (x, 0) != stack_pointer_rtx)
3562 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3564 loc_mentioned_in_p (rtx *loc, const_rtx in)
3573 code = GET_CODE (in);
3574 fmt = GET_RTX_FORMAT (code);
3575 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3579 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3582 else if (fmt[i] == 'E')
3583 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3584 if (loc == &XVECEXP (in, i, j)
3585 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3591 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3592 and SUBREG_BYTE, return the bit offset where the subreg begins
3593 (counting from the least significant bit of the operand). */
3596 subreg_lsb_1 (machine_mode outer_mode,
3597 machine_mode inner_mode,
3598 poly_uint64 subreg_byte)
3600 poly_uint64 subreg_end, trailing_bytes, byte_pos;
3602 /* A paradoxical subreg begins at bit position 0. */
3603 if (paradoxical_subreg_p (outer_mode, inner_mode))
3606 subreg_end = subreg_byte + GET_MODE_SIZE (outer_mode);
3607 trailing_bytes = GET_MODE_SIZE (inner_mode) - subreg_end;
3608 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3609 byte_pos = trailing_bytes;
3610 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3611 byte_pos = subreg_byte;
3614 /* When bytes and words have opposite endianness, we must be able
3615 to split offsets into words and bytes at compile time. */
3616 poly_uint64 leading_word_part
3617 = force_align_down (subreg_byte, UNITS_PER_WORD);
3618 poly_uint64 trailing_word_part
3619 = force_align_down (trailing_bytes, UNITS_PER_WORD);
3620 /* If the subreg crosses a word boundary, ensure that
3621 it also begins and ends on a word boundary. */
3622 gcc_assert (known_le (subreg_end - leading_word_part,
3623 (unsigned int) UNITS_PER_WORD)
3624 || (known_eq (leading_word_part, subreg_byte)
3625 && known_eq (trailing_word_part, trailing_bytes)));
3626 if (WORDS_BIG_ENDIAN)
3627 byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3629 byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3632 return byte_pos * BITS_PER_UNIT;
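/* Worked example (editorial): for (subreg:SI (reg:DI R) 4), OUTER_MODE is
   SImode (4 bytes), INNER_MODE is DImode (8 bytes) and SUBREG_BYTE is 4,
   so subreg_end = 8 and trailing_bytes = 0.  A fully little-endian target
   yields byte_pos = 4, i.e. the subreg starts at bit 32; a fully
   big-endian target yields byte_pos = 0, i.e. bit 0.  */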
3635 /* Given a subreg X, return the bit offset where the subreg begins
3636 (counting from the least significant bit of the reg). */
3639 subreg_lsb (const_rtx x)
3641 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3645 /* Return the subreg byte offset for a subreg whose outer value has
3646 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3647 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3648 lsb of the inner value. This is the inverse of the calculation
3649 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
3652 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3653 poly_uint64 lsb_shift)
3655 /* A paradoxical subreg begins at bit position 0. */
3656 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3657 if (maybe_gt (outer_bytes, inner_bytes))
3659 gcc_checking_assert (known_eq (lsb_shift, 0U));
3663 poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3664 poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3665 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3667 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3671 /* When bytes and words have opposite endianness, we must be able
3672 to split offsets into words and bytes at compile time. */
3673 poly_uint64 lower_word_part = force_align_down (lower_bytes,
3675 poly_uint64 upper_word_part = force_align_down (upper_bytes,
3677 if (WORDS_BIG_ENDIAN)
3678 return upper_word_part + (lower_bytes - lower_word_part);
3680 return lower_word_part + (upper_bytes - upper_word_part);
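/* Worked example (editorial), inverting the subreg_lsb_1 example above:
   with OUTER_BYTES = 4, INNER_BYTES = 8 and LSB_SHIFT = 32, lower_bytes
   is 4 and upper_bytes is 0, so a fully little-endian target recovers
   byte offset 4 and a fully big-endian target recovers 0.  */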
3684 /* Fill in information about a subreg of a hard register.
3685 xregno - A regno of an inner hard subreg_reg (or what will become one).
3686 xmode - The mode of xregno.
3687 offset - The byte offset.
3688 ymode - The mode of a top level SUBREG (or what may become one).
3689 info - Pointer to structure to fill in.
3691 Rather than considering one particular inner register (and thus one
3692 particular "outer" register) in isolation, this function really uses
3693 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3694 function does not check whether adding INFO->offset to XREGNO gives
3695 a valid hard register; even if INFO->offset + XREGNO is out of range,
3696 there might be another register of the same type that is in range.
3697 Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3698 the new register, since that can depend on things like whether the final
3699 register number is even or odd. Callers that want to check whether
3700 this particular subreg can be replaced by a simple (reg ...) should
3701 use simplify_subreg_regno. */
3704 subreg_get_info (unsigned int xregno, machine_mode xmode,
3705 poly_uint64 offset, machine_mode ymode,
3706 struct subreg_info *info)
3708 unsigned int nregs_xmode, nregs_ymode;
3710 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3712 poly_uint64 xsize = GET_MODE_SIZE (xmode);
3713 poly_uint64 ysize = GET_MODE_SIZE (ymode);
3715 bool rknown = false;
3717 /* If the register representation of a non-scalar mode has holes in it,
3718 we expect the scalar units to be concatenated together, with the holes
3719 distributed evenly among the scalar units. Each scalar unit must occupy
3720 at least one register. */
3721 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3723 /* As a consequence, we must be dealing with a constant number of
3724 scalars, and thus a constant offset and number of units. */
3725 HOST_WIDE_INT coffset = offset.to_constant ();
3726 HOST_WIDE_INT cysize = ysize.to_constant ();
3727 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3728 unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3729 scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3730 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3731 gcc_assert (nregs_xmode
3733 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3734 gcc_assert (hard_regno_nregs (xregno, xmode)
3735 == hard_regno_nregs (xregno, xmode_unit) * nunits);
3737 /* You can only ask for a SUBREG of a value with holes in the middle
3738 if you don't cross the holes. (Such a SUBREG should be done by
3739 picking a different register class, or doing it in memory if
3740 necessary.) An example of a value with holes is XCmode on 32-bit
3741 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3742 3 for each part, but in memory it's two 128-bit parts.
3743 Padding is assumed to be at the end (not necessarily the 'high part')
3745 if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3746 && (coffset / GET_MODE_SIZE (xmode_unit)
3747 != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3749 info->representable_p = false;
3754 nregs_xmode = hard_regno_nregs (xregno, xmode);
3756 nregs_ymode = hard_regno_nregs (xregno, ymode);
3758 /* Subreg sizes must be ordered, so that we can tell whether they are
3759 partial, paradoxical or complete. */
3760 gcc_checking_assert (ordered_p (xsize, ysize));
3762 /* Paradoxical subregs are otherwise valid. */
3763 if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3765 info->representable_p = true;
3766 /* If this is a big endian paradoxical subreg, which uses more
3767 actual hard registers than the original register, we must
3768 return a negative offset so that we find the proper highpart of the register.
3771 We assume that the ordering of registers within a multi-register
3772 value has a consistent endianness: if bytes and register words
3773 have different endianness, the hard registers that make up a
3774 multi-register value must be at least word-sized. */
3775 if (REG_WORDS_BIG_ENDIAN)
3776 info->offset = (int) nregs_xmode - (int) nregs_ymode;
3779 info->nregs = nregs_ymode;
3783 /* If registers store different numbers of bits in the different
3784 modes, we cannot generally form this subreg. */
3785 poly_uint64 regsize_xmode, regsize_ymode;
3786 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3787 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3788 && multiple_p (xsize, nregs_xmode, &regsize_xmode)
3789 && multiple_p (ysize, nregs_ymode, &regsize_ymode))
3792 && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3793 || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3795 info->representable_p = false;
3796 if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3797 || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3798 /* Checked by validate_subreg. We must know at compile time
3799 which inner registers are being accessed. */
3803 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3804 would go outside of XMODE. */
3805 if (!rknown && maybe_gt (ysize + offset, xsize))
3807 info->representable_p = false;
3808 info->nregs = nregs_ymode;
3809 if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3810 /* Checked by validate_subreg. We must know at compile time
3811 which inner registers are being accessed. */
3815 /* Quick exit for the simple and common case of extracting whole
3816 subregisters from a multiregister value. */
3817 /* ??? It would be better to integrate this into the code below,
3818 if we can generalize the concept enough and figure out how
3819 odd-sized modes can coexist with the other weird cases we support. */
3820 HOST_WIDE_INT count;
3822 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3823 && known_eq (regsize_xmode, regsize_ymode)
3824 && constant_multiple_p (offset, regsize_ymode, &count))
3826 info->representable_p = true;
3827 info->nregs = nregs_ymode;
3828 info->offset = count;
3829 gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3834 /* Lowpart subregs are otherwise valid. */
3835 if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3837 info->representable_p = true;
3840 if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3843 info->nregs = nregs_ymode;
3848 /* Set NUM_BLOCKS to the number of independently-representable YMODE
3849 values there are in (reg:XMODE XREGNO). We can view the register
3850 as consisting of this number of independent "blocks", where each
3851 block occupies NREGS_YMODE registers and contains exactly one
3852 representable YMODE value. */
3853 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3854 unsigned int num_blocks = nregs_xmode / nregs_ymode;
3856 /* Calculate the number of bytes in each block. This must always
3857 be exact, otherwise we don't know how to verify the constraint.
3858 These conditions may be relaxed but subreg_regno_offset would
3859 need to be redesigned. */
3860 poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3862 /* Get the number of the first block that contains the subreg and the byte
3863 offset of the subreg from the start of that block. */
3864 unsigned int block_number;
3865 poly_uint64 subblock_offset;
3866 if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3868 /* Checked by validate_subreg. We must know at compile time which
3869 inner registers are being accessed. */
3874 /* Only the lowpart of each block is representable. */
3875 info->representable_p
3876 = known_eq (subblock_offset,
3877 subreg_size_lowpart_offset (ysize, bytes_per_block));
3881 /* We assume that the ordering of registers within a multi-register
3882 value has a consistent endianness: if bytes and register words
3883 have different endianness, the hard registers that make up a
3884 multi-register value must be at least word-sized. */
3885 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3886 /* The block number we calculated above followed memory endianness.
3887 Convert it to register endianness by counting back from the end.
3888 (Note that, because of the assumption above, each block must be
3889 at least word-sized.) */
3890 info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3892 info->offset = block_number * nregs_ymode;
3893 info->nregs = nregs_ymode;
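/* Worked example (editorial, assuming a 32-bit target where DImode
   occupies two 4-byte registers): for xmode = DImode, ymode = SImode and
   offset = 4, there are two blocks of 4 bytes each, giving block_number
   = 1 and subblock_offset = 0.  The subreg is the lowpart of its block,
   hence representable, and with matching memory and register endianness
   INFO->offset is 1 and INFO->nregs is 1.  */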
3896 /* This function returns the regno offset of a subreg expression.
3897 xregno - A regno of an inner hard subreg_reg (or what will become one).
3898 xmode - The mode of xregno.
3899 offset - The byte offset.
3900 ymode - The mode of a top level SUBREG (or what may become one).
3901 RETURN - The regno offset which would be used. */
3903 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3904 poly_uint64 offset, machine_mode ymode)
3906 struct subreg_info info;
3907 subreg_get_info (xregno, xmode, offset, ymode, &info);
3911 /* This function returns true when the offset is representable via
3912 subreg_offset in the given regno.
3913 xregno - A regno of an inner hard subreg_reg (or what will become one).
3914 xmode - The mode of xregno.
3915 offset - The byte offset.
3916 ymode - The mode of a top level SUBREG (or what may become one).
3917 RETURN - Whether the offset is representable. */
3919 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3920 poly_uint64 offset, machine_mode ymode)
3922 struct subreg_info info;
3923 subreg_get_info (xregno, xmode, offset, ymode, &info);
3924 return info.representable_p;
3927 /* Return the number of a YMODE register to which
3929 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3931 can be simplified. Return -1 if the subreg can't be simplified.
3933 XREGNO is a hard register number. */
3936 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3937 poly_uint64 offset, machine_mode ymode)
3939 struct subreg_info info;
3940 unsigned int yregno;
3942 /* Give the backend a chance to disallow the mode change. */
3943 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3944 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3945 && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
3946 /* We can use mode change in LRA for some transformations. */
3947 && ! lra_in_progress)
3950 /* We shouldn't simplify stack-related registers. */
3951 if ((!reload_completed || frame_pointer_needed)
3952 && xregno == FRAME_POINTER_REGNUM)
3955 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3956 && xregno == ARG_POINTER_REGNUM)
3959 if (xregno == STACK_POINTER_REGNUM
3960 /* We should convert the hard stack register in LRA if it is necessary. */
3962 && ! lra_in_progress)
3965 /* Try to get the register offset. */
3966 subreg_get_info (xregno, xmode, offset, ymode, &info);
3967 if (!info.representable_p)
3970 /* Make sure that the offsetted register value is in range. */
3971 yregno = xregno + info.offset;
3972 if (!HARD_REGISTER_NUM_P (yregno))
3975 /* See whether (reg:YMODE YREGNO) is valid.
3977 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3978 This is a kludge to work around how complex FP arguments are passed
3979 on IA-64 and should be fixed. See PR target/49226. */
3980 if (!targetm.hard_regno_mode_ok (yregno, ymode)
3981 && targetm.hard_regno_mode_ok (xregno, xmode))
3984 return (int) yregno;
3987 /* Return the final regno that a subreg expression refers to. */
3989 subreg_regno (const_rtx x)
3992 rtx subreg = SUBREG_REG (x);
3993 int regno = REGNO (subreg);
3995 ret = regno + subreg_regno_offset (regno,
4003 /* Return the number of registers that a subreg expression refers to. */
4006 subreg_nregs (const_rtx x)
4008 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4011 /* Return the number of registers that a subreg REG with REGNO
4012 expression refers to. This is a copy of rtlanal.c:subreg_nregs,
4013 changed so that the regno can be passed in. */
4016 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4018 struct subreg_info info;
4019 rtx subreg = SUBREG_REG (x);
4021 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4026 struct parms_set_data
4032 /* Helper function for noticing stores to parameter registers. */
4034 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4036 struct parms_set_data *const d = (struct parms_set_data *) data;
4037 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4038 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4040 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4045 /* Look backward for first parameter to be loaded.
4046 Note that loads of all parameters will not necessarily be
4047 found if CSE has eliminated some of them (e.g., an argument
4048 to the outer function is passed down as a parameter).
4049 Do not skip BOUNDARY. */
4051 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4053 struct parms_set_data parm;
4055 rtx_insn *before, *first_set;
4057 /* Since different machines initialize their parameter registers
4058 in different orders, assume nothing. Collect the set of all
4059 parameter registers. */
4060 CLEAR_HARD_REG_SET (parm.regs);
4062 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4063 if (GET_CODE (XEXP (p, 0)) == USE
4064 && REG_P (XEXP (XEXP (p, 0), 0))
4065 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4067 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4069 /* We only care about registers which can hold function arguments. */
4071 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4074 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4078 first_set = call_insn;
4080 /* Search backward for the first set of a register in this set. */
4081 while (parm.nregs && before != boundary)
4083 before = PREV_INSN (before);
4085 /* It is possible that some loads got CSEed from one call to
4086 another. Stop in that case. */
4087 if (CALL_P (before))
4090 /* Our caller must either ensure that we will find all sets
4091 (in case code has not been optimized yet), or take care of
4092 possible labels by setting BOUNDARY to the preceding CODE_LABEL. */
4094 if (LABEL_P (before))
4096 gcc_assert (before == boundary);
4100 if (INSN_P (before))
4102 int nregs_old = parm.nregs;
4103 note_stores (PATTERN (before), parms_set, &parm);
4104 /* If we found something that did not set a parameter reg,
4105 we're done. Do not keep going, as that might result
4106 in hoisting an insn before the setting of a pseudo
4107 that is used by the hoisted insn. */
4108 if (nregs_old != parm.nregs)
4117 /* Return true if we should avoid inserting code between INSN and the
4118 preceding call instruction. */
4121 keep_with_call_p (const rtx_insn *insn)
4125 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4127 if (REG_P (SET_DEST (set))
4128 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4129 && fixed_regs[REGNO (SET_DEST (set))]
4130 && general_operand (SET_SRC (set), VOIDmode))
4132 if (REG_P (SET_SRC (set))
4133 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4134 && REG_P (SET_DEST (set))
4135 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4137 /* There may be a stack pop just after the call and before the store
4138 of the return register. Search for the actual store when deciding
4139 if we can break or not. */
4140 if (SET_DEST (set) == stack_pointer_rtx)
4142 /* This CONST_CAST is okay because next_nonnote_insn just
4143 returns its argument and we assign it to a const_rtx variable. */
4145 const rtx_insn *i2
4146 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4147 if (i2 && keep_with_call_p (i2))
4154 /* Return true if LABEL is a target of JUMP_INSN. This applies only
4155 to non-complex jumps. That is, direct unconditional, conditional,
4156 and tablejumps, but not computed jumps or returns. It also does
4157 not apply to the fallthru case of a conditional jump. */
4160 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4162 rtx tmp = JUMP_LABEL (jump_insn);
4163 rtx_jump_table_data *table;
4168 if (tablejump_p (jump_insn, NULL, &table))
4170 rtvec vec = table->get_labels ();
4171 int i, veclen = GET_NUM_ELEM (vec);
4173 for (i = 0; i < veclen; ++i)
4174 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4178 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4185 /* Return an estimate of the cost of computing rtx X.
4186 One use is in cse, to decide which expression to keep in the hash table.
4187 Another is in rtl generation, to pick the cheapest way to multiply.
4188 Other uses like the latter are expected in the future.
4190 X appears as operand OPNO in an expression with code OUTER_CODE.
4191 SPEED specifies whether costs optimized for speed or size should be returned. */
4195 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4196 int opno, bool speed)
4207 if (GET_MODE (x) != VOIDmode)
4208 mode = GET_MODE (x);
4210 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4211 many insns, taking N times as long. */
4212 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
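  /* For instance (editorial example): with 4-byte UNITS_PER_WORD, a DImode
     value gets FACTOR = 2, so the DImode MULT case below is costed as
     2 * 2 * COSTS_N_INSNS (5) = COSTS_N_INSNS (20).  */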
4216 /* Compute the default costs of certain things.
4217 Note that targetm.rtx_costs can override the defaults. */
4219 code = GET_CODE (x);
4223 /* Multiplication has time-complexity O(N*N), where N is the
4224 number of units (translated from digits) when using
4225 schoolbook long multiplication. */
4226 total = factor * factor * COSTS_N_INSNS (5);
4232 /* Similarly, complexity for schoolbook long division. */
4233 total = factor * factor * COSTS_N_INSNS (7);
4236 /* Used in combine.c as a marker. */
4240 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4241 the mode for the factor. */
4242 mode = GET_MODE (SET_DEST (x));
4243 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4248 total = factor * COSTS_N_INSNS (1);
4258 /* If we can't tie these modes, make this expensive. The larger
4259 the mode, the more expensive it is. */
4260 if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4261 return COSTS_N_INSNS (2 + factor);
4265 if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4272 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4277 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4278 which is already in total. */
4280 fmt = GET_RTX_FORMAT (code);
4281 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4283 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4284 else if (fmt[i] == 'E')
4285 for (j = 0; j < XVECLEN (x, i); j++)
4286 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4291 /* Fill in the structure C with information about both speed and size rtx
4292 costs for X, which is operand OPNO in an expression with code OUTER. */
4295 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4296 struct full_rtx_costs *c)
4298 c->speed = rtx_cost (x, mode, outer, opno, true);
4299 c->size = rtx_cost (x, mode, outer, opno, false);
4303 /* Return cost of address expression X.
4304 Expect that X is a properly formed address reference.
4306 The SPEED parameter specifies whether costs optimized for speed or size should be returned. */
4310 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4312 /* We may be asked for the cost of various unusual addresses, such as the
4313 operands of a push instruction. It is not worthwhile to complicate the
4314 target hook with such cases. */
4316 if (!memory_address_addr_space_p (mode, x, as))
4319 return targetm.address_cost (x, mode, as, speed);
4322 /* If the target doesn't override, compute the cost as with arithmetic. */
4325 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4327 return rtx_cost (x, Pmode, MEM, 0, speed);
4331 unsigned HOST_WIDE_INT
4332 nonzero_bits (const_rtx x, machine_mode mode)
4334 if (mode == VOIDmode)
4335 mode = GET_MODE (x);
4336 scalar_int_mode int_mode;
4337 if (!is_a <scalar_int_mode> (mode, &int_mode))
4338 return GET_MODE_MASK (mode);
4339 return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
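/* For example (editorial): nonzero_bits of (and:SI X (const_int 255)) is
   at most 0xff, which lets callers prove that a later zero-extension of
   the low byte of that value is redundant.  */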
4343 num_sign_bit_copies (const_rtx x, machine_mode mode)
4345 if (mode == VOIDmode)
4346 mode = GET_MODE (x);
4347 scalar_int_mode int_mode;
4348 if (!is_a <scalar_int_mode> (mode, &int_mode))
4350 return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4353 /* Return true if nonzero_bits1 might recurse into both operands of X. */
4357 nonzero_bits_binary_arith_p (const_rtx x)
4359 if (!ARITHMETIC_P (x))
4361 switch (GET_CODE (x))
4383 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4384 It avoids exponential behavior in nonzero_bits1 when X has
4385 identical subexpressions on the first or the second level. */
4387 static unsigned HOST_WIDE_INT
4388 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4389 machine_mode known_mode,
4390 unsigned HOST_WIDE_INT known_ret)
4392 if (x == known_x && mode == known_mode)
4395 /* Try to find identical subexpressions. If found, call
4396 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4397 precomputed value for the subexpression as KNOWN_RET. */
4399 if (nonzero_bits_binary_arith_p (x))
4401 rtx x0 = XEXP (x, 0);
4402 rtx x1 = XEXP (x, 1);
4404 /* Check the first level. */
4406 return nonzero_bits1 (x, mode, x0, mode,
4407 cached_nonzero_bits (x0, mode, known_x,
4408 known_mode, known_ret));
4410 /* Check the second level. */
4411 if (nonzero_bits_binary_arith_p (x0)
4412 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4413 return nonzero_bits1 (x, mode, x1, mode,
4414 cached_nonzero_bits (x1, mode, known_x,
4415 known_mode, known_ret));
4417 if (nonzero_bits_binary_arith_p (x1)
4418 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4419 return nonzero_bits1 (x, mode, x0, mode,
4420 cached_nonzero_bits (x0, mode, known_x,
4421 known_mode, known_ret));
4424 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
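/* For instance (editorial): given (plus (reg A) (reg A)), X0 == X1, so
   the wrapper computes the nonzero bits of (reg A) once and passes them
   to nonzero_bits1 as the KNOWN_X/KNOWN_RET pair instead of recomputing
   them for the second operand.  */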
4427 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4428 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4429 is less useful. We can't allow both, because that results in exponential
4430 run time recursion. There is a nullstone testcase that triggered
4431 this. This macro avoids accidental uses of num_sign_bit_copies. */
4432 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4434 /* Given an expression, X, compute which bits in X can be nonzero.
4435 We don't care about bits outside of those defined in MODE.
4437 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4438 an arithmetic operation, we can do better. */
4440 static unsigned HOST_WIDE_INT
4441 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4442 machine_mode known_mode,
4443 unsigned HOST_WIDE_INT known_ret)
4445 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4446 unsigned HOST_WIDE_INT inner_nz;
4447 enum rtx_code code = GET_CODE (x);
4448 machine_mode inner_mode;
4449 unsigned int inner_width;
4450 scalar_int_mode xmode;
4452 unsigned int mode_width = GET_MODE_PRECISION (mode);
4454 if (CONST_INT_P (x))
4456 if (SHORT_IMMEDIATES_SIGN_EXTEND
4458 && mode_width < BITS_PER_WORD
4459 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4460 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4465 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4467 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4469 /* If X is wider than MODE, use its mode instead. */
4470 if (xmode_width > mode_width)
4473 nonzero = GET_MODE_MASK (mode);
4474 mode_width = xmode_width;
4477 if (mode_width > HOST_BITS_PER_WIDE_INT)
4478 /* Our only callers in this case look for single bit values. So
4479 just return the mode mask. Those tests will then be false. */
4482 /* If MODE is wider than X, but both are a single word for both the host
4483 and target machines, we can compute this from which bits of the object
4484 might be nonzero in its own mode, taking into account the fact that, on
4485 CISC machines, accessing an object in a wider mode generally causes the
4486 high-order bits to become undefined, so they are not known to be zero.
4487 We extend this reasoning to RISC machines for rotate operations since the
4488 semantics of the operations in the larger mode is not well defined. */
4489 if (mode_width > xmode_width
4490 && xmode_width <= BITS_PER_WORD
4491 && xmode_width <= HOST_BITS_PER_WIDE_INT
4492 && (!WORD_REGISTER_OPERATIONS || code == ROTATE || code == ROTATERT))
4494 nonzero &= cached_nonzero_bits (x, xmode,
4495 known_x, known_mode, known_ret);
4496 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4500 /* Please keep nonzero_bits_binary_arith_p above in sync with
4501 the code in the switch below. */
4505 #if defined(POINTERS_EXTEND_UNSIGNED)
4506 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4507 all the bits above ptr_mode are known to be zero. */
4508 /* As we do not know which address space the pointer is referring to,
4509 we can do this only if the target does not support different pointer
4510 or address modes depending on the address space. */
4511 if (target_default_pointer_address_modes_p ()
4512 && POINTERS_EXTEND_UNSIGNED
4515 && !targetm.have_ptr_extend ())
4516 nonzero &= GET_MODE_MASK (ptr_mode);
4519 /* Include declared information about alignment of pointers. */
4520 /* ??? We don't properly preserve REG_POINTER changes across
4521 pointer-to-integer casts, so we can't trust it except for
4522 things that we know must be pointers. See execute/960116-1.c. */
4523 if ((x == stack_pointer_rtx
4524 || x == frame_pointer_rtx
4525 || x == arg_pointer_rtx)
4526 && REGNO_POINTER_ALIGN (REGNO (x)))
4528 unsigned HOST_WIDE_INT alignment
4529 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4531 #ifdef PUSH_ROUNDING
4532 /* If PUSH_ROUNDING is defined, it is possible for the
4533 stack to be momentarily aligned only to that amount,
4534 so we pick the least alignment. */
4535 if (x == stack_pointer_rtx && PUSH_ARGS)
4537 poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4538 alignment = MIN (known_alignment (rounded_1), alignment);
4542 nonzero &= ~(alignment - 1);
4546 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4547 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4551 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4552 known_mode, known_ret);
4554 return nonzero_for_hook;
4558 /* On many, if not most, RISC machines, reading a byte from memory
4559 zeros the rest of the register. Noticing that fact saves a lot
4560 of extra zero-extends. */
4561 if (load_extend_op (xmode) == ZERO_EXTEND)
4562 nonzero &= GET_MODE_MASK (xmode);
4566 case UNEQ: case LTGT:
4567 case GT: case GTU: case UNGT:
4568 case LT: case LTU: case UNLT:
4569 case GE: case GEU: case UNGE:
4570 case LE: case LEU: case UNLE:
4571 case UNORDERED: case ORDERED:
4572 /* If this produces an integer result, we know which bits are set.
4573 Code here used to clear bits outside the mode of X, but that is wrong. */
4575 /* Mind that MODE is the mode the caller wants to look at this
4576 operation in, and not the actual operation mode. We can wind
4577 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4578 that describes the results of a vector compare. */
4579 if (GET_MODE_CLASS (xmode) == MODE_INT
4580 && mode_width <= HOST_BITS_PER_WIDE_INT)
4581 nonzero = STORE_FLAG_VALUE;
4586 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4587 and num_sign_bit_copies. */
4588 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4592 if (xmode_width < mode_width)
4593 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4598 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4599 and num_sign_bit_copies. */
4600 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4606 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4607 known_x, known_mode, known_ret)
4608 & GET_MODE_MASK (mode));
4612 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4613 known_x, known_mode, known_ret);
4614 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4615 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4619 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4620 Otherwise, show that all the bits in the outer mode but not the inner mode may be nonzero. */
4622 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4623 known_x, known_mode, known_ret);
4624 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4626 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4627 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4628 inner_nz |= (GET_MODE_MASK (mode)
4629 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4632 nonzero &= inner_nz;
4636 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4637 known_x, known_mode, known_ret)
4638 & cached_nonzero_bits (XEXP (x, 1), mode,
4639 known_x, known_mode, known_ret);
4643 case UMIN: case UMAX: case SMIN: case SMAX:
4645 unsigned HOST_WIDE_INT nonzero0
4646 = cached_nonzero_bits (XEXP (x, 0), mode,
4647 known_x, known_mode, known_ret);
4649 /* Don't call nonzero_bits a second time if it cannot change anything. */
4651 if ((nonzero & nonzero0) != nonzero)
4653 | cached_nonzero_bits (XEXP (x, 1), mode,
4654 known_x, known_mode, known_ret);
4658 case PLUS: case MINUS:
4660 case DIV: case UDIV:
4661 case MOD: case UMOD:
4662 /* We can apply the rules of arithmetic to compute the number of
4663 high- and low-order zero bits of these operations. We start by
4664 computing the width (position of the highest-order nonzero bit)
4665 and the number of low-order zero bits for each value. */
4667 unsigned HOST_WIDE_INT nz0
4668 = cached_nonzero_bits (XEXP (x, 0), mode,
4669 known_x, known_mode, known_ret);
4670 unsigned HOST_WIDE_INT nz1
4671 = cached_nonzero_bits (XEXP (x, 1), mode,
4672 known_x, known_mode, known_ret);
4673 int sign_index = xmode_width - 1;
4674 int width0 = floor_log2 (nz0) + 1;
4675 int width1 = floor_log2 (nz1) + 1;
4676 int low0 = ctz_or_zero (nz0);
4677 int low1 = ctz_or_zero (nz1);
4678 unsigned HOST_WIDE_INT op0_maybe_minusp
4679 = nz0 & (HOST_WIDE_INT_1U << sign_index);
4680 unsigned HOST_WIDE_INT op1_maybe_minusp
4681 = nz1 & (HOST_WIDE_INT_1U << sign_index);
4682 unsigned int result_width = mode_width;
4688 result_width = MAX (width0, width1) + 1;
4689 result_low = MIN (low0, low1);
4692 result_low = MIN (low0, low1);
4695 result_width = width0 + width1;
4696 result_low = low0 + low1;
4701 if (!op0_maybe_minusp && !op1_maybe_minusp)
4702 result_width = width0;
4707 result_width = width0;
4712 if (!op0_maybe_minusp && !op1_maybe_minusp)
4713 result_width = MIN (width0, width1);
4714 result_low = MIN (low0, low1);
4719 result_width = MIN (width0, width1);
4720 result_low = MIN (low0, low1);
4726 if (result_width < mode_width)
4727 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4730 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4735 if (CONST_INT_P (XEXP (x, 1))
4736 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4737 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4741 /* If this is a SUBREG formed for a promoted variable that has
4742 been zero-extended, we know that at least the high-order bits
4743 are zero, though others might be too. */
4744 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4745 nonzero = GET_MODE_MASK (xmode)
4746 & cached_nonzero_bits (SUBREG_REG (x), xmode,
4747 known_x, known_mode, known_ret);
4749 /* If the inner mode is a single word for both the host and target
4750 machines, we can compute this from which bits of the inner
4751 object might be nonzero. */
4752 inner_mode = GET_MODE (SUBREG_REG (x));
4753 if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4754 && inner_width <= BITS_PER_WORD
4755 && inner_width <= HOST_BITS_PER_WIDE_INT)
4757 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4758 known_x, known_mode, known_ret);
4760 /* On many CISC machines, accessing an object in a wider mode
4761 causes the high-order bits to become undefined. So they are
4762 not known to be zero. */
4764 if ((!WORD_REGISTER_OPERATIONS
4765 /* If this is a typical RISC machine, we only have to worry
4766 about the way loads are extended. */
4767 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4768 ? val_signbit_known_set_p (inner_mode, nonzero)
4769 : extend_op != ZERO_EXTEND)
4770 || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
4771 && xmode_width > inner_width)
4773 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4782 /* The nonzero bits are in two classes: any bits within MODE
4783 that aren't in xmode are always significant. The rest of the
4784 nonzero bits are those that are significant in the operand of
4785 the shift when shifted the appropriate number of bits. This
4786 shows that high-order bits are cleared by the right shift and
4787 low-order bits by left shifts. */
4788 if (CONST_INT_P (XEXP (x, 1))
4789 && INTVAL (XEXP (x, 1)) >= 0
4790 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4791 && INTVAL (XEXP (x, 1)) < xmode_width)
4793 int count = INTVAL (XEXP (x, 1));
4794 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4795 unsigned HOST_WIDE_INT op_nonzero
4796 = cached_nonzero_bits (XEXP (x, 0), mode,
4797 known_x, known_mode, known_ret);
4798 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4799 unsigned HOST_WIDE_INT outer = 0;
4801 if (mode_width > xmode_width)
4802 outer = (op_nonzero & nonzero & ~mode_mask);
4817 /* If the sign bit may have been nonzero before the shift, we
4818 need to mark all the places it could have been copied to
4819 by the shift as possibly nonzero. */
4820 if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4821 inner |= (((HOST_WIDE_INT_1U << count) - 1)
4822 << (xmode_width - count));
4826 inner = (inner << (count % xmode_width)
4827 | (inner >> (xmode_width - (count % xmode_width))))
4832 inner = (inner >> (count % xmode_width)
4833 | (inner << (xmode_width - (count % xmode_width))))
4841 nonzero &= (outer | inner);
4847 /* This is at most the number of bits in the mode. */
4848 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4852 /* If CLZ has a known value at zero, then the nonzero bits are
4853 that value, plus the number of bits in the mode minus one. */
4854 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4856 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4862 /* If CTZ has a known value at zero, then the nonzero bits are
4863 that value, plus the number of bits in the mode minus one. */
4864 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4866 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4872 /* This is at most the number of bits in the mode minus 1. */
4873 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4882 unsigned HOST_WIDE_INT nonzero_true
4883 = cached_nonzero_bits (XEXP (x, 1), mode,
4884 known_x, known_mode, known_ret);
4886 /* Don't call nonzero_bits a second time if it cannot change anything. */
4888 if ((nonzero & nonzero_true) != nonzero)
4889 nonzero &= nonzero_true
4890 | cached_nonzero_bits (XEXP (x, 2), mode,
4891 known_x, known_mode, known_ret);
4902 /* See the macro definition above. */
4903 #undef cached_num_sign_bit_copies
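/* Editorial aside: a self-contained sketch (not GCC code; compile it on
   its own) of the PLUS rule used by nonzero_bits1 above.  Given the
   known-nonzero masks of two operands, their sum can set at most one bit
   above the wider operand's width and preserves the common low-order
   zero bits.  */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Position just above the highest set bit, or 0 for an all-zero mask.  */
static int width_of (uint64_t m) { return m ? 64 - __builtin_clzll (m) : 0; }
/* Number of low-order zero bits, or 0 for an all-zero mask.  */
static int low_zeros (uint64_t m) { return m ? __builtin_ctzll (m) : 0; }

static uint64_t
plus_nonzero_bound (uint64_t nz0, uint64_t nz1)
{
  int w0 = width_of (nz0), w1 = width_of (nz1);
  int width = (w0 > w1 ? w0 : w1) + 1;  /* One carry bit beyond the wider.  */
  int l0 = low_zeros (nz0), l1 = low_zeros (nz1);
  int low = l0 < l1 ? l0 : l1;          /* Common trailing zeros survive.  */
  uint64_t high = width >= 64 ? ~(uint64_t) 0 : ((uint64_t) 1 << width) - 1;
  return high & ~(((uint64_t) 1 << low) - 1);
}

int
main (void)
{
  /* Operands confined to bits [2..5] sum to a value confined to [2..6];
     prints 0x7c.  */
  printf ("%#llx\n", (unsigned long long) plus_nonzero_bound (0x3c, 0x3c));
  return 0;
}
#endif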
4906 /* Return true if num_sign_bit_copies1 might recurse into both operands of X. */
4910 num_sign_bit_copies_binary_arith_p (const_rtx x)
4912 if (!ARITHMETIC_P (x))
4914 switch (GET_CODE (x))
4932 /* The function cached_num_sign_bit_copies is a wrapper around
4933 num_sign_bit_copies1. It avoids exponential behavior in
4934 num_sign_bit_copies1 when X has identical subexpressions on the
4935 first or the second level. */
4938 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
4939 const_rtx known_x, machine_mode known_mode,
4940 unsigned int known_ret)
4942 if (x == known_x && mode == known_mode)
4945 /* Try to find identical subexpressions. If found, call
4946 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4947 the precomputed value for the subexpression as KNOWN_RET. */
4949 if (num_sign_bit_copies_binary_arith_p (x))
4951 rtx x0 = XEXP (x, 0);
4952 rtx x1 = XEXP (x, 1);
4954 /* Check the first level. */
4957 num_sign_bit_copies1 (x, mode, x0, mode,
4958 cached_num_sign_bit_copies (x0, mode, known_x,
4962 /* Check the second level. */
4963 if (num_sign_bit_copies_binary_arith_p (x0)
4964 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4966 num_sign_bit_copies1 (x, mode, x1, mode,
4967 cached_num_sign_bit_copies (x1, mode, known_x,
4971 if (num_sign_bit_copies_binary_arith_p (x1)
4972 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4974 num_sign_bit_copies1 (x, mode, x0, mode,
4975 cached_num_sign_bit_copies (x0, mode, known_x,
4980 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4983 /* Return the number of bits at the high-order end of X that are known to
4984 be equal to the sign bit. X will be used in mode MODE. The returned
4985 value will always be between 1 and the number of bits in MODE. */
4988 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4989 machine_mode known_mode,
4990 unsigned int known_ret)
4992 enum rtx_code code = GET_CODE (x);
4993 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4994 int num0, num1, result;
4995 unsigned HOST_WIDE_INT nonzero;
4997 if (CONST_INT_P (x))
4999 /* If the constant is negative, take its 1's complement and remask.
5000 Then see how many zero bits we have. */
5001 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5002 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5003 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5004 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5006 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
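/* Worked example (editorial): in QImode (bitwidth 8), x = -4 is
0b11111100.  Its masked complement is 0b00000011, floor_log2 of which
is 1, so the result is 8 - 1 - 1 = 6: the six high-order bits all equal
the sign bit.  A nonnegative constant such as 5 (0b00000101) skips the
complement and gives 8 - 2 - 1 = 5.  */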
5009 scalar_int_mode xmode, inner_mode;
5010 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
return 1;
5013 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5015 /* For a smaller mode, just ignore the high bits. */
5016 if (bitwidth < xmode_width)
5018 num0 = cached_num_sign_bit_copies (x, xmode,
5019 known_x, known_mode, known_ret);
5020 return MAX (1, num0 - (int) (xmode_width - bitwidth));
5023 if (bitwidth > xmode_width)
5025 /* If this machine does not do all register operations on the entire
5026 register and MODE is wider than the mode of X, we can say nothing
5027 at all about the high-order bits.  We extend this reasoning to every
5028 machine for rotate operations since the semantics of the operations
5029 in the larger mode are not well defined.  */
5030 if (!WORD_REGISTER_OPERATIONS || code == ROTATE || code == ROTATERT)
return 1;
5033 /* Likewise on machines that do, if the mode of the object is smaller
5034 than a word and loads of that size don't sign extend, we can say
5035 nothing about the high order bits. */
5036 if (xmode_width < BITS_PER_WORD
5037 && load_extend_op (xmode) != SIGN_EXTEND)
return 1;
5041 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5042 the code in the switch below. */
5047 #if defined(POINTERS_EXTEND_UNSIGNED)
5048 /* If pointers extend signed and this is a pointer in Pmode, say that
5049 all the bits above ptr_mode are known to be sign bit copies. */
5050 /* As we do not know which address space the pointer is referring to,
5051 we can do this only if the target does not support different pointer
5052 or address modes depending on the address space. */
5053 if (target_default_pointer_address_modes_p ()
5054 && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5055 && mode == Pmode && REG_POINTER (x)
5056 && !targetm.have_ptr_extend ())
5057 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5061 unsigned int copies_for_hook = 1, copies = 1;
5062 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5066 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5067 known_mode, known_ret);
5069 if (copies > 1 || copies_for_hook > 1)
5070 return MAX (copies, copies_for_hook);
5072 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
5077 /* Some RISC machines sign-extend all loads smaller than a word.  */
5078 if (load_extend_op (xmode) == SIGN_EXTEND)
5079 return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5083 /* If this is a SUBREG for a promoted object that is sign-extended
5084 and we are looking at it in a wider mode, we know that at least the
5085 high-order bits are known to be sign bit copies. */
5087 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5089 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5090 known_x, known_mode, known_ret);
5091 return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5094 if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5096 /* For a smaller object, just ignore the high bits. */
5097 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5099 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5100 known_x, known_mode,
5102 return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
- bitwidth));
5106 /* For paradoxical SUBREGs on machines where all register operations
5107 affect the entire register, just look inside. Note that we are
5108 passing MODE to the recursive call, so the number of sign bit
5109 copies will remain relative to that mode, not the inner mode. */
5111 /* This works only if loads sign extend. Otherwise, if we get a
5112 reload for the inner part, it may be loaded from the stack, and
5113 then we lose all sign bit copies that existed before the store to the stack.  */
5116 if (WORD_REGISTER_OPERATIONS
5117 && load_extend_op (inner_mode) == SIGN_EXTEND
5118 && paradoxical_subreg_p (x)
5119 && MEM_P (SUBREG_REG (x)))
5120 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5121 known_x, known_mode, known_ret);
5126 if (CONST_INT_P (XEXP (x, 1)))
5127 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5131 if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5132 return (bitwidth - GET_MODE_PRECISION (inner_mode)
5133 + cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5134 known_x, known_mode, known_ret));
5138 /* For a smaller object, just ignore the high bits. */
5139 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5140 num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5141 known_x, known_mode, known_ret);
5142 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
- bitwidth)));
5146 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5147 known_x, known_mode, known_ret);
5149 case ROTATE: case ROTATERT:
5150 /* If we are rotating left by a number of bits less than the number
5151 of sign bit copies, we can just subtract that amount from the number.  */
5153 if (CONST_INT_P (XEXP (x, 1))
5154 && INTVAL (XEXP (x, 1)) >= 0
5155 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5157 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5158 known_x, known_mode, known_ret);
5159 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5160 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5165 /* In general, this subtracts one sign bit copy. But if the value
5166 is known to be positive, the number of sign bit copies is the
5167 same as that of the input. Finally, if the input has just one bit
5168 that might be nonzero, all the bits are copies of the sign bit. */
5169 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5170 known_x, known_mode, known_ret);
5171 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5172 return num0 > 1 ? num0 - 1 : 1;
5174 nonzero = nonzero_bits (XEXP (x, 0), mode);
if (nonzero == 1)
return bitwidth;

if (num0 > 1
5179 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
num0--;

return num0;
5184 case IOR: case AND: case XOR:
5185 case SMIN: case SMAX: case UMIN: case UMAX:
5186 /* Logical operations will preserve the number of sign-bit copies.
5187 MIN and MAX operations always return one of the operands. */
5188 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5189 known_x, known_mode, known_ret);
5190 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5191 known_x, known_mode, known_ret);
5193 /* If num1 is clearing some of the top bits then regardless of
5194 the other term, we are guaranteed to have at least that many
5195 high-order zero bits. */
if (code == AND
&& num1 > 1
5198 && bitwidth <= HOST_BITS_PER_WIDE_INT
5199 && CONST_INT_P (XEXP (x, 1))
5200 && (UINTVAL (XEXP (x, 1))
5201 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
return num1;
5204 /* Similarly for IOR when setting high-order bits. */
if (code == IOR
&& num1 > 1
5207 && bitwidth <= HOST_BITS_PER_WIDE_INT
5208 && CONST_INT_P (XEXP (x, 1))
5209 && (UINTVAL (XEXP (x, 1))
5210 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
return num1;
5213 return MIN (num0, num1);
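/* Worked example (editorial): in SImode, (and X (const_int 0xff)) masks
to the low byte whatever X is.  num1 for 0xff is
32 - floor_log2 (0xff) - 1 = 24, so at least the top 24 bits of the
result are copies of a zero sign bit.  The IOR case is the mirror
image: (ior X (const_int -256)) forces the top 24 bits to one.  */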
5215 case PLUS: case MINUS:
5216 /* For addition and subtraction, we can have a 1-bit carry. However,
5217 if we are subtracting 1 from a positive number, there will not
5218 be such a carry. Furthermore, if the positive number is known to
5219 be 0 or 1, we know the result is either -1 or 0. */
5221 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5222 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5224 nonzero = nonzero_bits (XEXP (x, 0), mode);
5225 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5226 return (nonzero == 1 || nonzero == 0 ? bitwidth
5227 : bitwidth - floor_log2 (nonzero) - 1);
5230 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5231 known_x, known_mode, known_ret);
5232 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5233 known_x, known_mode, known_ret);
5234 result = MAX (1, MIN (num0, num1) - 1);
return result;

case MULT:
5239 /* The number of bits of the product is the sum of the number of
5240 bits of both terms.  However, unless one of the terms is known
5241 to be positive, we must allow for an additional bit since negating
5242 a negative number can remove one sign bit copy. */
5244 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5245 known_x, known_mode, known_ret);
5246 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5247 known_x, known_mode, known_ret);
5249 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
if (result > 0
5251 && (bitwidth > HOST_BITS_PER_WIDE_INT
5252 || (((nonzero_bits (XEXP (x, 0), mode)
5253 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5254 && ((nonzero_bits (XEXP (x, 1), mode)
5255 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
!= 0))))
result--;
5259 return MAX (1, result);
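/* Worked example (editorial): with num0 = num1 = 21 (true of -2048 in
SImode), result = 32 - 11 - 11 = 10, and since neither operand is known
nonnegative one more copy is subtracted, giving 9, which is exactly
right for (-2048) * (-2048) = 2^22, whose top nine bits are zero.  */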
5262 /* The result must be <= the first operand. If the first operand
5263 has the high bit set, we know nothing about the number of sign bit copies.  */
5265 if (bitwidth > HOST_BITS_PER_WIDE_INT)
return 1;
5267 else if ((nonzero_bits (XEXP (x, 0), mode)
5268 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
return 1;
5271 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5272 known_x, known_mode, known_ret);
5275 /* The result must be <= the second operand. If the second operand
5276 has (or just might have) the high bit set, we know nothing about
5277 the number of sign bit copies. */
5278 if (bitwidth > HOST_BITS_PER_WIDE_INT)
return 1;
5280 else if ((nonzero_bits (XEXP (x, 1), mode)
5281 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
return 1;
5284 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5285 known_x, known_mode, known_ret);
5288 /* Similar to unsigned division, except that we have to worry about
5289 the case where the divisor is negative, in which case we have to add 1.  */
5291 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5292 known_x, known_mode, known_ret);
if (result > 1
5294 && (bitwidth > HOST_BITS_PER_WIDE_INT
5295 || (nonzero_bits (XEXP (x, 1), mode)
5296 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
result--;

return result;
5302 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5303 known_x, known_mode, known_ret);
if (result > 1
5305 && (bitwidth > HOST_BITS_PER_WIDE_INT
5306 || (nonzero_bits (XEXP (x, 1), mode)
5307 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
result--;

return result;
case ASHIFTRT:
5313 /* Shifts by a constant add to the number of bits equal to the sign bit.  */
5315 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5316 known_x, known_mode, known_ret);
5317 if (CONST_INT_P (XEXP (x, 1))
5318 && INTVAL (XEXP (x, 1)) > 0
5319 && INTVAL (XEXP (x, 1)) < xmode_width)
5320 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

return num0;

case ASHIFT:
5325 /* Left shifts destroy copies.  */
5326 if (!CONST_INT_P (XEXP (x, 1))
5327 || INTVAL (XEXP (x, 1)) < 0
5328 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5329 || INTVAL (XEXP (x, 1)) >= xmode_width)
return 1;
5332 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5333 known_x, known_mode, known_ret);
5334 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5337 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5338 known_x, known_mode, known_ret);
5339 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5340 known_x, known_mode, known_ret);
5341 return MIN (num0, num1);
5343 case EQ: case NE: case GE: case GT: case LE: case LT:
5344 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5345 case GEU: case GTU: case LEU: case LTU:
5346 case UNORDERED: case ORDERED:
5347 /* If the constant is negative, take its 1's complement and remask.
5348 Then see how many zero bits we have. */
5349 nonzero = STORE_FLAG_VALUE;
5350 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5351 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5352 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5354 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5360 /* If we haven't been able to figure it out by one of the above rules,
5361 see if some of the high-order bits are known to be zero. If so,
5362 count those bits and return one less than that amount. If we can't
5363 safely compute the mask for this mode, always return BITWIDTH. */
5365 bitwidth = GET_MODE_PRECISION (mode);
5366 if (bitwidth > HOST_BITS_PER_WIDE_INT)
return 1;
5369 nonzero = nonzero_bits (x, mode);
5370 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5371 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
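/* Editorial note: this fallback mirrors the CONST_INT case at the top of
the function.  For instance, a value whose nonzero_bits mask is
0x0000ffff in SImode has its top 16 bits known zero, hence
32 - 15 - 1 = 16 sign-bit copies.  */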
5374 /* Calculate the rtx_cost of a single instruction pattern. A return value of
5375 zero indicates an instruction pattern without a known cost. */
5378 pattern_cost (rtx pat, bool speed)
5383 /* Extract the single set rtx from the instruction pattern. We
5384 can't use single_set since we only have the pattern. We also
5385 consider PARALLELs of a normal set and a single comparison. In
5386 that case we use the cost of the non-comparison SET operation,
5387 which is most likely to be the real cost of this operation.  */
5388 if (GET_CODE (pat) == SET)
set = pat;
5390 else if (GET_CODE (pat) == PARALLEL)
5393 rtx comparison = NULL_RTX;
5395 for (i = 0; i < XVECLEN (pat, 0); i++)
5397 rtx x = XVECEXP (pat, 0, i);
5398 if (GET_CODE (x) == SET)
5400 if (GET_CODE (SET_SRC (x)) == COMPARE)
5415 if (!set && comparison)
set = comparison;

if (!set)
return 0;
5424 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5425 return cost > 0 ? cost : COSTS_N_INSNS (1);
5428 /* Calculate the cost of a single instruction. A return value of zero
5429 indicates an instruction pattern without a known cost. */
5432 insn_cost (rtx_insn *insn, bool speed)
5434 if (targetm.insn_cost)
5435 return targetm.insn_cost (insn, speed);
5437 return pattern_cost (PATTERN (insn), speed);
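/* Usage sketch (editorial, not part of this file): because unknown
patterns fall back to COSTS_N_INSNS (1), a caller can always sum
per-insn costs, e.g.

int total = 0;
for (rtx_insn *p = get_insns (); p; p = NEXT_INSN (p))
if (NONDEBUG_INSN_P (p))
total += insn_cost (p, true);

which is essentially what seq_cost below does for generated
sequences.  */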
5440 /* Return an estimate of the cost of computing SEQ.  */
5443 seq_cost (const rtx_insn *seq, bool speed)
5448 for (; seq; seq = NEXT_INSN (seq))
5450 set = single_set (seq);
if (set)
5452 cost += set_rtx_cost (set, speed);
5453 else if (NONDEBUG_INSN_P (seq))
{
5455 int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
if (this_cost > 0)
cost += this_cost;
else
cost++;
}
5466 /* Given an insn INSN and condition COND, return the condition in a
5467 canonical form to simplify testing by callers. Specifically:
5469 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5470 (2) Both operands will be machine operands; (cc0) will have been replaced.
5471 (3) If an operand is a constant, it will be the second operand.
5472 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5473 for GE, GEU, and LEU.
5475 If the condition cannot be understood, or is an inequality floating-point
5476 comparison which needs to be reversed, 0 will be returned.
5478 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5480 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5481 insn used in locating the condition was found. If a replacement test
5482 of the condition is desired, it should be placed in front of that
5483 insn and we will be sure that the inputs are still valid.
5485 If WANT_REG is nonzero, we wish the condition to be relative to that
5486 register, if possible. Therefore, do not canonicalize the condition
5487 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5488 to be a compare to a CC mode register.
5490 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST and at INSN.  */
5494 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5495 rtx_insn **earliest,
5496 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5499 rtx_insn *prev = insn;
5503 int reverse_code = 0;
5505 basic_block bb = BLOCK_FOR_INSN (insn);
5507 code = GET_CODE (cond);
5508 mode = GET_MODE (cond);
5509 op0 = XEXP (cond, 0);
5510 op1 = XEXP (cond, 1);
if (reverse)
5513 code = reversed_comparison_code (cond, insn);
5514 if (code == UNKNOWN)
return 0;
5520 /* If we are comparing a register with zero, see if the register is set
5521 in the previous insn to a COMPARE or a comparison operation. Perform
5522 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args in cse.c does.  */
5525 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5526 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5527 && op1 == CONST0_RTX (GET_MODE (op0))
&& op0 != want_reg)
{
5530 /* Set nonzero when we find something of interest.  */
rtx x = 0;
5533 /* If comparison with cc0, import actual comparison from compare insn.  */
if (op0 == cc0_rtx)
{
5537 if ((prev = prev_nonnote_insn (prev)) == 0
5538 || !NONJUMP_INSN_P (prev)
5539 || (set = single_set (prev)) == 0
5540 || SET_DEST (set) != cc0_rtx)
return 0;
5543 op0 = SET_SRC (set);
5544 op1 = CONST0_RTX (GET_MODE (op0));
5549 /* If this is a COMPARE, pick up the two things being compared. */
5550 if (GET_CODE (op0) == COMPARE)
5552 op1 = XEXP (op0, 1);
5553 op0 = XEXP (op0, 0);
5556 else if (!REG_P (op0))
break;
5559 /* Go back to the previous insn. Stop if it is not an INSN. We also
5560 stop if it isn't a single set or if it has a REG_INC note because
5561 we don't want to bother dealing with it. */
5563 prev = prev_nonnote_nondebug_insn (prev);
if (prev == 0
5566 || !NONJUMP_INSN_P (prev)
5567 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5568 /* In cfglayout mode, there do not have to be labels at the
5569 beginning of a block, or jumps at the end, so the previous
5570 conditions would not stop us when we reach bb boundary. */
5571 || BLOCK_FOR_INSN (prev) != bb)
break;
5574 set = set_of (op0, prev);
if (set
5577 && (GET_CODE (set) != SET
5578 || !rtx_equal_p (SET_DEST (set), op0)))
break;
5581 /* If this is setting OP0, get what it sets it to if it looks relevant.  */
5585 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5586 #ifdef FLOAT_STORE_FLAG_VALUE
5587 REAL_VALUE_TYPE fsfv;
#endif
5590 /* ??? We may not combine comparisons done in a CCmode with
5591 comparisons not done in a CCmode. This is to aid targets
5592 like Alpha that have an IEEE compliant EQ instruction, and
5593 a non-IEEE compliant BEQ instruction. The use of CCmode is
5594 actually artificial, simply to prevent the combination, but
5595 should not affect other platforms.
5597 However, we must allow VOIDmode comparisons to match either
5598 CCmode or non-CCmode comparison, because some ports have
5599 modeless comparisons inside branch patterns.
5601 ??? This mode check should perhaps look more like the mode check
5602 in simplify_comparison in combine. */
5603 if (((GET_MODE_CLASS (mode) == MODE_CC)
5604 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
&& mode != VOIDmode
5606 && inner_mode != VOIDmode)
break;
5608 if (GET_CODE (SET_SRC (set)) == COMPARE
5611 && val_signbit_known_set_p (inner_mode,
5613 #ifdef FLOAT_STORE_FLAG_VALUE
5615 && SCALAR_FLOAT_MODE_P (inner_mode)
5616 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5617 REAL_VALUE_NEGATIVE (fsfv)))
5620 && COMPARISON_P (SET_SRC (set))))
5622 else if (((code == EQ
5624 && val_signbit_known_set_p (inner_mode,
5626 #ifdef FLOAT_STORE_FLAG_VALUE
5628 && SCALAR_FLOAT_MODE_P (inner_mode)
5629 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5630 REAL_VALUE_NEGATIVE (fsfv)))
5633 && COMPARISON_P (SET_SRC (set)))
5638 else if ((code == EQ || code == NE)
5639 && GET_CODE (SET_SRC (set)) == XOR)
5640 /* Handle sequences like:

(set op0 (xor X Y))
5643 ...(eq|ne op0 (const_int 0))...

in which case:
5647 (eq op0 (const_int 0)) reduces to (eq X Y)
5648 (ne op0 (const_int 0)) reduces to (ne X Y)
5650 This is the form used by MIPS16, for example. */
5656 else if (reg_set_p (op0, prev))
5657 /* If this sets OP0, but not directly, we have to give up.  */
break;
5662 /* If the caller is expecting the condition to be valid at INSN,
5663 make sure X doesn't change before INSN. */
5664 if (valid_at_insn_p)
5665 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
break;
5667 if (COMPARISON_P (x))
5668 code = GET_CODE (x);
if (reverse_code)
{
5671 code = reversed_comparison_code (x, prev);
5672 if (code == UNKNOWN)
return 0;
reverse_code = 0;
}
5677 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5683 /* If constant is first, put it last. */
5684 if (CONSTANT_P (op0))
5685 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5687 /* If OP0 is the result of a comparison, we weren't able to find what
5688 was really being compared, so fail. */
if (!allow_cc_mode
5690 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
return 0;
5693 /* Canonicalize any ordered comparison with integers involving equality
5694 if we can do computations in the relevant mode and we do not overflow.  */
5697 scalar_int_mode op0_mode;
5698 if (CONST_INT_P (op1)
5699 && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
5700 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
5702 HOST_WIDE_INT const_val = INTVAL (op1);
5703 unsigned HOST_WIDE_INT uconst_val = const_val;
5704 unsigned HOST_WIDE_INT max_val
5705 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
switch (code)
{
case LE:
5710 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5711 code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
break;
5714 /* When cross-compiling, const_val might be sign-extended from
5715 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
case GE:
5717 if ((const_val & max_val)
5718 != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
5719 code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
break;
case LEU:
5723 if (uconst_val < max_val)
5724 code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
break;
case GEU:
5728 if (uconst_val != 0)
5729 code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
break;

default:
break;
}
5737 /* Never return CC0; return zero instead.  */
if (CC0_P (op0))
return 0;
5741 /* We promised to return a comparison. */
5742 rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5743 if (COMPARISON_P (ret))
return ret;
return 0;
5748 /* Given a jump insn JUMP, return the condition that will cause it to branch
5749 to its JUMP_LABEL. If the condition cannot be understood, or is an
5750 inequality floating-point comparison which needs to be reversed, 0 will
5753 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5754 insn used in locating the condition was found. If a replacement test
5755 of the condition is desired, it should be placed in front of that
5756 insn and we will be sure that the inputs are still valid. If EARLIEST
5757 is null, the returned condition will be valid at INSN.
5759 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5760 compare CC mode register.
5762 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5765 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5766 int valid_at_insn_p)
5772 /* If this is not a standard conditional jump, we can't parse it. */
if (!JUMP_P (jump)
5774 || ! any_condjump_p (jump))
return 0;
5776 set = pc_set (jump);
5778 cond = XEXP (SET_SRC (set), 0);
5780 /* If this branches to JUMP_LABEL when the condition is false, reverse
the condition.  */
int reverse
5783 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5784 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5786 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5787 allow_cc_mode, valid_at_insn_p);
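/* Usage sketch (editorial, not part of this file): a pass examining a
conditional jump might write

rtx_insn *earliest;
rtx cond = get_condition (jump, &earliest, 0, 1);
if (cond != 0 && GET_CODE (cond) == LT && CONST_INT_P (XEXP (cond, 1)))
...

and rely on the canonical form documented above: a comparison code at
the top, any constant moved to the second operand, and inputs that are
still valid at EARLIEST.  */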
5790 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5791 TARGET_MODE_REP_EXTENDED.
5793 Note that we assume that the property of
5794 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5795 narrower than mode B. I.e., if A is a mode narrower than B then in
5796 order to be able to operate on it in mode B, mode A needs to
5797 satisfy the requirements set by the representation of mode B. */
5800 init_num_sign_bit_copies_in_rep (void)
5802 opt_scalar_int_mode in_mode_iter;
5803 scalar_int_mode mode;
5805 FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
5806 FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
5808 scalar_int_mode in_mode = in_mode_iter.require ();
5811 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5812 extends to the next widest mode. */
5813 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5814 || GET_MODE_WIDER_MODE (mode).require () == in_mode);
5816 /* We are in in_mode. Count how many bits outside of mode
5817 have to be copies of the sign-bit. */
5818 FOR_EACH_MODE (i, mode, in_mode)
5820 /* This must always exist (for the last iteration it will be IN_MODE).  */
5822 scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
5824 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5825 /* We can only check sign-bit copies starting from the
5826 top-bit. In order to be able to check the bits we
5827 have already seen we pretend that subsequent bits
5828 have to be sign-bit copies too. */
5829 || num_sign_bit_copies_in_rep [in_mode][mode])
5830 num_sign_bit_copies_in_rep [in_mode][mode]
5831 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
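/* Worked example (editorial, assuming a target where
targetm.mode_rep_extended (SImode, DImode) == SIGN_EXTEND and no
integer mode lies between them): the walk above adds 64 - 32 = 32 to
num_sign_bit_copies_in_rep[DImode][SImode], so truncated_to_mode below
requires at least 32 + 1 = 33 sign-bit copies, i.e. a DImode value that
already fits SImode when sign-extended.  */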
5836 /* Suppose that truncation from the machine mode of X to MODE is not a
5837 no-op. See if there is anything special about X so that we can
5838 assume it already contains a truncated value of MODE. */
5841 truncated_to_mode (machine_mode mode, const_rtx x)
5843 /* This register has already been used in MODE without explicit
truncation.  */
5845 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
return true;
5848 /* See if we already satisfy the requirements of MODE. If yes we
5849 can just switch to MODE. */
5850 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5851 && (num_sign_bit_copies (x, GET_MODE (x))
5852 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
return true;

return false;
5858 /* Return true if RTX code CODE has a single sequence of zero or more
5859 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5860 entry in that case. */
5863 setup_reg_subrtx_bounds (unsigned int code)
5865 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
unsigned int i = 0;

5867 for (; format[i] != 'e'; ++i)
{
if (!format[i])
5870 /* No subrtxes.  Leave start and count as 0.  */
return true;
5872 if (format[i] == 'E' || format[i] == 'V')
return false;
}
5876 /* Record the sequence of 'e's. */
5877 rtx_all_subrtx_bounds[code].start = i;
do
++i;
5880 while (format[i] == 'e');
5881 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5882 /* rtl-iter.h relies on this. */
5883 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5885 for (; format[i]; ++i)
5886 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
return false;

return true;
5892 /* Initialize rtx_all_subrtx_bounds. */
5897 for (i = 0; i < NUM_RTX_CODE; i++)
5899 if (!setup_reg_subrtx_bounds (i))
5900 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5901 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5902 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5905 init_num_sign_bit_copies_in_rep ();
5908 /* Check whether this is a constant pool constant. */
5910 constant_pool_constant_p (rtx x)
5912 x = avoid_constant_pool_reference (x);
5913 return CONST_DOUBLE_P (x);
5916 /* If M is a bitmask that selects a field of low-order bits within an item but
5917 not the entire word, return the length of the field. Return -1 otherwise.
5918 M is used in machine mode MODE. */
5921 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5923 if (mode != VOIDmode)
5925 if (!HWI_COMPUTABLE_MODE_P (mode))
return -1;
5927 m &= GET_MODE_MASK (mode);
5930 return exact_log2 (m + 1);
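/* Worked examples (editorial): in SImode, m = 0x3f selects the low six
bits and exact_log2 (0x3f + 1) = 6; m = 0x70 does not start at bit 0,
so m + 1 = 0x71 is not a power of two and the result is -1.  */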
5933 /* Return the mode of MEM's address. */
5936 get_address_mode (rtx mem)
5940 gcc_assert (MEM_P (mem));
5941 mode = GET_MODE (XEXP (mem, 0));
5942 if (mode != VOIDmode)
5943 return as_a <scalar_int_mode> (mode);
5944 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5947 /* Split up a CONST_DOUBLE or integer constant rtx
5948 into two rtx's for single words,
5949 storing in *FIRST the word that comes first in memory in the target
5950 and in *SECOND the other.
5952 TODO: This function needs to be rewritten to work on any size integer.  */
5956 split_double (rtx value, rtx *first, rtx *second)
5958 if (CONST_INT_P (value))
5960 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5962 /* In this case the CONST_INT holds both target words.
5963 Extract the bits from it into two word-sized pieces.
5964 Sign extend each half to HOST_WIDE_INT. */
5965 unsigned HOST_WIDE_INT low, high;
5966 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5967 unsigned bits_per_word = BITS_PER_WORD;
5969 /* Set sign_bit to the most significant bit of a word. */
sign_bit = 1;
5971 sign_bit <<= bits_per_word - 1;
5973 /* Set mask so that all bits of the word are set. We could
5974 have used 1 << BITS_PER_WORD instead of basing the
5975 calculation on sign_bit. However, on machines where
5976 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5977 compiler warning, even though the code would never be executed.  */
5979 mask = sign_bit << 1;
mask--;
5982 /* Set sign_extend as any remaining bits. */
5983 sign_extend = ~mask;
5985 /* Pick the lower word and sign-extend it. */
5986 low = INTVAL (value);
low &= mask;
if (low & sign_bit)
low |= sign_extend;
5991 /* Pick the higher word, shifted to the least significant
5992 bits, and sign-extend it. */
5993 high = INTVAL (value);
5994 high >>= bits_per_word - 1;
high >>= 1;
high &= mask;
5997 if (high & sign_bit)
5998 high |= sign_extend;
6000 /* Store the words in the target machine order. */
6001 if (WORDS_BIG_ENDIAN)
6003 *first = GEN_INT (high);
6004 *second = GEN_INT (low);
6008 *first = GEN_INT (low);
6009 *second = GEN_INT (high);
6014 /* The rule for using CONST_INT for a wider mode
6015 is that we regard the value as signed.
6016 So sign-extend it. */
6017 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6018 if (WORDS_BIG_ENDIAN)
{
*first = high;
*second = value;
}
else
{
*first = value;
*second = high;
}
6030 else if (GET_CODE (value) == CONST_WIDE_INT)
6032 /* All of this is scary code and needs to be converted to
6033 properly work with any size integer. */
6034 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6035 if (WORDS_BIG_ENDIAN)
6037 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6038 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6042 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6043 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6046 else if (!CONST_DOUBLE_P (value))
6048 if (WORDS_BIG_ENDIAN)
{
6050 *first = const0_rtx;
*second = value;
}
else
{
*first = value;
6056 *second = const0_rtx;
}
6059 else if (GET_MODE (value) == VOIDmode
6060 /* This is the old way we did CONST_DOUBLE integers. */
6061 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6063 /* In an integer, the words are defined as most and least significant.
6064 So order them by the target's convention. */
6065 if (WORDS_BIG_ENDIAN)
6067 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6068 *second = GEN_INT (CONST_DOUBLE_LOW (value));
6072 *first = GEN_INT (CONST_DOUBLE_LOW (value));
6073 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6080 /* Note, this converts the REAL_VALUE_TYPE to the target's
6081 format, splits up the floating point double and outputs
6082 exactly 32 bits of it into each of l[0] and l[1] --
6083 not necessarily BITS_PER_WORD bits. */
6084 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6086 /* If 32 bits is an entire word for the target, but not for the host,
6087 then sign-extend on the host so that the number will look the same
6088 way on the host that it would on the target. See for instance
6089 simplify_unary_operation.  The #if is needed to avoid compiler warnings.  */
6092 #if HOST_BITS_PER_LONG > 32
6093 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6095 if (l[0] & ((long) 1 << 31))
6096 l[0] |= ((unsigned long) (-1) << 32);
6097 if (l[1] & ((long) 1 << 31))
6098 l[1] |= ((unsigned long) (-1) << 32);
6102 *first = GEN_INT (l[0]);
6103 *second = GEN_INT (l[1]);
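/* Worked example (editorial, 32-bit words): for the DImode constant
0x100000002, the low word is 2 and the high word is 1, so a
little-endian-word target gets *first = GEN_INT (2), *second =
GEN_INT (1), and a WORDS_BIG_ENDIAN target the reverse.  */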
6107 /* Return true if X is a sign_extract or zero_extract from the least significant bit.  */
6111 lsb_bitfield_op_p (rtx x)
6113 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6115 machine_mode mode = GET_MODE (XEXP (x, 0));
6116 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6117 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6118 poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6120 return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
}
return false;
6125 /* Strip outer address "mutations" from LOC and return a pointer to the
6126 inner value. If OUTER_CODE is nonnull, store the code of the innermost
6127 stripped expression there.
6129 "Mutations" either convert between modes or apply some kind of
6130 extension, truncation or alignment. */
6133 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
for (;;)
{
6137 enum rtx_code code = GET_CODE (*loc);
6138 if (GET_RTX_CLASS (code) == RTX_UNARY)
6139 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6140 used to convert between pointer sizes. */
6141 loc = &XEXP (*loc, 0);
6142 else if (lsb_bitfield_op_p (*loc))
6143 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6144 acts as a combined truncation and extension. */
6145 loc = &XEXP (*loc, 0);
6146 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6147 /* (and ... (const_int -X)) is used to align to X bytes. */
6148 loc = &XEXP (*loc, 0);
6149 else if (code == SUBREG
6150 && !OBJECT_P (SUBREG_REG (*loc))
6151 && subreg_lowpart_p (*loc))
6152 /* (subreg (operator ...) ...) inside an AND is used for mode conversion too.  */
6154 loc = &SUBREG_REG (*loc);
else
{
if (outer_code)
*outer_code = code;
return loc;
}
}
}
6162 /* Return true if CODE applies some kind of scale. The scaled value is
6163 the first operand and the scale is the second.  */
6166 binary_scale_code_p (enum rtx_code code)
6168 return (code == MULT
|| code == ASHIFT
6170 /* Needed by ARM targets.  */
|| code == ASHIFTRT
|| code == LSHIFTRT
|| code == ROTATE
6174 || code == ROTATERT);
6177 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6178 (see address_info). Return null otherwise. */
6181 get_base_term (rtx *inner)
6183 if (GET_CODE (*inner) == LO_SUM)
6184 inner = strip_address_mutations (&XEXP (*inner, 0));
if (REG_P (*inner)
|| MEM_P (*inner)
6187 || GET_CODE (*inner) == SUBREG
6188 || GET_CODE (*inner) == SCRATCH)
return inner;
return NULL;
6193 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6194 (see address_info). Return null otherwise. */
6197 get_index_term (rtx *inner)
6199 /* At present, only constant scales are allowed. */
6200 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6201 inner = strip_address_mutations (&XEXP (*inner, 0));
if (REG_P (*inner)
|| MEM_P (*inner)
6204 || GET_CODE (*inner) == SUBREG
6205 || GET_CODE (*inner) == SCRATCH)
return inner;
return NULL;
6210 /* Set the segment part of address INFO to LOC, given that INNER is the unmutated value.  */
6214 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6216 gcc_assert (!info->segment);
6217 info->segment = loc;
6218 info->segment_term = inner;
6221 /* Set the base part of address INFO to LOC, given that INNER is the unmutated value.  */
6225 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6227 gcc_assert (!info->base);
info->base = loc;
6229 info->base_term = inner;
6232 /* Set the index part of address INFO to LOC, given that INNER is the unmutated value.  */
6236 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6238 gcc_assert (!info->index);
info->index = loc;
6240 info->index_term = inner;
6243 /* Set the displacement part of address INFO to LOC, given that INNER
6244 is the constant term. */
6247 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6249 gcc_assert (!info->disp);
info->disp = loc;
6251 info->disp_term = inner;
6254 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6255 rest of INFO accordingly. */
6258 decompose_incdec_address (struct address_info *info)
6260 info->autoinc_p = true;
6262 rtx *base = &XEXP (*info->inner, 0);
6263 set_address_base (info, base, base);
6264 gcc_checking_assert (info->base == info->base_term);
6266 /* These addresses are only valid when the size of the addressed value is known.  */
6268 gcc_checking_assert (info->mode != VOIDmode);
6271 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6272 of INFO accordingly. */
6275 decompose_automod_address (struct address_info *info)
6277 info->autoinc_p = true;
6279 rtx *base = &XEXP (*info->inner, 0);
6280 set_address_base (info, base, base);
6281 gcc_checking_assert (info->base == info->base_term);
6283 rtx plus = XEXP (*info->inner, 1);
6284 gcc_assert (GET_CODE (plus) == PLUS);
6286 info->base_term2 = &XEXP (plus, 0);
6287 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6289 rtx *step = &XEXP (plus, 1);
6290 rtx *inner_step = strip_address_mutations (step);
6291 if (CONSTANT_P (*inner_step))
6292 set_address_disp (info, step, inner_step);
6294 set_address_index (info, step, inner_step);
6297 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6298 values in [PTR, END). Return a pointer to the end of the used array. */
6301 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6304 if (GET_CODE (x) == PLUS)
6306 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6307 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6311 gcc_assert (ptr != end);
6317 /* Evaluate the likelihood of X being a base or index value, returning
6318 positive if it is likely to be a base, negative if it is likely to be
6319 an index, and 0 if we can't tell. Make the magnitude of the return
6320 value reflect the amount of confidence we have in the answer.
6322 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6325 baseness (rtx x, machine_mode mode, addr_space_t as,
6326 enum rtx_code outer_code, enum rtx_code index_code)
6328 /* Believe *_POINTER unless the address shape requires otherwise. */
6329 if (REG_P (x) && REG_POINTER (x))
return 2;
6331 if (MEM_P (x) && MEM_POINTER (x))
return 2;
6334 if (REG_P (x) && HARD_REGISTER_P (x))
6336 /* X is a hard register. If it only fits one of the base
6337 or index classes, choose that interpretation. */
6338 int regno = REGNO (x);
6339 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6340 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6341 if (base_p != index_p)
6342 return base_p ? 1 : -1;
6347 /* INFO->INNER describes a normal, non-automodified address.
6348 Fill in the rest of INFO accordingly. */
6351 decompose_normal_address (struct address_info *info)
{
6353 /* Treat the address as the sum of up to four values.  */
rtx *ops[4];
6355 size_t n_ops = extract_plus_operands (info->inner, ops,
6356 ops + ARRAY_SIZE (ops)) - ops;

6358 /* If there is more than one component, any base component is in a PLUS.  */
if (n_ops > 1)
6360 info->base_outer_code = PLUS;

6362 /* Try to classify each sum operand now.  Leave those that could be
6363 either a base or an index in OPS.  */
rtx *inner_ops[4];
size_t out = 0;
6366 for (size_t in = 0; in < n_ops; ++in)
{
rtx *loc = ops[in];
6369 rtx *inner = strip_address_mutations (loc);
6370 if (CONSTANT_P (*inner))
6371 set_address_disp (info, loc, inner);
6372 else if (GET_CODE (*inner) == UNSPEC)
6373 set_address_segment (info, loc, inner);
else
{
6376 /* The only other possibilities are a base or an index.  */
6377 rtx *base_term = get_base_term (inner);
6378 rtx *index_term = get_index_term (inner);
6379 gcc_assert (base_term || index_term);
if (!base_term)
6381 set_address_index (info, loc, index_term);
6382 else if (!index_term)
6383 set_address_base (info, loc, base_term);
else
{
6386 gcc_assert (base_term == index_term);
ops[out] = loc;
6388 inner_ops[out] = base_term;
++out;
}
}
}

6394 /* Classify the remaining OPS members as bases and indexes.  */
if (out == 1)
{
6397 /* If we haven't seen a base or an index yet, assume that this is
6398 the base.  If we were confident that another term was the base
6399 or index, treat the remaining operand as the other kind.  */
if (!info->base)
6401 set_address_base (info, ops[0], inner_ops[0]);
else if (!info->index)
6403 set_address_index (info, ops[0], inner_ops[0]);
else
gcc_unreachable ();
}
else if (out == 2)
{
6407 /* In the event of a tie, assume the base comes first.  */
6408 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
GET_CODE (*ops[1]))
6410 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6411 GET_CODE (*ops[0])))
{
6413 set_address_base (info, ops[0], inner_ops[0]);
6414 set_address_index (info, ops[1], inner_ops[1]);
}
else
{
6418 set_address_base (info, ops[1], inner_ops[1]);
6419 set_address_index (info, ops[0], inner_ops[0]);
}
}
else
6423 gcc_assert (out == 0);
}
6426 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6427 or VOIDmode if not known. AS is the address space associated with LOC.
6428 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6431 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6432 addr_space_t as, enum rtx_code outer_code)
6434 memset (info, 0, sizeof (*info));
info->mode = mode;
info->as = as;
6437 info->addr_outer_code = outer_code;
6439 info->inner = strip_address_mutations (loc, &outer_code);
6440 info->base_outer_code = outer_code;
6441 switch (GET_CODE (*info->inner))
{
case PRE_DEC:
case PRE_INC:
case POST_DEC:
case POST_INC:
6447 decompose_incdec_address (info);
break;

case PRE_MODIFY:
case POST_MODIFY:
6452 decompose_automod_address (info);
break;

default:
6456 decompose_normal_address (info);
break;
}
6461 /* Describe address operand LOC in INFO. */
6464 decompose_lea_address (struct address_info *info, rtx *loc)
6466 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6469 /* Describe the address of MEM X in INFO. */
6472 decompose_mem_address (struct address_info *info, rtx x)
6474 gcc_assert (MEM_P (x));
6475 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6476 MEM_ADDR_SPACE (x), MEM);
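/* Usage sketch (editorial, not part of this file): for a MEM X such as
(mem:SI (plus:SI (mult:SI (reg:SI 1) (const_int 4)) (reg:SI 2))),

struct address_info info;
decompose_mem_address (&info, X);

leaves *info.base_term == (reg:SI 2), *info.index_term == (reg:SI 1),
get_index_scale (&info) == 4, and info.disp and info.segment null.  */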
6479 /* Update INFO after a change to the address it describes. */
6482 update_address (struct address_info *info)
6484 decompose_address (info, info->outer, info->mode, info->as,
6485 info->addr_outer_code);
6488 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6489 more complicated than that. */
6492 get_index_scale (const struct address_info *info)
6494 rtx index = *info->index;
6495 if (GET_CODE (index) == MULT
6496 && CONST_INT_P (XEXP (index, 1))
6497 && info->index_term == &XEXP (index, 0))
6498 return INTVAL (XEXP (index, 1));
6500 if (GET_CODE (index) == ASHIFT
6501 && CONST_INT_P (XEXP (index, 1))
6502 && info->index_term == &XEXP (index, 0))
6503 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6505 if (info->index == info->index_term)
return 1;

return 0;
6511 /* Return the "index code" of INFO, in the form required by
6515 get_index_code (const struct address_info *info)
if (info->index)
6518 return GET_CODE (*info->index);

if (info->disp)
6521 return GET_CODE (*info->disp);

return SCRATCH;
6526 /* Return true if RTL X contains a SYMBOL_REF. */
6529 contains_symbol_ref_p (const_rtx x)
6531 subrtx_iterator::array_type array;
6532 FOR_EACH_SUBRTX (iter, array, x, ALL)
6533 if (SYMBOL_REF_P (*iter))
return true;
return false;
6539 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6542 contains_symbolic_reference_p (const_rtx x)
6544 subrtx_iterator::array_type array;
6545 FOR_EACH_SUBRTX (iter, array, x, ALL)
6546 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
return true;
return false;
6552 /* Return true if X contains a thread-local symbol. */
6555 tls_referenced_p (const_rtx x)
6557 if (!targetm.have_tls)
return false;
6560 subrtx_iterator::array_type array;
6561 FOR_EACH_SUBRTX (iter, array, x, ALL)
6562 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
return true;
return false;
6567 /* Return true if reg REGNO with mode REG_MODE would be clobbered by the
6568 clobber_high operand in CLOBBER_HIGH_OP. */
6571 reg_is_clobbered_by_clobber_high (unsigned int regno, machine_mode reg_mode,
6572 const_rtx clobber_high_op)
6574 unsigned int clobber_regno = REGNO (clobber_high_op);
6575 machine_mode clobber_mode = GET_MODE (clobber_high_op);
6576 unsigned char regno_nregs = hard_regno_nregs (regno, reg_mode);
6578 /* Clobber high should always span exactly one register. */
6579 gcc_assert (REG_NREGS (clobber_high_op) == 1);
6581 /* Clobber high needs to match with one of the registers in X. */
6582 if (clobber_regno < regno || clobber_regno >= regno + regno_nregs)
return false;
6585 gcc_assert (reg_mode != BLKmode && clobber_mode != BLKmode);
6587 if (reg_mode == VOIDmode)
6588 return clobber_mode != VOIDmode;
6590 /* Clobber high will clobber if its size might be greater than the size of register regno.  */
6592 return maybe_gt (exact_div (GET_MODE_SIZE (reg_mode), regno_nregs),
6593 GET_MODE_SIZE (clobber_mode));