/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.

   LOG_LINKS does not have links for uses of CC0; it doesn't
   need to, because the insn that sets CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
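
/* An illustrative sketch (not part of the original sources): given two
   insns joined by a LOG_LINK, with hypothetical register numbers,

       (set (reg:SI 116) (const_int 5))
       (set (reg:SI 117) (plus:SI (reg:SI 116) (reg:SI 115)))

   the combiner substitutes the first SET's source into the second,
   producing

       (set (reg:SI 117) (plus:SI (reg:SI 115) (const_int 5)))

   If the machine description recognizes the result, the new pattern is
   installed and the first insn is deleted; otherwise the substitution
   is undone.  */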
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
         to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value             the last value assigned
     last_set_label             records the value of label_tick when the
                                register was assigned
     last_set_table_tick        records the value of label_tick when a
                                value using the register is assigned
     last_set_invalid           set to nonzero when it is not valid
                                to use the value of this register in some
                                other expression

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */
  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */
  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */
  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */
  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */
  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;
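
  /* Illustrative example (not part of the original sources): on a machine
     where byte loads zero extend, a QImode load into an SImode register
     leaves nonzero_bits == 0xff, so a later (and:SI (reg) (const_int 255))
     is known to be redundant.  Similarly, sign_bit_copies == 25 for an
     SImode register means bits 31 down to 7 all equal the sign bit, i.e.
     the value is already a valid sign-extended QImode quantity.  */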
  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;
/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;
/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;
/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;

/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;
/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN) \
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
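
/* Usage sketch (illustrative, not part of the original sources): a scan
   over the links of an insn looks like

       struct insn_link *links;
       FOR_EACH_LOG_LINK (links, insn)
         if (links->regno == regno)
           break;

   leaving LINKS either at the matching entry or NULL, exactly as the
   duplicate-link check in create_log_links below uses it.  */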
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
                                          sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos, *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
                                         scalar_int_mode,
                                         unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
                                                scalar_int_mode,
                                                unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
                          rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
                              int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
                            rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
                              unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
                              scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
                          unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
                                     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
                                   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
                            HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
                                 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
                                             rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx,
                              rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */

static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
                                bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn uses DEST if
         it is mentioned in the destination or the source.  Otherwise, we
         need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
          && GET_CODE (SET_DEST (x)) != PC
          && !REG_P (SET_DEST (x))
          && ! (GET_CODE (SET_DEST (x)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (x)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
        break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (dest == XEXP (x, i)
              || (REG_P (dest) && REG_P (XEXP (x, i))
                  && REGNO (dest) == REGNO (XEXP (x, i))))
            this_result = loc;
          else
            this_result = find_single_use_1 (dest, &XEXP (x, i));

          if (result == NULL)
            result = this_result;
          else if (this_result)
            /* Duplicate usage.  */
            return NULL;
        }
      else if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            {
              if (XVECEXP (x, i, j) == dest
                  || (REG_P (dest)
                      && REG_P (XVECEXP (x, i, j))
                      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
                this_result = loc;
              else
                this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

              if (result == NULL)
                result = this_result;
              else if (this_result)
                return NULL;
            }
        }
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
          || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
        return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
        *ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
        FOR_EACH_LOG_LINK (link, next)
          if (link->insn == insn && link->regno == REGNO (dest))
            break;
        if (link)
          {
            result = find_single_use_1 (dest, &PATTERN (next));
            if (ploc)
              *ploc = next;
            return result;
          }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
         that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
                  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
         CONST_INT is not valid, because after the replacement, the
         original mode would be gone.  Unfortunately, we can't tell
         when do_SUBST is called to replace the operand thereof, so we
         perform this test on oldval instead, checking whether an
         invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
                    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
                    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)  do_SUBST (&(INTO), (NEWVAL))
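
/* Usage sketch (illustrative, not part of the original sources):
   replacing the first operand of X while recording an undo entry is
   written

       SUBST (XEXP (x, 0), new_rtx);

   If the combination attempt later fails, undo_all walks undobuf.undos
   and stores each old_contents back through its where pointer.  */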
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */
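
/* A worked example (illustrative, not part of the original sources): if
   INSN_COST (i2) == 4 and INSN_COST (i3) == 4 with no i1/i0, then
   old_cost == 8; combining into a single replacement pattern whose
   pattern_cost is 6 gives new_cost == 6 <= 8, so the combination is
   allowed.  Had the replacement cost 10, it would be rejected.  */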
static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
                       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_rtx_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
        {
          i0_cost = INSN_COST (i0);
          old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
                      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
        }
      else
        {
          old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
                      ? i1_cost + i2_cost + i3_cost : 0);
          i0_cost = 0;
        }
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement pattern_costs.  */
  new_i3_cost = pattern_cost (newpat, optimize_this_for_speed_p);
  if (newi2pat)
    {
      new_i2_cost = pattern_cost (newi2pat, optimize_this_for_speed_p);
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
                 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      new_other_cost = pattern_cost (newotherpat, optimize_this_for_speed_p);
      if (old_other_cost > 0 && new_other_cost > 0)
        {
          old_cost += old_other_cost;
          new_cost += new_other_cost;
        }
      else
        old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
               reject ? "rejecting" : "allowing");
      if (i0)
        fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
        fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
        fprintf (dump_file, "replacement costs %d + %d = %d\n",
                 new_i2_cost, new_i3_cost, new_cost);
      else
        fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
        INSN_COST (i0) = 0;
    }

  return true;
}
/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
        {
          next = NEXT_INSN (insn);
          if (INSN_P (insn) && noop_move_p (insn))
            {
              if (dump_file)
                fprintf (dump_file, "deleting noop move %d\n",
                         INSN_UID (insn));

              delete_insn_and_edges (insn);
            }
        }
    }
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
          && regno == HARD_FRAME_POINTER_REGNUM
          && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
          && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}
/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          /* Log links are created only once.  */
          gcc_assert (!LOG_LINKS (insn));

          FOR_EACH_INSN_DEF (def, insn)
            {
              unsigned int regno = DF_REF_REGNO (def);
              rtx_insn *use_insn;

              if (!next_use[regno])
                continue;

              if (!can_combine_def_p (def))
                continue;

              use_insn = next_use[regno];
              next_use[regno] = NULL;

              if (BLOCK_FOR_INSN (use_insn) != bb)
                continue;

              /* flow.c claimed:

                 We don't build a LOG_LINK for hard registers contained
                 in ASM_OPERANDs.  If these registers get replaced,
                 we might wind up changing the semantics of the insn,
                 even if reload can make what appear to be valid
                 assignments later.  */
              if (regno < FIRST_PSEUDO_REGISTER
                  && asm_noperands (PATTERN (use_insn)) >= 0)
                continue;

              /* Don't add duplicate links between instructions.  */
              struct insn_link *links;
              FOR_EACH_LOG_LINK (links, use_insn)
                if (insn == links->insn && regno == links->regno)
                  break;

              if (!links)
                LOG_LINKS (use_insn)
                  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
            }

          FOR_EACH_INSN_USE (use, insn)
            if (can_combine_use_p (use))
              next_use[DF_REF_REGNO (use)] = insn;
        }
    }

  free (next_use);
}
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;
  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
        label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
        if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
          {
            rtx links;

            subst_low_luid = DF_INSN_LUID (insn);
            subst_insn = insn;

            note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
                         insn);
            record_dead_and_set_regs (insn);

            if (AUTO_INC_DEC)
              for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
                if (REG_NOTE_KIND (links) == REG_INC)
                  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
                                                    insn);

            /* Record the current insn_cost of this instruction.  */
            if (NONJUMP_INSN_P (insn))
              INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
            if (dump_file)
              {
                fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
                dump_insn_slim (dump_file, insn);
              }
          }
    }

  nonzero_sign_valid = 1;
  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
         be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
        continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
        label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
           insn != NEXT_INSN (BB_END (this_basic_block));
           insn = next ? next : NEXT_INSN (insn))
        {
          next = 0;
          if (!NONDEBUG_INSN_P (insn))
            continue;

          while (last_combined_insn
                 && (!NONDEBUG_INSN_P (last_combined_insn)
                     || last_combined_insn->deleted ()))
            last_combined_insn = PREV_INSN (last_combined_insn);
          if (last_combined_insn == NULL_RTX
              || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
              || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
            last_combined_insn = insn;

          /* See if we know about function return values before this
             insn based upon SUBREG flags.  */
          check_promoted_subreg (insn, PATTERN (insn));

          /* See if we can find hardregs and subreg of pseudos in
             narrower modes.  This could help turning TRUNCATEs
             into SUBREGs.  */
          note_uses (&PATTERN (insn), record_truncated_values, NULL);
          /* Try this insn with each insn it links back to.  */

          FOR_EACH_LOG_LINK (links, insn)
            if ((next = try_combine (insn, links->insn, NULL,
                                     NULL, &new_direct_jump_p,
                                     last_combined_insn)) != 0)
              {
                statistics_counter_event (cfun, "two-insn combine", 1);
                goto retry;
              }
          /* Try each sequence of three linked insns ending with this one.  */

          if (max_combine >= 3)
            FOR_EACH_LOG_LINK (links, insn)
              {
                rtx_insn *link = links->insn;

                /* If the linked insn has been replaced by a note, then there
                   is no point in pursuing this chain any further.  */
                if (NOTE_P (link))
                  continue;

                FOR_EACH_LOG_LINK (nextlinks, link)
                  if ((next = try_combine (insn, link, nextlinks->insn,
                                           NULL, &new_direct_jump_p,
                                           last_combined_insn)) != 0)
                    {
                      statistics_counter_event (cfun, "three-insn combine", 1);
                      goto retry;
                    }
              }
          /* Try to combine a jump insn that uses CC0
             with a preceding insn that sets CC0, and maybe with its
             logical predecessor as well.
             This is how we make decrement-and-branch insns.
             We need this special code because data flow connections
             via CC0 do not get entered in LOG_LINKS.  */

          if (HAVE_cc0
              && JUMP_P (insn)
              && (prev = prev_nonnote_insn (insn)) != 0
              && NONJUMP_INSN_P (prev)
              && sets_cc0_p (PATTERN (prev)))
            {
              if ((next = try_combine (insn, prev, NULL, NULL,
                                       &new_direct_jump_p,
                                       last_combined_insn)) != 0)
                goto retry;

              FOR_EACH_LOG_LINK (nextlinks, prev)
                if ((next = try_combine (insn, prev, nextlinks->insn,
                                         NULL, &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  goto retry;
            }

          /* Do the same for an insn that explicitly references CC0.  */
          if (HAVE_cc0 && NONJUMP_INSN_P (insn)
              && (prev = prev_nonnote_insn (insn)) != 0
              && NONJUMP_INSN_P (prev)
              && sets_cc0_p (PATTERN (prev))
              && GET_CODE (PATTERN (insn)) == SET
              && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
            {
              if ((next = try_combine (insn, prev, NULL, NULL,
                                       &new_direct_jump_p,
                                       last_combined_insn)) != 0)
                goto retry;

              FOR_EACH_LOG_LINK (nextlinks, prev)
                if ((next = try_combine (insn, prev, nextlinks->insn,
                                         NULL, &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  goto retry;
            }

          /* Finally, see if any of the insns that this insn links to
             explicitly references CC0.  If so, try this insn, that insn,
             and its predecessor if it sets CC0.  */
          if (HAVE_cc0)
            {
              FOR_EACH_LOG_LINK (links, insn)
                if (NONJUMP_INSN_P (links->insn)
                    && GET_CODE (PATTERN (links->insn)) == SET
                    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
                    && (prev = prev_nonnote_insn (links->insn)) != 0
                    && NONJUMP_INSN_P (prev)
                    && sets_cc0_p (PATTERN (prev))
                    && (next = try_combine (insn, links->insn,
                                            prev, NULL, &new_direct_jump_p,
                                            last_combined_insn)) != 0)
                  goto retry;
            }
          /* Try combining an insn with two different insns whose results it
             uses.  */
          if (max_combine >= 3)
            FOR_EACH_LOG_LINK (links, insn)
              for (nextlinks = links->next; nextlinks;
                   nextlinks = nextlinks->next)
                if ((next = try_combine (insn, links->insn,
                                         nextlinks->insn, NULL,
                                         &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  {
                    statistics_counter_event (cfun, "three-insn combine", 1);
                    goto retry;
                  }
          /* Try four-instruction combinations.  */
          if (max_combine >= 4)
            FOR_EACH_LOG_LINK (links, insn)
              {
                struct insn_link *next1;
                rtx_insn *link = links->insn;

                /* If the linked insn has been replaced by a note, then there
                   is no point in pursuing this chain any further.  */
                if (NOTE_P (link))
                  continue;

                FOR_EACH_LOG_LINK (next1, link)
                  {
                    rtx_insn *link1 = next1->insn;
                    if (NOTE_P (link1))
                      continue;
                    /* I0 -> I1 -> I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link1)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun,
                                                    "four-insn combine", 1);
                          goto retry;
                        }
                    /* I0, I1 -> I2, I2 -> I3.  */
                    for (nextlinks = next1->next; nextlinks;
                         nextlinks = nextlinks->next)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun,
                                                    "four-insn combine", 1);
                          goto retry;
                        }
                  }

                for (next1 = links->next; next1; next1 = next1->next)
                  {
                    rtx_insn *link1 = next1->insn;
                    if (NOTE_P (link1))
                      continue;
                    /* I0 -> I2; I1, I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun,
                                                    "four-insn combine", 1);
                          goto retry;
                        }
                    /* I0 -> I1; I1, I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link1)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun,
                                                    "four-insn combine", 1);
                          goto retry;
                        }
                  }
              }
          /* Try this insn with each REG_EQUAL note it links back to.  */
          FOR_EACH_LOG_LINK (links, insn)
            {
              rtx set, note;
              rtx_insn *temp = links->insn;
              if ((set = single_set (temp)) != 0
                  && (note = find_reg_equal_equiv_note (temp)) != 0
                  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
                  /* Avoid using a register that may already have been
                     marked dead by an earlier instruction.  */
                  && ! unmentioned_reg_p (note, SET_SRC (set))
                  && (GET_MODE (note) == VOIDmode
                      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
                      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
                         && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
                             || (GET_MODE (XEXP (SET_DEST (set), 0))
                                 == GET_MODE (note))))))
                {
                  /* Temporarily replace the set's source with the
                     contents of the REG_EQUAL note.  The insn will
                     be deleted or recognized by try_combine.  */
                  rtx orig_src = SET_SRC (set);
                  rtx orig_dest = SET_DEST (set);
                  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
                    SET_DEST (set) = XEXP (SET_DEST (set), 0);
                  SET_SRC (set) = note;
                  i2mod = temp;
                  i2mod_old_rhs = copy_rtx (orig_src);
                  i2mod_new_rhs = copy_rtx (note);
                  next = try_combine (insn, i2mod, NULL, NULL,
                                      &new_direct_jump_p,
                                      last_combined_insn);
                  i2mod = NULL;
                  if (next)
                    {
                      statistics_counter_event (cfun,
                                                "insn-with-note combine", 1);
                      goto retry;
                    }
                  SET_SRC (set) = orig_src;
                  SET_DEST (set) = orig_dest;
                }
            }

          if (!NOTE_P (insn))
            record_dead_and_set_regs (insn);

retry:
          ;
        }
    }
  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
        next = undo->next;
        free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  /* Clear everything up to (but not including) sign_bit_copies, i.e. the
     per-pass last_xxx fields, while preserving the per-function ones.  */
  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
        continue;

      /* Determine, if possible, whether all call sites of the current
         function lie within the current compilation unit.  (This does
         take into account the exporting of a function via taking its
         address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
         (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
         TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
         see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
                                     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
         (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
        continue;
      /* (b) The mode of the register is the same as the mode of
         the argument as it is passed;  */
      if (mode3 != mode4)
        continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
        ;
      /* (c.1) All callers are from the current compilation unit.  If that's
         the case we don't have to rely on an ABI, we only have to know
         what we're generating right now, and we know that we will do the
         mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
        continue;
      /* (c.2) The combination of the two promotions is useful.  This is
         true when the signs match, or if the first promotion is unsigned.
         In the latter case, (sign_extend (zero_extend x)) is the same as
         (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
        uns3 = true;
      else if (uns3)
        continue;

      /* Record that the value was promoted from mode1 to mode3,
         so that any sign extension at the head of the current
         function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
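
/* A worked example (illustrative, not part of the original sources): with
   MODE == QImode (precision 8) and PREC == 32, SRC == (const_int 0x80) is
   positive as a CONST_INT but has the QImode sign bit set, so it is
   rewritten as 0x80 | ~GET_MODE_MASK (QImode), i.e. all bits above bit 7
   set: the value -128 that a machine which sign-extends short immediates
   would actually produce.  */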
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
                           rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
        reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
                                           BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
        bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
        {
          unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
          if (num == 0 || numeq > num)
            num = numeq;
        }
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
        rsp->sign_bit_copies = num;
    }
}
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
         say what its contents were.  */
      && ! REGNO_REG_SET_P
           (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
        {
          rsp->nonzero_bits = GET_MODE_MASK (mode);
          rsp->sign_bit_copies = 1;
          return;
        }

      /* If this register is being initialized using itself, and the
         register is uninitialized in this basic block, and there are
         no LOG_LINKS which set the register, then part of the
         register is uninitialized.  In that case we can't assume
         anything about the number of nonzero bits.

         ??? We could do better if we checked this in
         reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
         could avoid making assumptions about the insn which initially
         sets the register, while still using the information in other
         insns.  We would have to be careful to check every insn
         involved in the combination.  */

      if (insn
          && reg_referenced_p (x, PATTERN (insn))
          && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
                               REGNO (x)))
        {
          struct insn_link *link;

          FOR_EACH_LOG_LINK (link, insn)
            if (dead_or_set_p (link->insn, x))
              break;
          if (!link)
            {
              rsp->nonzero_bits = GET_MODE_MASK (mode);
              rsp->sign_bit_copies = 1;
              return;
            }
        }

      /* If this is a complex assignment, see if we can convert it into a
         simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
         set what we know about X.  */

      if (SET_DEST (set) == x
          || (paradoxical_subreg_p (SET_DEST (set))
              && SUBREG_REG (SET_DEST (set)) == x))
        update_rsp_from_reg_equal (rsp, insn, set, x);
      else
        {
          rsp->nonzero_bits = GET_MODE_MASK (mode);
          rsp->sign_bit_copies = 1;
        }
    }
}
/* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
   optionally insns that were previously combined into I3 or that will be
   combined into the merger of INSN and I3.  The order is PRED, PRED2,
   INSN, SUCC, SUCC2, I3.

   Return 0 if the combination is not allowed for any reason.

   If the combination is allowed, *PDEST will be set to the single
   destination of INSN and *PSRC to the single source, and this function
   returns 1.  */

static int
can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
               rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ,
               rtx_insn *succ2, rtx *pdest, rtx *psrc)
{
  int i;
  const_rtx set = 0;
  rtx src, dest;
  bool all_adjacent = true;
  int (*is_volatile_p) (const_rtx);

  if (succ)
    {
      if (succ2)
        {
          if (next_active_insn (succ2) != i3)
            all_adjacent = false;
          if (next_active_insn (succ) != succ2)
            all_adjacent = false;
        }
      else if (next_active_insn (succ) != i3)
        all_adjacent = false;
      if (next_active_insn (insn) != succ)
        all_adjacent = false;
    }
  else if (next_active_insn (insn) != i3)
    all_adjacent = false;
  /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
     or a PARALLEL consisting of such a SET and CLOBBERs.

     If INSN has CLOBBER parallel parts, ignore them for our processing.
     By definition, these happen during the execution of the insn.  When it
     is merged with another insn, all bets are off.  If they are, in fact,
     needed and aren't also supplied in I3, they may be added by
     recog_for_combine.  Otherwise, it won't match.

     We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
     note.

     Get the source and destination of INSN.  If more than one, can't
     combine.  */

  if (GET_CODE (PATTERN (insn)) == SET)
    set = PATTERN (insn);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
           && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
    {
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        {
          rtx elt = XVECEXP (PATTERN (insn), 0, i);

          switch (GET_CODE (elt))
            {
            /* This is important to combine floating point insns
               for the SH4 port.  */
            case USE:
              /* Combining an isolated USE doesn't make sense.
                 We depend here on combinable_i3pat to reject them.  */
              /* The code below this loop only verifies that the inputs of
                 the SET in INSN do not change.  We call reg_set_between_p
                 to verify that the REG in the USE does not change between
                 I3 and INSN.
                 If the USE in INSN was for a pseudo register, the matching
                 insn pattern will likely match any register; combining this
                 with any other USE would only be safe if we knew that the
                 used registers have identical values, or if there was
                 something to tell them apart, e.g. different modes.  For
                 now, we forgo such complicated tests and simply disallow
                 combining of USES of pseudo registers with any other USE.  */
              if (REG_P (XEXP (elt, 0))
                  && GET_CODE (PATTERN (i3)) == PARALLEL)
                {
                  rtx i3pat = PATTERN (i3);
                  int i = XVECLEN (i3pat, 0) - 1;
                  unsigned int regno = REGNO (XEXP (elt, 0));

                  do
                    {
                      rtx i3elt = XVECEXP (i3pat, 0, i);

                      if (GET_CODE (i3elt) == USE
                          && REG_P (XEXP (i3elt, 0))
                          && (REGNO (XEXP (i3elt, 0)) == regno
                              ? reg_set_between_p (XEXP (elt, 0),
                                                   PREV_INSN (insn), i3)
                              : regno >= FIRST_PSEUDO_REGISTER))
                        return 0;
                    }
                  while (--i >= 0);
                }
              break;

            /* We can ignore CLOBBERs.  */
            case CLOBBER:
              break;

            case SET:
              /* Ignore SETs whose result isn't used but not those that
                 have side-effects.  */
              if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
                  && insn_nothrow_p (insn)
                  && !side_effects_p (elt))
                break;

              /* If we have already found a SET, this is a second one and
                 so we cannot combine with this insn.  */
              if (set)
                return 0;

              set = elt;
              break;

            default:
              /* Anything else means we can't combine.  */
              return 0;
            }
        }

      if (set == 0
          /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
             so don't do anything with it.  */
          || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
        return 0;
    }
  else
    return 0;

  if (set == 0)
    return 0;
1930 /* The simplification in expand_field_assignment may call back to
1931 get_last_value, so set safe guard here. */
1932 subst_low_luid = DF_INSN_LUID (insn);
1934 set = expand_field_assignment (set);
1935 src = SET_SRC (set), dest = SET_DEST (set);
1937 /* Do not eliminate user-specified register if it is in an
1938 asm input because we may break the register asm usage defined
1939 in GCC manual if allow to do so.
1940 Be aware that this may cover more cases than we expect but this
1941 should be harmless. */
1942 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1943 && extract_asm_operands (PATTERN (i3)))
1946 /* Don't eliminate a store in the stack pointer. */
1947 if (dest == stack_pointer_rtx
1948 /* Don't combine with an insn that sets a register to itself if it has
1949 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1950 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1951 /* Can't merge an ASM_OPERANDS. */
1952 || GET_CODE (src) == ASM_OPERANDS
1953 /* Can't merge a function call. */
1954 || GET_CODE (src) == CALL
1955 /* Don't eliminate a function call argument. */
1957 && (find_reg_fusage (i3, USE, dest)
1959 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1960 && global_regs[REGNO (dest)])))
1961 /* Don't substitute into an incremented register. */
1962 || FIND_REG_INC_NOTE (i3, dest)
1963 || (succ && FIND_REG_INC_NOTE (succ, dest))
1964 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1965 /* Don't substitute into a non-local goto, this confuses CFG. */
1966 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1967 /* Make sure that DEST is not used after INSN but before SUCC, or
1968 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1969 || (!all_adjacent
1970 && ((succ2
1971 && (reg_used_between_p (dest, succ2, i3)
1972 || reg_used_between_p (dest, succ, succ2)))
1973 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1974 || (succ
1975 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1976 that case SUCC is not in the insn stream, so use SUCC2
1977 instead for this test. */
1978 && reg_used_between_p (dest, insn,
1979 INSN_UID (succ) == INSN_UID (succ2)
1980 ? succ2 : succ))))
1982 /* Make sure that the value that is to be substituted for the register
1983 does not use any registers whose values alter in between. However,
1984 if the insns are adjacent, a use can't cross a set even though we
1985 think it might (this can happen for a sequence of insns each setting
1986 the same destination; last_set of that register might point to
1987 a NOTE). If INSN has a REG_EQUIV note, the register is always
1988 equivalent to the memory so the substitution is valid even if there
1989 are intervening stores. Also, don't move a volatile asm or
1990 UNSPEC_VOLATILE across any other insns. */
1991 || (! all_adjacent
1992 && (((!MEM_P (src)
1993 || ! find_reg_note (insn, REG_EQUIV, src))
1994 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1995 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1996 || GET_CODE (src) == UNSPEC_VOLATILE))
1997 /* Don't combine across a CALL_INSN, because that would possibly
1998 change whether the life span of some REGs crosses calls or not,
1999 and it is a pain to update that information.
2000 Exception: if source is a constant, moving it later can't hurt.
2001 Accept that as a special case. */
2002 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2003 return 0;
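/* A sketch of the case use_crosses_set_p guards against (hypothetical
regs and insns, for illustration only):

(set (reg 100) (plus (reg 101) (const_int 1))) ;; INSN
(set (reg 101) (mem ...)) ;; intervening set of reg 101
(set (reg 102) (reg 100)) ;; I3

Substituting INSN's source into I3 would read the NEW value of
reg 101, so such a combination must be rejected. */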
2005 /* DEST must either be a REG or CC0. */
2006 if (REG_P (dest))
2007 {
2008 /* If register alignment is being enforced for multi-word items in all
2009 cases except for parameters, it is possible to have a register copy
2010 insn referencing a hard register that is not allowed to contain the
2011 mode being copied and which would not be valid as an operand of most
2012 insns. Eliminate this problem by not combining with such an insn.
2014 Also, on some machines we don't want to extend the life of a hard
2015 register. */
2017 if (REG_P (src)
2018 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2019 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2020 /* Don't extend the life of a hard register unless it is
2021 user variable (if we have few registers) or it can't
2022 fit into the desired register (meaning something special
2023 is going on).
2024 Also avoid substituting a return register into I3, because
2025 reload can't handle a conflict with constraints of other
2026 inputs. */
2027 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2028 && !targetm.hard_regno_mode_ok (REGNO (src),
2029 GET_MODE (src)))))
2030 return 0;
2031 }
2032 else if (GET_CODE (dest) != CC0)
2033 return 0;
2036 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2037 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2038 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2039 {
2040 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2042 /* If the clobber represents an earlyclobber operand, we must not
2043 substitute an expression containing the clobbered register.
2044 As we do not analyze the constraint strings here, we have to
2045 make the conservative assumption. However, if the register is
2046 a fixed hard reg, the clobber cannot represent any operand;
2047 we leave it up to the machine description to either accept or
2048 reject use-and-clobber patterns. */
2049 if (!REG_P (reg)
2050 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2051 || !fixed_regs[REGNO (reg)])
2052 if (reg_overlap_mentioned_p (reg, src))
2053 return 0;
2054 }
2056 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2057 or not), reject, unless nothing volatile comes between it and I3 */
2059 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2060 {
2061 /* Make sure neither succ nor succ2 contains a volatile reference. */
2062 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2063 return 0;
2064 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2065 return 0;
2066 /* We'll check insns between INSN and I3 below. */
2067 }
2069 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2070 to be an explicit register variable, and was chosen for a reason. */
2072 if (GET_CODE (src) == ASM_OPERANDS
2073 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2074 return 0;
2076 /* If INSN contains volatile references (specifically volatile MEMs),
2077 we cannot combine across any other volatile references.
2078 Even if INSN doesn't contain volatile references, any intervening
2079 volatile insn might affect machine state. */
2081 is_volatile_p = volatile_refs_p (PATTERN (insn))
2082 ? volatile_refs_p
2083 : volatile_insn_p;
2085 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2086 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2087 return 0;
2089 /* If INSN contains an autoincrement or autodecrement, make sure that
2090 register is not used between there and I3, and not already used in
2091 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2092 Also insist that I3 not be a jump; if it were one
2093 and the incremented register were spilled, we would lose. */
2095 if (AUTO_INC_DEC)
2096 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2097 if (REG_NOTE_KIND (link) == REG_INC
2098 && (JUMP_P (i3)
2099 || reg_used_between_p (XEXP (link, 0), insn, i3)
2100 || (pred != NULL_RTX
2101 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2102 || (pred2 != NULL_RTX
2103 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2104 || (succ != NULL_RTX
2105 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2106 || (succ2 != NULL_RTX
2107 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2108 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2109 return 0;
2111 /* Don't combine an insn that follows a CC0-setting insn.
2112 An insn that uses CC0 must not be separated from the one that sets it.
2113 We do, however, allow I2 to follow a CC0-setting insn if that insn
2114 is passed as I1; in that case it will be deleted also.
2115 We also allow combining in this case if all the insns are adjacent
2116 because that would leave the two CC0 insns adjacent as well.
2117 It would be more logical to test whether CC0 occurs inside I1 or I2,
2118 but that would be much slower, and this ought to be equivalent. */
2120 if (HAVE_cc0)
2121 {
2122 p = prev_nonnote_insn (insn);
2123 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2124 && ! all_adjacent)
2125 return 0;
2126 }
2128 /* If we get here, we have passed all the tests and the combination is
2129 to be allowed. */
2131 *pdest = dest;
2132 *psrc = src;
2134 return 1;
2135 }
2137 /* LOC is the location within I3 that contains its pattern or the component
2138 of a PARALLEL of the pattern. We validate that it is valid for combining.
2140 One problem is if I3 modifies its output, as opposed to replacing it
2141 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2142 doing so would produce an insn that is not equivalent to the original insns.
2144 Consider:
2146 (set (reg:DI 101) (reg:DI 100))
2147 (set (subreg:SI (reg:DI 101) 0) <foo>)
2149 This is NOT equivalent to:
2151 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2152 (set (reg:DI 101) (reg:DI 100))])
2154 Not only does this modify 100 (in which case it might still be valid
2155 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2157 We can also run into a problem if I2 sets a register that I1
2158 uses and I1 gets directly substituted into I3 (not via I2). In that
2159 case, we would be getting the wrong value of I2DEST into I3, so we
2160 must reject the combination. This case occurs when I2 and I1 both
2161 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2162 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2163 of a SET must prevent combination from occurring. The same situation
2164 can occur for I0, in which case I0_NOT_IN_SRC is set.
2166 Before doing the above check, we first try to expand a field assignment
2167 into a set of logical operations.
2169 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2170 we place a register that is both set and used within I3. If more than one
2171 such register is detected, we fail.
2173 Return 1 if the combination is valid, zero otherwise. */
2175 static int
2176 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2177 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2178 {
2179 rtx x = *loc;
2181 if (GET_CODE (x) == SET)
2182 {
2183 rtx set = x;
2184 rtx dest = SET_DEST (set);
2185 rtx src = SET_SRC (set);
2186 rtx inner_dest = dest;
2189 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2190 || GET_CODE (inner_dest) == SUBREG
2191 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2192 inner_dest = XEXP (inner_dest, 0);
2194 /* Check for the case where I3 modifies its output, as discussed
2195 above. We don't want to prevent pseudos from being combined
2196 into the address of a MEM, so only prevent the combination if
2197 i1 or i2 set the same MEM. */
2198 if ((inner_dest != dest &&
2199 (!MEM_P (inner_dest)
2200 || rtx_equal_p (i2dest, inner_dest)
2201 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2202 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2203 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2204 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2205 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2207 /* This is the same test done in can_combine_p except we can't test
2208 all_adjacent; we don't have to, since this instruction will stay
2209 in place, thus we are not considering increasing the lifetime of
2210 INNER_DEST.
2212 Also, if this insn sets a function argument, combining it with
2213 something that might need a spill could clobber a previous
2214 function argument; the all_adjacent test in can_combine_p also
2215 checks this; here, we do a more specific test for this case. */
2217 || (REG_P (inner_dest)
2218 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2219 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2220 GET_MODE (inner_dest)))
2221 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2222 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2223 return 0;
2225 /* If DEST is used in I3, it is being killed in this insn, so
2226 record that for later. We have to consider paradoxical
2227 subregs here, since they kill the whole register, but we
2228 ignore partial subregs, STRICT_LOW_PART, etc.
2229 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2230 STACK_POINTER_REGNUM, since these are always considered to be
2231 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2232 rtx subdest = dest;
2233 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2234 subdest = SUBREG_REG (subdest);
2235 if (pi3dest_killed
2236 && REG_P (subdest)
2237 && reg_referenced_p (subdest, PATTERN (i3))
2238 && REGNO (subdest) != FRAME_POINTER_REGNUM
2239 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2240 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2241 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2242 || (REGNO (subdest) != ARG_POINTER_REGNUM
2243 || ! fixed_regs [REGNO (subdest)]))
2244 && REGNO (subdest) != STACK_POINTER_REGNUM)
2245 {
2246 if (*pi3dest_killed)
2247 return 0;
2249 *pi3dest_killed = subdest;
2250 }
2251 }
2253 else if (GET_CODE (x) == PARALLEL)
2254 {
2255 int i;
2257 for (i = 0; i < XVECLEN (x, 0); i++)
2258 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2259 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2260 return 0;
2261 }
2263 return 1;
2264 }
2266 /* Return 1 if X is an arithmetic expression that contains a multiplication
2267 or division. We don't count multiplications by powers of two here. */
2269 static int
2270 contains_muldiv (rtx x)
2271 {
2272 switch (GET_CODE (x))
2273 {
2274 case MOD: case DIV: case UMOD: case UDIV:
2275 return 1;
2277 case MULT:
2278 return ! (CONST_INT_P (XEXP (x, 1))
2279 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2280 default:
2281 if (BINARY_P (x))
2282 return contains_muldiv (XEXP (x, 0))
2283 || contains_muldiv (XEXP (x, 1));
2285 if (UNARY_P (x))
2286 return contains_muldiv (XEXP (x, 0));
2288 return 0;
2289 }
2290 }
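/* Worked examples for contains_muldiv (hypothetical expressions):
(plus (mult (reg 80) (const_int 3)) (reg 81)) gives 1,
(udiv (reg 80) (reg 81)) gives 1, but
(mult (reg 80) (const_int 8)) gives 0, since a multiply by a
power of two is really a shift. */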
2292 /* Determine whether INSN can be used in a combination. Return nonzero if
2293 not. This is used in try_combine to detect early some cases where we
2294 can't perform combinations. */
2296 static int
2297 cant_combine_insn_p (rtx_insn *insn)
2298 {
2299 rtx set;
2300 rtx src, dest;
2302 /* If this isn't really an insn, we can't do anything.
2303 This can occur when flow deletes an insn that it has merged into an
2304 auto-increment address. */
2305 if (!NONDEBUG_INSN_P (insn))
2306 return 1;
2308 /* Never combine loads and stores involving hard regs that are likely
2309 to be spilled. The register allocator can usually handle such
2310 reg-reg moves by tying. If we allow the combiner to make
2311 substitutions of likely-spilled regs, reload might die.
2312 As an exception, we allow combinations involving fixed regs; these are
2313 not available to the register allocator so there's no risk involved. */
2315 set = single_set (insn);
2316 if (! set)
2317 return 0;
2318 src = SET_SRC (set);
2319 dest = SET_DEST (set);
2320 if (GET_CODE (src) == SUBREG)
2321 src = SUBREG_REG (src);
2322 if (GET_CODE (dest) == SUBREG)
2323 dest = SUBREG_REG (dest);
2324 if (REG_P (src) && REG_P (dest)
2325 && ((HARD_REGISTER_P (src)
2326 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2327 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2328 || (HARD_REGISTER_P (dest)
2329 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2330 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2331 return 1;
2333 return 0;
2334 }
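/* Hypothetical example: on a target whose return-value class contains
a single hard register, a copy such as (set (reg:SI 100) (reg:SI 0))
is left alone; folding uses of reg 100 into users of hard reg 0 could
extend the hard reg's lifetime past a point where reload can still
satisfy every constraint on it. */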
2336 struct likely_spilled_retval_info
2337 {
2338 unsigned regno, nregs;
2339 unsigned mask;
2340 };
2342 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2343 hard registers that are known to be written to / clobbered in full. */
2344 static void
2345 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2346 {
2347 struct likely_spilled_retval_info *const info =
2348 (struct likely_spilled_retval_info *) data;
2349 unsigned regno, nregs;
2350 unsigned new_mask;
2352 if (!REG_P (XEXP (set, 0)))
2353 return;
2354 regno = REGNO (x);
2355 if (regno >= info->regno + info->nregs)
2356 return;
2357 nregs = REG_NREGS (x);
2358 if (regno + nregs <= info->regno)
2359 return;
2360 new_mask = (2U << (nregs - 1)) - 1;
2361 if (regno < info->regno)
2362 new_mask >>= info->regno - regno;
2363 else
2364 new_mask <<= regno - info->regno;
2365 info->mask &= ~new_mask;
2366 }
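/* Worked instance of the mask arithmetic above (hypothetical numbers):
with info->regno == 28, info->nregs == 4 and a store to regs 30..31
(regno == 30, nregs == 2), new_mask = (2U << 1) - 1 = 3, shifted left
by 30 - 28 = 2 to give 0xc, so bits 2 and 3 of info->mask are cleared
and only regs 28..29 remain possibly live. */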
2368 /* Return nonzero iff part of the return value is live during INSN, and
2369 it is likely spilled. This can happen when more than one insn is needed
2370 to copy the return value, e.g. when we consider to combine into the
2371 second copy insn for a complex value. */
2373 static int
2374 likely_spilled_retval_p (rtx_insn *insn)
2375 {
2376 rtx_insn *use = BB_END (this_basic_block);
2377 rtx reg;
2378 rtx_insn *p;
2379 unsigned regno, nregs;
2380 /* We assume here that no machine mode needs more than
2381 32 hard registers when the value overlaps with a register
2382 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2383 unsigned mask;
2384 struct likely_spilled_retval_info info;
2386 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2387 return 0;
2388 reg = XEXP (PATTERN (use), 0);
2389 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2390 return 0;
2391 regno = REGNO (reg);
2392 nregs = REG_NREGS (reg);
2393 if (nregs == 1)
2394 return 0;
2395 mask = (2U << (nregs - 1)) - 1;
2397 /* Disregard parts of the return value that are set later. */
2398 info.regno = regno;
2399 info.nregs = nregs;
2400 info.mask = mask;
2401 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2402 if (INSN_P (p))
2403 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2404 mask = info.mask;
2406 /* Check if any of the (probably) live return value registers is
2407 likely spilled. */
2408 nregs --;
2409 do
2410 {
2411 if ((mask & 1 << nregs)
2412 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2413 return 1;
2414 } while (nregs--);
2415 return 0;
2416 }
2418 /* Adjust INSN after we made a change to its destination.
2420 Changing the destination can invalidate notes that say something about
2421 the results of the insn and a LOG_LINK pointing to the insn. */
2423 static void
2424 adjust_for_new_dest (rtx_insn *insn)
2425 {
2426 /* For notes, be conservative and simply remove them. */
2427 remove_reg_equal_equiv_notes (insn);
2429 /* The new insn will have a destination that was previously the destination
2430 of an insn just above it. Call distribute_links to make a LOG_LINK from
2431 the next use of that destination. */
2433 rtx set = single_set (insn);
2434 gcc_assert (set);
2436 rtx reg = SET_DEST (set);
2438 while (GET_CODE (reg) == ZERO_EXTRACT
2439 || GET_CODE (reg) == STRICT_LOW_PART
2440 || GET_CODE (reg) == SUBREG)
2441 reg = XEXP (reg, 0);
2442 gcc_assert (REG_P (reg));
2444 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2446 df_insn_rescan (insn);
2449 /* Return TRUE if combine can reuse reg X in mode MODE.
2450 ADDED_SETS is nonzero if the original set is still required. */
2451 static bool
2452 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2453 {
2454 unsigned int regno;
2456 if (!REG_P (x))
2457 return false;
2459 regno = REGNO (x);
2460 /* Allow hard registers if the new mode is legal, and occupies no more
2461 registers than the old mode. */
2462 if (regno < FIRST_PSEUDO_REGISTER)
2463 return (targetm.hard_regno_mode_ok (regno, mode)
2464 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2466 /* Or a pseudo that is only used once. */
2467 return (regno < reg_n_sets_max
2468 && REG_N_SETS (regno) == 1
2469 && !added_sets
2470 && !REG_USERVAR_P (x));
2471 }
2474 /* Check whether X, the destination of a set, refers to part of
2475 the register specified by REG. */
2477 static bool
2478 reg_subword_p (rtx x, rtx reg)
2479 {
2480 /* Check that reg is an integer mode register. */
2481 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2482 return false;
2484 if (GET_CODE (x) == STRICT_LOW_PART
2485 || GET_CODE (x) == ZERO_EXTRACT)
2486 x = XEXP (x, 0);
2488 return GET_CODE (x) == SUBREG
2489 && SUBREG_REG (x) == reg
2490 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2491 }
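/* E.g. (hypothetical operands): with reg = (reg:DI 100), reg_subword_p
accepts x = (subreg:SI (reg:DI 100) 0), also when x is wrapped in a
STRICT_LOW_PART or ZERO_EXTRACT, but rejects a SUBREG of any other
register or of a non-integer mode. */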
2493 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2494 Note that the INSN should be deleted *after* removing dead edges, so
2495 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2496 but not for a (set (pc) (label_ref FOO)). */
2498 static void
2499 update_cfg_for_uncondjump (rtx_insn *insn)
2500 {
2501 basic_block bb = BLOCK_FOR_INSN (insn);
2502 gcc_assert (BB_END (bb) == insn);
2504 purge_dead_edges (bb);
2506 delete_insn (insn);
2507 if (EDGE_COUNT (bb->succs) == 1)
2508 {
2509 rtx_insn *insn;
2511 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2513 /* Remove barriers from the footer if there are any. */
2514 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2515 if (BARRIER_P (insn))
2516 {
2517 if (PREV_INSN (insn))
2518 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2519 else
2520 BB_FOOTER (bb) = NEXT_INSN (insn);
2521 if (NEXT_INSN (insn))
2522 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2523 }
2524 else if (LABEL_P (insn))
2525 break;
2526 }
2527 }
2529 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2530 by an arbitrary number of CLOBBERs. */
2531 static bool
2532 is_parallel_of_n_reg_sets (rtx pat, int n)
2533 {
2534 if (GET_CODE (pat) != PARALLEL)
2535 return false;
2537 int len = XVECLEN (pat, 0);
2538 if (len < n)
2539 return false;
2541 int i;
2542 for (i = 0; i < n; i++)
2543 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2544 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2545 return false;
2546 for ( ; i < len; i++)
2547 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2548 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2549 return false;
2551 return true;
2552 }
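/* Illustration (hypothetical pattern): with n == 2,

(parallel [(set (reg 90) ...)
(set (reg 17) ...)
(clobber (scratch:SI))])

qualifies, whereas a PARALLEL whose second element is not a SET of a
REG, or that ends in (clobber (const_int 0)) -- the marker left by a
failed recog_for_combine -- does not. */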
2554 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2555 CLOBBERs), can be split into individual SETs in that order, without
2556 changing semantics. */
2557 static bool
2558 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2559 {
2560 if (!insn_nothrow_p (insn))
2561 return false;
2563 rtx pat = PATTERN (insn);
2565 int i, j;
2566 for (i = 0; i < n; i++)
2567 {
2568 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2569 return false;
2571 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2573 for (j = i + 1; j < n; j++)
2574 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2575 return false;
2576 }
2578 return true;
2579 }
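/* Illustration (hypothetical sets): the PARALLEL

(parallel [(set (reg 90) (reg 95)) (set (reg 91) (reg 96))])

can be split into two SETs in order, but if the second SET read
reg 90, splitting would change the value that SET observes, so the
split is rejected. */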
2581 /* Try to combine the insns I0, I1 and I2 into I3.
2582 Here I0, I1 and I2 appear earlier than I3.
2583 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2584 I3.
2586 If we are combining more than two insns and the resulting insn is not
2587 recognized, try splitting it into two insns. If that happens, I2 and I3
2588 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2589 Otherwise, I0, I1 and I2 are pseudo-deleted.
2591 Return 0 if the combination does not work. Then nothing is changed.
2592 If we did the combination, return the insn at which combine should
2593 resume scanning.
2595 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2596 new direct jump instruction.
2598 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2599 been I3 passed to an earlier try_combine within the same basic
2600 block. */
2602 static rtx_insn *
2603 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2604 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2606 /* New patterns for I3 and I2, respectively. */
2607 rtx newpat, newi2pat = 0;
2608 rtvec newpat_vec_with_clobbers = 0;
2609 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2610 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2612 int added_sets_0, added_sets_1, added_sets_2;
2613 /* Total number of SETs to put into I3. */
2615 /* Nonzero if I2's or I1's body now appears in I3. */
2616 int i2_is_used = 0, i1_is_used = 0;
2617 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2618 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2619 /* Contains I3 if the destination of I3 is used in its source, which means
2620 that the old life of I3 is being killed. If that usage is placed into
2621 I2 and not in I3, a REG_DEAD note must be made. */
2622 rtx i3dest_killed = 0;
2623 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2624 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2625 /* Copy of SET_SRC of I1 and I0, if needed. */
2626 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2627 /* Set if I2DEST was reused as a scratch register. */
2628 bool i2scratch = false;
2629 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2630 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2631 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2632 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2633 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2634 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2635 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2636 /* Notes that must be added to REG_NOTES in I3 and I2. */
2637 rtx new_i3_notes, new_i2_notes;
2638 /* Notes that we substituted I3 into I2 instead of the normal case. */
2639 int i3_subst_into_i2 = 0;
2640 /* Notes that I1, I2 or I3 is a MULT operation. */
2643 int changed_i3_dest = 0;
2646 rtx_insn *temp_insn;
2648 struct insn_link *link;
2650 rtx new_other_notes;
2652 scalar_int_mode dest_mode, temp_mode;
2654 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2655 never be). */
2656 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2657 return 0;
2659 /* Only try four-insn combinations when there's high likelihood of
2660 success. Look for simple insns, such as loads of constants or
2661 binary operations involving a constant. */
2662 if (i0)
2663 {
2664 int i;
2665 int ngood = 0;
2666 int nshift = 0;
2667 rtx set0, set3;
2669 if (!flag_expensive_optimizations)
2670 return 0;
2672 for (i = 0; i < 4; i++)
2673 {
2674 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2675 rtx set = single_set (insn);
2676 rtx src;
2677 if (!set)
2678 continue;
2679 src = SET_SRC (set);
2680 if (CONSTANT_P (src))
2681 {
2682 ngood += 2;
2683 break;
2684 }
2685 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2686 ngood++;
2687 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2688 || GET_CODE (src) == LSHIFTRT)
2689 nshift++;
2690 }
2692 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2693 are likely manipulating its value. Ideally we'll be able to combine
2694 all four insns into a bitfield insertion of some kind.
2696 Note the source in I0 might be inside a sign/zero extension and the
2697 memory modes in I0 and I3 might be different. So extract the address
2698 from the destination of I3 and search for it in the source of I0.
2700 In the event that there's a match but the source/dest do not actually
2701 refer to the same memory, the worst that happens is we try some
2702 combinations that we wouldn't have otherwise. */
2703 if ((set0 = single_set (i0))
2704 /* Ensure the source of SET0 is a MEM, possibly buried inside
2705 an extension. */
2706 && (GET_CODE (SET_SRC (set0)) == MEM
2707 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2708 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2709 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2710 && (set3 = single_set (i3))
2711 /* Ensure the destination of SET3 is a MEM. */
2712 && GET_CODE (SET_DEST (set3)) == MEM
2713 /* Would it be better to extract the base address for the MEM
2714 in SET3 and look for that? I don't have cases where it matters
2715 but I could envision such cases. */
2716 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2717 ngood += 2;
2719 if (ngood < 2 && nshift < 2)
2720 return 0;
2721 }
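/* Illustrative tally (hypothetical chain): for r1 = r0 << 3;
r2 = r1 + 5; r3 = r2 & r4; mem = r3, the shift counts once in nshift
and the constant PLUS once in ngood, so the quadruplet is still
rejected; a constant load (ngood += 2) or the MEM-to-MEM bitfield
shape tested above is what lets one through. */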
2723 /* Exit early if one of the insns involved can't be used for
2724 combinations. */
2725 if (CALL_P (i2)
2726 || (i1 && CALL_P (i1))
2727 || (i0 && CALL_P (i0))
2728 || cant_combine_insn_p (i3)
2729 || cant_combine_insn_p (i2)
2730 || (i1 && cant_combine_insn_p (i1))
2731 || (i0 && cant_combine_insn_p (i0))
2732 || likely_spilled_retval_p (i3))
2733 return 0;
2736 undobuf.other_insn = 0;
2738 /* Reset the hard register usage information. */
2739 CLEAR_HARD_REG_SET (newpat_used_regs);
2741 if (dump_file && (dump_flags & TDF_DETAILS))
2742 {
2743 if (i0)
2744 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2745 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2746 else if (i1)
2747 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2748 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2749 else
2750 fprintf (dump_file, "\nTrying %d -> %d:\n",
2751 INSN_UID (i2), INSN_UID (i3));
2752 }
2754 /* If multiple insns feed into one of I2 or I3, they can be in any
2755 order. To simplify the code below, reorder them in sequence. */
2756 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2757 std::swap (i0, i2);
2758 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2759 std::swap (i0, i1);
2760 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2761 std::swap (i1, i2);
2763 added_links_insn = 0;
2765 /* First check for one important special case that the code below will
2766 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2767 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2768 we may be able to replace that destination with the destination of I3.
2769 This occurs in the common code where we compute both a quotient and
2770 remainder into a structure, in which case we want to do the computation
2771 directly into the structure to avoid register-register copies.
2773 Note that this case handles both multiple sets in I2 and also cases
2774 where I2 has a number of CLOBBERs inside the PARALLEL.
2776 We make very conservative checks below and only try to handle the
2777 most common cases of this. For example, we only handle the case
2778 where I2 and I3 are adjacent to avoid making difficult register
2779 usage tests. */
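/* The shape being targeted looks roughly like this (hypothetical
divmod insns, for illustration):

I2: (parallel [(set (reg 100) (div (reg 98) (reg 99)))
(set (reg 101) (mod (reg 98) (reg 99)))])
I3: (set (mem:SI (reg 105)) (reg 101))

Substituting I3's MEM destination for reg 101 inside I2 stores the
remainder straight into the structure slot, with no extra copy. */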
2781 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2782 && REG_P (SET_SRC (PATTERN (i3)))
2783 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2784 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2785 && GET_CODE (PATTERN (i2)) == PARALLEL
2786 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2787 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2788 below would need to check what is inside (and reg_overlap_mentioned_p
2789 doesn't support those codes anyway). Don't allow those destinations;
2790 the resulting insn isn't likely to be recognized anyway. */
2791 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2792 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2793 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2794 SET_DEST (PATTERN (i3)))
2795 && next_active_insn (i2) == i3)
2797 rtx p2 = PATTERN (i2);
2799 /* Make sure that the destination of I3,
2800 which we are going to substitute into one output of I2,
2801 is not used within another output of I2. We must avoid making this:
2802 (parallel [(set (mem (reg 69)) ...)
2803 (set (reg 69) ...)])
2804 which is not well-defined as to order of actions.
2805 (Besides, reload can't handle output reloads for this.)
2807 The problem can also happen if the dest of I3 is a memory ref,
2808 if another dest in I2 is an indirect memory ref.
2810 Neither can this PARALLEL be an asm. We do not allow combining
2811 that usually (see can_combine_p), so do not here either. */
2812 bool ok = true;
2813 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2814 {
2815 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2816 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2817 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2818 SET_DEST (XVECEXP (p2, 0, i))))
2819 ok = false;
2820 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2821 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2822 ok = false;
2823 }
2825 if (ok)
2826 for (i = 0; i < XVECLEN (p2, 0); i++)
2827 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2828 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2829 {
2830 combine_merges++;
2832 subst_insn = i3;
2833 subst_low_luid = DF_INSN_LUID (i2);
2835 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2836 i2src = SET_SRC (XVECEXP (p2, 0, i));
2837 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2838 i2dest_killed = dead_or_set_p (i2, i2dest);
2840 /* Replace the dest in I2 with our dest and make the resulting
2841 insn the new pattern for I3. Then skip to where we validate
2842 the pattern. Everything was set up above. */
2843 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2844 newpat = p2;
2845 i3_subst_into_i2 = 1;
2846 goto validate_replacement;
2847 }
2848 }
2850 /* If I2 is setting a pseudo to a constant and I3 is setting some
2851 sub-part of it to another constant, merge them by making a new
2852 constant. */
2853 if (i1 == 0
2854 && (temp_expr = single_set (i2)) != 0
2855 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2856 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2857 && GET_CODE (PATTERN (i3)) == SET
2858 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2859 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2861 rtx dest = SET_DEST (PATTERN (i3));
2862 rtx temp_dest = SET_DEST (temp_expr);
2866 if (GET_CODE (dest) == ZERO_EXTRACT)
2868 if (CONST_INT_P (XEXP (dest, 1))
2869 && CONST_INT_P (XEXP (dest, 2))
2870 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2873 width = INTVAL (XEXP (dest, 1));
2874 offset = INTVAL (XEXP (dest, 2));
2875 dest = XEXP (dest, 0);
2876 if (BITS_BIG_ENDIAN)
2877 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2882 if (GET_CODE (dest) == STRICT_LOW_PART)
2883 dest = XEXP (dest, 0);
2884 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2886 width = GET_MODE_PRECISION (dest_mode);
2893 /* If this is the low part, we're done. */
2894 if (subreg_lowpart_p (dest))
2896 /* Handle the case where inner is twice the size of outer. */
2897 else if (GET_MODE_PRECISION (temp_mode)
2898 == 2 * GET_MODE_PRECISION (dest_mode))
2899 offset += GET_MODE_PRECISION (dest_mode);
2900 /* Otherwise give up for now. */
2907 rtx inner = SET_SRC (PATTERN (i3));
2908 rtx outer = SET_SRC (temp_expr);
2910 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2911 rtx_mode_t (inner, dest_mode),
2912 offset, width);
2916 subst_low_luid = DF_INSN_LUID (i2);
2917 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2919 i2dest_killed = dead_or_set_p (i2, i2dest);
2921 /* Replace the source in I2 with the new constant and make the
2922 resulting insn the new pattern for I3. Then skip to where we
2923 validate the pattern. Everything was set up above. */
2924 SUBST (SET_SRC (temp_expr),
2925 immed_wide_int_const (o, temp_mode));
2927 newpat = PATTERN (i2);
2929 /* The dest of I3 has been replaced with the dest of I2. */
2930 changed_i3_dest = 1;
2931 goto validate_replacement;
2932 }
2933 }
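/* Worked instance of the constant merge (hypothetical values): if I2
is (set (reg:DI 100) (const_int 0x1122334455667788)) and I3 is
(set (subreg:SI (reg:DI 100) 0) (const_int 0x99aabbcc)), then offset 0
and width 32 tell wi::insert to replace the low word, and I3 becomes
a single load of the constant 0x1122334499aabbcc. */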
2935 /* If we have no I1 and I2 looks like:
2936 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2937 (set Y OP)])
2938 make up a dummy I1 that is
2939 (set Y OP)
2940 and change I2 to be
2941 (set (reg:CC X) (compare:CC Y (const_int 0)))
2943 (We can ignore any trailing CLOBBERs.)
2945 This undoes a previous combination and allows us to match a branch-and-
2946 decrement insn. */
2948 if (!HAVE_cc0 && i1 == 0
2949 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2950 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2951 == MODE_CC)
2952 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2953 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2954 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2955 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2956 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2957 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2959 /* We make I1 with the same INSN_UID as I2. This gives it
2960 the same DF_INSN_LUID for value tracking. Our fake I1 will
2961 never appear in the insn stream so giving it the same INSN_UID
2962 as I2 will not cause a problem. */
2964 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2965 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2966 -1, NULL_RTX);
2967 INSN_UID (i1) = INSN_UID (i2);
2969 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2970 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2971 SET_DEST (PATTERN (i1)));
2972 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2973 SUBST_LINK (LOG_LINKS (i2),
2974 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2977 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2978 make those two SETs separate I1 and I2 insns, and make an I0 that is
2979 the original I1. */
2980 if (!HAVE_cc0 && i0 == 0
2981 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2982 && can_split_parallel_of_n_reg_sets (i2, 2)
2983 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2984 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2986 /* If there is no I1, there is no I0 either. */
2987 i0 = i1;
2989 /* We make I1 with the same INSN_UID as I2. This gives it
2990 the same DF_INSN_LUID for value tracking. Our fake I1 will
2991 never appear in the insn stream so giving it the same INSN_UID
2992 as I2 will not cause a problem. */
2994 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2995 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2996 -1, NULL_RTX);
2997 INSN_UID (i1) = INSN_UID (i2);
2999 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3002 /* Verify that I2 and I1 are valid for combining. */
3003 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
3004 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
3005 &i1dest, &i1src))
3006 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
3007 &i0dest, &i0src)))
3008 {
3009 undo_all ();
3010 return 0;
3011 }
3013 /* Record whether I2DEST is used in I2SRC and similarly for the other
3014 cases. Knowing this will help in register status updating below. */
3015 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3016 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3017 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3018 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3019 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3020 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3021 i2dest_killed = dead_or_set_p (i2, i2dest);
3022 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3023 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3025 /* For the earlier insns, determine which of the subsequent ones they
3026 feed. */
3027 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3028 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3029 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3030 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3031 && reg_overlap_mentioned_p (i0dest, i2src))));
3033 /* Ensure that I3's pattern can be the destination of combines. */
3034 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3035 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3036 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3037 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3038 &i3dest_killed))
3039 {
3040 undo_all ();
3041 return 0;
3042 }
3044 /* See if any of the insns is a MULT operation. Unless one is, we will
3045 reject a combination that is, since it must be slower. Be conservative
3046 here. */
3047 if (GET_CODE (i2src) == MULT
3048 || (i1 != 0 && GET_CODE (i1src) == MULT)
3049 || (i0 != 0 && GET_CODE (i0src) == MULT)
3050 || (GET_CODE (PATTERN (i3)) == SET
3051 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT)
3052 have_mult = 1;
3054 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3055 We used to do this EXCEPT in one case: I3 has a post-inc in an
3056 output operand. However, that exception can give rise to insns like
3057 mov r3,(r3)+
3058 which is a famous insn on the PDP-11 where the value of r3 used as the
3059 source was model-dependent. Avoid this sort of thing. */
3062 if (!(GET_CODE (PATTERN (i3)) == SET
3063 && REG_P (SET_SRC (PATTERN (i3)))
3064 && MEM_P (SET_DEST (PATTERN (i3)))
3065 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3066 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3067 /* It's not the exception. */
3072 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3073 if (REG_NOTE_KIND (link) == REG_INC
3074 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3076 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3077 {
3078 undo_all ();
3079 return 0;
3080 }
3083 /* See if the SETs in I1 or I2 need to be kept around in the merged
3084 instruction: whenever the value set there is still needed past I3.
3085 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3087 For the SET in I1, we have two cases: if I1 and I2 independently feed
3088 into I3, the set in I1 needs to be kept around unless I1DEST dies
3089 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3090 in I1 needs to be kept around unless I1DEST dies or is set in either
3091 I2 or I3. The same considerations apply to I0. */
3093 added_sets_2 = !dead_or_set_p (i3, i2dest);
3095 if (i1)
3096 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3097 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3098 else
3099 added_sets_1 = 0;
3101 if (i0)
3102 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3103 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3104 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3105 && dead_or_set_p (i2, i0dest)));
3106 else
3107 added_sets_0 = 0;
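/* Example of when a SET must be kept (hypothetical insns): if I1 is
(set (reg 90) (plus (reg 91) (const_int 4))) and reg 90 is still used
after I3, the assignment cannot simply vanish once its source has been
substituted into I3; added_sets_1 records that the SET has to be
re-emitted in the PARALLEL built further down. */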
3109 /* We are about to copy insns for the case where they need to be kept
3110 around. Check that they can be copied in the merged instruction. */
3112 if (targetm.cannot_copy_insn_p
3113 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3114 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3115 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3116 {
3117 undo_all ();
3118 return 0;
3119 }
3121 /* If the set in I2 needs to be kept around, we must make a copy of
3122 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3123 PATTERN (I2), we are only substituting for the original I1DEST, not into
3124 an already-substituted copy. This also prevents making self-referential
3125 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3126 I2DEST. */
3128 if (added_sets_2)
3129 {
3130 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3131 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3132 else
3133 i2pat = copy_rtx (PATTERN (i2));
3134 }
3136 if (added_sets_1)
3137 {
3138 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3139 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3140 else
3141 i1pat = copy_rtx (PATTERN (i1));
3142 }
3144 if (added_sets_0)
3145 {
3146 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3147 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3148 else
3149 i0pat = copy_rtx (PATTERN (i0));
3150 }
3154 /* Substitute in the latest insn for the regs set by the earlier ones. */
3156 maxreg = max_reg_num ();
3160 /* Many machines that don't use CC0 have insns that can both perform an
3161 arithmetic operation and set the condition code. These operations will
3162 be represented as a PARALLEL with the first element of the vector
3163 being a COMPARE of an arithmetic operation with the constant zero.
3164 The second element of the vector will set some pseudo to the result
3165 of the same arithmetic operation. If we simplify the COMPARE, we won't
3166 match such a pattern and so will generate an extra insn. Here we test
3167 for this case, where both the comparison and the operation result are
3168 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3169 I2SRC. Later we will make the PARALLEL that contains I2. */
3171 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3172 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3173 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3174 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3177 rtx *cc_use_loc = NULL;
3178 rtx_insn *cc_use_insn = NULL;
3179 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3180 machine_mode compare_mode, orig_compare_mode;
3181 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3182 scalar_int_mode mode;
3184 newpat = PATTERN (i3);
3185 newpat_dest = SET_DEST (newpat);
3186 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3188 if (undobuf.other_insn == 0
3189 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3192 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3193 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3194 compare_code = simplify_compare_const (compare_code, mode,
3196 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3199 /* Do the rest only if op1 is const0_rtx, which may be the
3200 result of simplification. */
3201 if (op1 == const0_rtx)
3203 /* If a single use of the CC is found, prepare to modify it
3204 when SELECT_CC_MODE returns a new CC-class mode, or when
3205 the above simplify_compare_const() returned a new comparison
3206 operator. undobuf.other_insn is assigned the CC use insn
3207 when modifying it. */
3210 #ifdef SELECT_CC_MODE
3211 machine_mode new_mode
3212 = SELECT_CC_MODE (compare_code, op0, op1);
3213 if (new_mode != orig_compare_mode
3214 && can_change_dest_mode (SET_DEST (newpat),
3215 added_sets_2, new_mode))
3217 unsigned int regno = REGNO (newpat_dest);
3218 compare_mode = new_mode;
3219 if (regno < FIRST_PSEUDO_REGISTER)
3220 newpat_dest = gen_rtx_REG (compare_mode, regno);
3223 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3224 newpat_dest = regno_reg_rtx[regno];
3228 /* Cases for modifying the CC-using comparison. */
3229 if (compare_code != orig_compare_code
3230 /* ??? Do we need to verify the zero rtx? */
3231 && XEXP (*cc_use_loc, 1) == const0_rtx)
3233 /* Replace cc_use_loc with entire new RTX. */
3235 gen_rtx_fmt_ee (compare_code, compare_mode,
3236 newpat_dest, const0_rtx));
3237 undobuf.other_insn = cc_use_insn;
3239 else if (compare_mode != orig_compare_mode)
3241 /* Just replace the CC reg with a new mode. */
3242 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3243 undobuf.other_insn = cc_use_insn;
3247 /* Now we modify the current newpat:
3248 First, SET_DEST(newpat) is updated if the CC mode has been
3249 altered. For targets without SELECT_CC_MODE, this should be
3251 if (compare_mode != orig_compare_mode)
3252 SUBST (SET_DEST (newpat), newpat_dest);
3253 /* This is always done to propagate i2src into newpat. */
3254 SUBST (SET_SRC (newpat),
3255 gen_rtx_COMPARE (compare_mode, op0, op1));
3256 /* Create new version of i2pat if needed; the below PARALLEL
3257 creation needs this to work correctly. */
3258 if (! rtx_equal_p (i2src, op0))
3259 i2pat = gen_rtx_SET (i2dest, op0);
3264 if (i2_is_used == 0)
3266 /* It is possible that the source of I2 or I1 may be performing
3267 an unneeded operation, such as a ZERO_EXTEND of something
3268 that is known to have the high part zero. Handle that case
3269 by letting subst look at the inner insns.
3271 Another way to do this would be to have a function that tries
3272 to simplify a single insn instead of merging two or more
3273 insns. We don't do this because of the potential of infinite
3274 loops and because of the potential extra memory required.
3275 However, doing it the way we are is a bit of a kludge and
3276 doesn't catch all cases.
3278 But only do this if -fexpensive-optimizations since it slows
3279 things down and doesn't usually win.
3281 This is not done in the COMPARE case above because the
3282 unmodified I2PAT is used in the PARALLEL and so a pattern
3283 with a modified I2SRC would not match. */
3285 if (flag_expensive_optimizations)
3287 /* Pass pc_rtx so no substitutions are done, just
3291 subst_low_luid = DF_INSN_LUID (i1);
3292 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3295 subst_low_luid = DF_INSN_LUID (i2);
3296 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3299 n_occurrences = 0; /* `subst' counts here */
3300 subst_low_luid = DF_INSN_LUID (i2);
3302 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3303 copy of I2SRC each time we substitute it, in order to avoid creating
3304 self-referential RTL when we will be substituting I1SRC for I1DEST
3305 later. Likewise if I0 feeds into I2, either directly or indirectly
3306 through I1, and I0DEST is in I0SRC. */
3307 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3308 (i1_feeds_i2_n && i1dest_in_i1src)
3309 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3310 && i0dest_in_i0src));
3311 substed_i2 = 1;
3313 /* Record whether I2's body now appears within I3's body. */
3314 i2_is_used = n_occurrences;
3317 /* If we already got a failure, don't try to do more. Otherwise, try to
3318 substitute I1 if we have it. */
3320 if (i1 && GET_CODE (newpat) != CLOBBER)
3322 /* Check that an autoincrement side-effect on I1 has not been lost.
3323 This happens if I1DEST is mentioned in I2 and dies there, and
3324 has disappeared from the new pattern. */
3325 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3326 && i1_feeds_i2_n
3327 && dead_or_set_p (i2, i1dest)
3328 && !reg_overlap_mentioned_p (i1dest, newpat))
3329 /* Before we can do this substitution, we must redo the test done
3330 above (see detailed comments there) that ensures I1DEST isn't
3331 mentioned in any SETs in NEWPAT that are field assignments. */
3332 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3333 0, 0, 0))
3334 {
3335 undo_all ();
3336 return 0;
3337 }
3339 n_occurrences = 0;
3340 subst_low_luid = DF_INSN_LUID (i1);
3342 /* If the following substitution will modify I1SRC, make a copy of it
3343 for the case where it is substituted for I1DEST in I2PAT later. */
3344 if (added_sets_2 && i1_feeds_i2_n)
3345 i1src_copy = copy_rtx (i1src);
3347 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3348 copy of I1SRC each time we substitute it, in order to avoid creating
3349 self-referential RTL when we will be substituting I0SRC for I0DEST
3350 later. */
3351 newpat = subst (newpat, i1dest, i1src, 0, 0,
3352 i0_feeds_i1_n && i0dest_in_i0src);
3353 substed_i1 = 1;
3355 /* Record whether I1's body now appears within I3's body. */
3356 i1_is_used = n_occurrences;
3359 /* Likewise for I0 if we have it. */
3361 if (i0 && GET_CODE (newpat) != CLOBBER)
3363 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3364 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3365 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3366 && !reg_overlap_mentioned_p (i0dest, newpat))
3367 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3368 0, 0, 0))
3369 {
3370 undo_all ();
3371 return 0;
3372 }
3374 /* If the following substitution will modify I0SRC, make a copy of it
3375 for the case where it is substituted for I0DEST in I1PAT later. */
3376 if (added_sets_1 && i0_feeds_i1_n)
3377 i0src_copy = copy_rtx (i0src);
3378 /* And a copy for I0DEST in I2PAT substitution. */
3379 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3380 || (i0_feeds_i2_n)))
3381 i0src_copy2 = copy_rtx (i0src);
3383 n_occurrences = 0;
3384 subst_low_luid = DF_INSN_LUID (i0);
3385 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3386 substed_i0 = 1;
3387 }
3389 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3390 to count all the ways that I2SRC and I1SRC can be used. */
3391 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3392 && i2_is_used + added_sets_2 > 1)
3393 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3394 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3396 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3397 && (n_occurrences + added_sets_0
3398 + (added_sets_1 && i0_feeds_i1_n)
3399 + (added_sets_2 && i0_feeds_i2_n)
3400 > 1))
3401 /* Fail if we tried to make a new register. */
3402 || max_reg_num () != maxreg
3403 /* Fail if we couldn't do something and have a CLOBBER. */
3404 || GET_CODE (newpat) == CLOBBER
3405 /* Fail if this new pattern is a MULT and we didn't have one before
3406 at the outer level. */
3407 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3408 && ! have_mult))
3409 {
3410 undo_all ();
3411 return 0;
3412 }
3414 /* If the actions of the earlier insns must be kept
3415 in addition to substituting them into the latest one,
3416 we must make a new PARALLEL for the latest insn
3417 to hold the additional SETs. */
3419 if (added_sets_0 || added_sets_1 || added_sets_2)
3421 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3424 if (GET_CODE (newpat) == PARALLEL)
3426 rtvec old = XVEC (newpat, 0);
3427 total_sets = XVECLEN (newpat, 0) + extra_sets;
3428 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3429 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3430 sizeof (old->elem[0]) * old->num_elem);
3435 total_sets = 1 + extra_sets;
3436 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3437 XVECEXP (newpat, 0, 0) = old;
3441 XVECEXP (newpat, 0, --total_sets) = i0pat;
3447 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3449 XVECEXP (newpat, 0, --total_sets) = t;
3455 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3456 i0_feeds_i1_n && i0dest_in_i0src);
3457 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3458 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3460 XVECEXP (newpat, 0, --total_sets) = t;
3464 validate_replacement:
3466 /* Note which hard regs this insn has as inputs. */
3467 mark_used_regs_combine (newpat);
3469 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3470 consider splitting this pattern, we might need these clobbers. */
3471 if (i1 && GET_CODE (newpat) == PARALLEL
3472 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3474 int len = XVECLEN (newpat, 0);
3476 newpat_vec_with_clobbers = rtvec_alloc (len);
3477 for (i = 0; i < len; i++)
3478 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3481 /* We have recognized nothing yet. */
3482 insn_code_number = -1;
3484 /* See if this is a PARALLEL of two SETs where one SET's destination is
3485 a register that is unused and this isn't marked as an instruction that
3486 might trap in an EH region. In that case, we just need the other SET.
3487 We prefer this over the PARALLEL.
3489 This can occur when simplifying a divmod insn. We *must* test for this
3490 case here because the code below that splits two independent SETs doesn't
3491 handle this case correctly when it updates the register status.
3493 It's pointless doing this if we originally had two sets, one from
3494 i3, and one from i2. Combining then splitting the parallel results
3495 in the original i2 again plus an invalid insn (which we delete).
3496 The net effect is only to move instructions around, which makes
3497 debug info less accurate.
3499 If the remaining SET came from I2 its destination should not be used
3500 between I2 and I3. See PR82024. */
3502 if (!(added_sets_2 && i1 == 0)
3503 && is_parallel_of_n_reg_sets (newpat, 2)
3504 && asm_noperands (newpat) < 0)
3506 rtx set0 = XVECEXP (newpat, 0, 0);
3507 rtx set1 = XVECEXP (newpat, 0, 1);
3508 rtx oldpat = newpat;
3510 if (((REG_P (SET_DEST (set1))
3511 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3512 || (GET_CODE (SET_DEST (set1)) == SUBREG
3513 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3514 && insn_nothrow_p (i3)
3515 && !side_effects_p (SET_SRC (set1)))
3516 {
3517 newpat = set0;
3518 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3519 }
3521 else if (((REG_P (SET_DEST (set0))
3522 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3523 || (GET_CODE (SET_DEST (set0)) == SUBREG
3524 && find_reg_note (i3, REG_UNUSED,
3525 SUBREG_REG (SET_DEST (set0)))))
3526 && insn_nothrow_p (i3)
3527 && !side_effects_p (SET_SRC (set0)))
3529 rtx dest = SET_DEST (set1);
3530 if (GET_CODE (dest) == SUBREG)
3531 dest = SUBREG_REG (dest);
3532 if (!reg_used_between_p (dest, i2, i3))
3533 {
3534 newpat = set1;
3535 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3537 if (insn_code_number >= 0)
3538 changed_i3_dest = 1;
3539 }
3540 }
3542 if (insn_code_number < 0)
3543 newpat = oldpat;
3544 }
3546 /* Is the result of combination a valid instruction? */
3547 if (insn_code_number < 0)
3548 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3550 /* If we were combining three insns and the result is a simple SET
3551 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3552 insns. There are two ways to do this. It can be split using a
3553 machine-specific method (like when you have an addition of a large
3554 constant) or by combine in the function find_split_point. */
3556 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3557 && asm_noperands (newpat) < 0)
3559 rtx parallel, *split;
3560 rtx_insn *m_split_insn;
3562 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3563 use I2DEST as a scratch register will help. In the latter case,
3564 convert I2DEST to the mode of the source of NEWPAT if we can. */
3566 m_split_insn = combine_split_insns (newpat, i3);
3568 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3569 inputs of NEWPAT. */
3571 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3572 possible to try that as a scratch reg. This would require adding
3573 more code to make it work though. */
3575 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3577 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3579 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3580 (temporarily, until we are committed to this instruction
3581 combination) does not work: for example, any call to nonzero_bits
3582 on the register (from a splitter in the MD file, for example)
3583 will get the old information, which is invalid.
3585 Since nowadays we can create registers during combine just fine,
3586 we should just create a new one here, not reuse i2dest. */
3588 /* First try to split using the original register as a
3589 scratch register. */
3590 parallel = gen_rtx_PARALLEL (VOIDmode,
3591 gen_rtvec (2, newpat,
3592 gen_rtx_CLOBBER (VOIDmode,
3594 m_split_insn = combine_split_insns (parallel, i3);
3596 /* If that didn't work, try changing the mode of I2DEST if
3597 possible. */
3598 if (m_split_insn == 0
3599 && new_mode != GET_MODE (i2dest)
3600 && new_mode != VOIDmode
3601 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3603 machine_mode old_mode = GET_MODE (i2dest);
3606 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3607 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3610 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3611 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3614 parallel = (gen_rtx_PARALLEL
3616 gen_rtvec (2, newpat,
3617 gen_rtx_CLOBBER (VOIDmode,
3619 m_split_insn = combine_split_insns (parallel, i3);
3621 if (m_split_insn == 0
3622 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3626 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3627 buf = undobuf.undos;
3628 undobuf.undos = buf->next;
3629 buf->next = undobuf.frees;
3630 undobuf.frees = buf;
3634 i2scratch = m_split_insn != 0;
3637 /* If recog_for_combine has discarded clobbers, try to use them
3638 again for the split. */
3639 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3641 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3642 m_split_insn = combine_split_insns (parallel, i3);
3645 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3647 rtx m_split_pat = PATTERN (m_split_insn);
3648 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3649 if (insn_code_number >= 0)
3650 newpat = m_split_pat;
3652 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3653 && (next_nonnote_nondebug_insn (i2) == i3
3654 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3657 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3658 newi2pat = PATTERN (m_split_insn);
3660 i3set = single_set (NEXT_INSN (m_split_insn));
3661 i2set = single_set (m_split_insn);
3663 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3665 /* If I2 or I3 has multiple SETs, we won't know how to track
3666 register status, so don't use these insns. If I2's destination
3667 is used between I2 and I3, we also can't use these insns. */
3669 if (i2_code_number >= 0 && i2set && i3set
3670 && (next_nonnote_nondebug_insn (i2) == i3
3671 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3672 insn_code_number = recog_for_combine (&newi3pat, i3,
3673 &new_i3_notes);
3674 if (insn_code_number >= 0)
3675 newpat = newi3pat;
3677 /* It is possible that both insns now set the destination of I3.
3678 If so, we must show an extra use of it. */
3680 if (insn_code_number >= 0)
3682 rtx new_i3_dest = SET_DEST (i3set);
3683 rtx new_i2_dest = SET_DEST (i2set);
3685 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3686 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3687 || GET_CODE (new_i3_dest) == SUBREG)
3688 new_i3_dest = XEXP (new_i3_dest, 0);
3690 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3691 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3692 || GET_CODE (new_i2_dest) == SUBREG)
3693 new_i2_dest = XEXP (new_i2_dest, 0);
3695 if (REG_P (new_i3_dest)
3696 && REG_P (new_i2_dest)
3697 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3698 && REGNO (new_i2_dest) < reg_n_sets_max)
3699 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3703 /* If we can split it and use I2DEST, go ahead and see if that
3704 helps things be recognized. Verify that none of the registers
3705 are set between I2 and I3. */
3706 if (insn_code_number < 0
3707 && (split = find_split_point (&newpat, i3, false)) != 0
3708 && (!HAVE_cc0 || REG_P (i2dest))
3709 /* We need I2DEST in the proper mode. If it is a hard register
3710 or the only use of a pseudo, we can change its mode.
3711 Make sure we don't change a hard register to have a mode that
3712 isn't valid for it, or change the number of registers. */
3713 && (GET_MODE (*split) == GET_MODE (i2dest)
3714 || GET_MODE (*split) == VOIDmode
3715 || can_change_dest_mode (i2dest, added_sets_2,
3717 && (next_nonnote_nondebug_insn (i2) == i3
3718 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3719 /* We can't overwrite I2DEST if its value is still used by
3720 NEWPAT. */
3721 && ! reg_referenced_p (i2dest, newpat))
3723 rtx newdest = i2dest;
3724 enum rtx_code split_code = GET_CODE (*split);
3725 machine_mode split_mode = GET_MODE (*split);
3726 bool subst_done = false;
3727 newi2pat = NULL_RTX;
3731 /* *SPLIT may be part of I2SRC, so make sure we have the
3732 original expression around for later debug processing.
3733 We should not need I2SRC any more in other cases. */
3734 if (MAY_HAVE_DEBUG_INSNS)
3735 i2src = copy_rtx (i2src);
3739 /* Get NEWDEST as a register in the proper mode. We have already
3740 validated that we can do this. */
3741 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3743 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3744 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3747 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3748 newdest = regno_reg_rtx[REGNO (i2dest)];
3752 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3753 an ASHIFT. This can occur if it was inside a PLUS and hence
3754 appeared to be a memory address. This is a kludge. */
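/* For example, (mult FOO (const_int 8)) is rewritten as
   (ashift FOO (const_int 3)), since 8 == 1 << 3 and ASHIFT is the
   canonical form outside of an address.  */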
3755 if (split_code == MULT
3756 && CONST_INT_P (XEXP (*split, 1))
3757 && INTVAL (XEXP (*split, 1)) > 0
3758 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3760 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3761 XEXP (*split, 0), GEN_INT (i)));
3762 /* Update split_code because we may not have a multiply anymore.  */
3764 split_code = GET_CODE (*split);
3767 /* Similarly for (plus (mult FOO (const_int pow2))). */
3768 if (split_code == PLUS
3769 && GET_CODE (XEXP (*split, 0)) == MULT
3770 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3771 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3772 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3774 rtx nsplit = XEXP (*split, 0);
3775 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3776 XEXP (nsplit, 0), GEN_INT (i)));
3777 /* Update split_code because we may not have a multiply anymore.  */
3779 split_code = GET_CODE (*split);
3782 #ifdef INSN_SCHEDULING
3783 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3784 be written as a ZERO_EXTEND. */
3785 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3787 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3788 what it really is. */
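/* E.g. (subreg:SI (mem:QI ADDR) 0) becomes
   (zero_extend:SI (mem:QI ADDR)), or the corresponding SIGN_EXTEND
   when load_extend_op says QImode loads sign-extend on this target
   (ADDR standing for some address).  */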
3789 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3791 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3792 SUBREG_REG (*split)));
3794 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3795 SUBREG_REG (*split)));
3799 /* Attempt to split binary operators using arithmetic identities. */
3800 if (BINARY_P (SET_SRC (newpat))
3801 && split_mode == GET_MODE (SET_SRC (newpat))
3802 && ! side_effects_p (SET_SRC (newpat)))
3804 rtx setsrc = SET_SRC (newpat);
3805 machine_mode mode = GET_MODE (setsrc);
3806 enum rtx_code code = GET_CODE (setsrc);
3807 rtx src_op0 = XEXP (setsrc, 0);
3808 rtx src_op1 = XEXP (setsrc, 1);
3810 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3811 if (rtx_equal_p (src_op0, src_op1))
3813 newi2pat = gen_rtx_SET (newdest, src_op0);
3814 SUBST (XEXP (setsrc, 0), newdest);
3815 SUBST (XEXP (setsrc, 1), newdest);
3818 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3819 else if ((code == PLUS || code == MULT)
3820 && GET_CODE (src_op0) == code
3821 && GET_CODE (XEXP (src_op0, 0)) == code
3822 && (INTEGRAL_MODE_P (mode)
3823 || (FLOAT_MODE_P (mode)
3824 && flag_unsafe_math_optimizations)))
3826 rtx p = XEXP (XEXP (src_op0, 0), 0);
3827 rtx q = XEXP (XEXP (src_op0, 0), 1);
3828 rtx r = XEXP (src_op0, 1);
3831 /* Split both "((X op Y) op X) op Y" and
3832 "((X op Y) op Y) op X" as "T op T" where T is
3834 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3835 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3837 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3838 SUBST (XEXP (setsrc, 0), newdest);
3839 SUBST (XEXP (setsrc, 1), newdest);
3842 /* Split "((X op X) op Y) op Y)" as "T op T" where
3844 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3846 rtx tmp = simplify_gen_binary (code, mode, p, r);
3847 newi2pat = gen_rtx_SET (newdest, tmp);
3848 SUBST (XEXP (setsrc, 0), newdest);
3849 SUBST (XEXP (setsrc, 1), newdest);
3857 newi2pat = gen_rtx_SET (newdest, *split);
3858 SUBST (*split, newdest);
3861 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3863 /* recog_for_combine might have added CLOBBERs to newi2pat.
3864 Make sure NEWPAT does not depend on the clobbered regs. */
3865 if (GET_CODE (newi2pat) == PARALLEL)
3866 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3867 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3869 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3870 if (reg_overlap_mentioned_p (reg, newpat))
3877 /* If the split point was a MULT and we didn't have one before,
3878 don't use one now. */
3879 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3880 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3884 /* Check for a case where we loaded from memory in a narrow mode and
3885 then sign extended it, but we need both registers. In that case,
3886 we have a PARALLEL with both loads from the same memory location.
3887 We can split this into a load from memory followed by a register-register
3888 copy.  This saves at least one insn, more if register allocation can eliminate the copy.
3891 We cannot do this if the destination of the first assignment is a
3892 condition code register or cc0. We eliminate this case by making sure
3893 the SET_DEST and SET_SRC have the same mode.
3895 We cannot do this if the destination of the second assignment is
3896 a register that we have already assumed is zero-extended. Similarly
3897 for a SUBREG of such a register. */
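/* A sketch of the shape handled here (pseudo numbers hypothetical):
   (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI ADDR)))
              (set (reg:HI 101) (mem:HI ADDR))])
   is split so that I2 does the extending load into (reg:SI 100) and
   I3 copies the low part of (reg:SI 100) into (reg:HI 101).  */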
3899 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3900 && GET_CODE (newpat) == PARALLEL
3901 && XVECLEN (newpat, 0) == 2
3902 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3903 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3904 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3905 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3906 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3907 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3908 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3909 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3911 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3912 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3913 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3915 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3916 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3917 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3918 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3919 != GET_MODE_MASK (word_mode))))
3920 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3921 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3923 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3924 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3925 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3926 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3927 != GET_MODE_MASK (word_mode)))))
3928 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3929 SET_SRC (XVECEXP (newpat, 0, 1)))
3930 && ! find_reg_note (i3, REG_UNUSED,
3931 SET_DEST (XVECEXP (newpat, 0, 0))))
3935 newi2pat = XVECEXP (newpat, 0, 0);
3936 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3937 newpat = XVECEXP (newpat, 0, 1);
3938 SUBST (SET_SRC (newpat),
3939 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3940 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3942 if (i2_code_number >= 0)
3943 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3945 if (insn_code_number >= 0)
3949 /* Similarly, check for a case where we have a PARALLEL of two independent
3950 SETs but we started with three insns. In this case, we can do the sets
3951 as two separate insns. This case occurs when some SET allows two
3952 other insns to combine, but the destination of that SET is still live.
3954 Also do this if we started with two insns and (at least) one of the
3955 resulting sets is a noop; this noop will be deleted later. */
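/* E.g. (pseudo numbers hypothetical)
   (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
              (set (reg:SI 103) (neg:SI (reg:SI 104)))])
   contains two independent SETs that can become separate I2 and I3
   insns, subject to the ordering checks below.  */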
3957 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3958 && GET_CODE (newpat) == PARALLEL
3959 && XVECLEN (newpat, 0) == 2
3960 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3961 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3962 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3963 || set_noop_p (XVECEXP (newpat, 0, 1)))
3964 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3965 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3966 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3967 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3968 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3969 XVECEXP (newpat, 0, 0))
3970 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3971 XVECEXP (newpat, 0, 1))
3972 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3973 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3975 rtx set0 = XVECEXP (newpat, 0, 0);
3976 rtx set1 = XVECEXP (newpat, 0, 1);
3978 /* Normally, it doesn't matter which of the two is done first,
3979 but the one that references cc0 can't be the second, and
3980 one which uses any regs/memory set in between i2 and i3 can't
3981 be first. The PARALLEL might also have been pre-existing in i3,
3982 so we need to make sure that we won't wrongly hoist a SET to i2
3983 that would conflict with a death note present in there. */
3984 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3985 && !(REG_P (SET_DEST (set1))
3986 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3987 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3988 && find_reg_note (i2, REG_DEAD,
3989 SUBREG_REG (SET_DEST (set1))))
3990 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
3991 /* If I3 is a jump, ensure that set0 is a jump so that
3992 we do not create invalid RTL. */
3993 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3999 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
4000 && !(REG_P (SET_DEST (set0))
4001 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4002 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4003 && find_reg_note (i2, REG_DEAD,
4004 SUBREG_REG (SET_DEST (set0))))
4005 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4006 /* If I3 is a jump, ensure that set1 is a jump so that
4007 we do not create invalid RTL. */
4008 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4020 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4022 if (i2_code_number >= 0)
4024 /* recog_for_combine might have added CLOBBERs to newi2pat.
4025 Make sure NEWPAT does not depend on the clobbered regs. */
4026 if (GET_CODE (newi2pat) == PARALLEL)
4028 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4029 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4031 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4032 if (reg_overlap_mentioned_p (reg, newpat))
4040 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4044 /* If it still isn't recognized, fail and change things back the way they were.  */
4046 if ((insn_code_number < 0
4047 /* Is the result a reasonable ASM_OPERANDS? */
4048 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4054 /* If we had to change another insn, make sure it is valid also. */
4055 if (undobuf.other_insn)
4057 CLEAR_HARD_REG_SET (newpat_used_regs);
4059 other_pat = PATTERN (undobuf.other_insn);
4060 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4063 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4070 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4071 they are adjacent to each other or not. */
4074 rtx_insn *p = prev_nonnote_insn (i3);
4075 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4076 && sets_cc0_p (newi2pat))
4083 /* Only allow this combination if insn_rtx_costs reports that the
4084 replacement instructions are cheaper than the originals. */
4085 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4091 if (MAY_HAVE_DEBUG_INSNS)
4095 for (undo = undobuf.undos; undo; undo = undo->next)
4096 if (undo->kind == UNDO_MODE)
4098 rtx reg = *undo->where.r;
4099 machine_mode new_mode = GET_MODE (reg);
4100 machine_mode old_mode = undo->old_contents.m;
4102 /* Temporarily revert mode back. */
4103 adjust_reg_mode (reg, old_mode);
4105 if (reg == i2dest && i2scratch)
4107 /* If we used i2dest as a scratch register with a
4108 different mode, substitute it for the original
4109 i2src while its original mode is temporarily
4110 restored, and then clear i2scratch so that we don't
4111 do it again later. */
4112 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4115 /* Put back the new mode. */
4116 adjust_reg_mode (reg, new_mode);
4120 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4121 rtx_insn *first, *last;
4126 last = last_combined_insn;
4131 last = undobuf.other_insn;
4133 if (DF_INSN_LUID (last)
4134 < DF_INSN_LUID (last_combined_insn))
4135 last = last_combined_insn;
4138 /* We're dealing with a reg that changed mode but not
4139 meaning, so we want to turn it into a subreg for
4140 the new mode. However, because of REG sharing and
4141 because its mode had already changed, we have to do
4142 it in two steps. First, replace any debug uses of
4143 reg, with its original mode temporarily restored,
4144 with this copy we have created; then, replace the
4145 copy with the SUBREG of the original shared reg,
4146 once again changed to the new mode. */
4147 propagate_for_debug (first, last, reg, tempreg,
4149 adjust_reg_mode (reg, new_mode);
4150 propagate_for_debug (first, last, tempreg,
4151 lowpart_subreg (old_mode, reg, new_mode),
4157 /* If we will be able to accept this, we have made a
4158 change to the destination of I3. This requires us to
4159 do a few adjustments. */
4161 if (changed_i3_dest)
4163 PATTERN (i3) = newpat;
4164 adjust_for_new_dest (i3);
4167 /* We now know that we can do this combination. Merge the insns and
4168 update the status of registers and LOG_LINKS. */
4170 if (undobuf.other_insn)
4174 PATTERN (undobuf.other_insn) = other_pat;
4176 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4177 ensure that they are still valid. Then add any non-duplicate
4178 notes added by recog_for_combine. */
4179 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4181 next = XEXP (note, 1);
4183 if ((REG_NOTE_KIND (note) == REG_DEAD
4184 && !reg_referenced_p (XEXP (note, 0),
4185 PATTERN (undobuf.other_insn)))
4186 || (REG_NOTE_KIND (note) == REG_UNUSED
4187 && !reg_set_p (XEXP (note, 0),
4188 PATTERN (undobuf.other_insn)))
4189 /* Simply drop the REG_EQUAL and REG_EQUIV notes, since they may
4190 no longer be valid for other_insn.  It might be possible to
4191 record that the CC register changed and discard only those
4192 notes, but in practice that is unnecessary complication and
4193 gives no meaningful improvement.  */
4196 || REG_NOTE_KIND (note) == REG_EQUAL
4197 || REG_NOTE_KIND (note) == REG_EQUIV)
4198 remove_note (undobuf.other_insn, note);
4201 distribute_notes (new_other_notes, undobuf.other_insn,
4202 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4209 struct insn_link *link;
4212 /* I3 now uses what used to be its destination and which is now
4213 I2's destination. This requires us to do a few adjustments. */
4214 PATTERN (i3) = newpat;
4215 adjust_for_new_dest (i3);
4217 /* We need a LOG_LINK from I3 to I2.  But we used to have one, so we still will.
4220 However, some later insn might be using I2's dest and have
4221 a LOG_LINK pointing at I3. We must remove this link.
4222 The simplest way to remove the link is to point it at I1,
4223 which we know will be a NOTE. */
4225 /* newi2pat is usually a SET here; however, recog_for_combine might
4226 have added some clobbers. */
4227 if (GET_CODE (newi2pat) == PARALLEL)
4228 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4230 ni2dest = SET_DEST (newi2pat);
4232 for (insn = NEXT_INSN (i3);
4233 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4234 || insn != BB_HEAD (this_basic_block->next_bb));
4235 insn = NEXT_INSN (insn))
4237 if (NONDEBUG_INSN_P (insn)
4238 && reg_referenced_p (ni2dest, PATTERN (insn)))
4240 FOR_EACH_LOG_LINK (link, insn)
4241 if (link->insn == i3)
4250 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4251 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4254 /* Compute which registers we expect to eliminate. newi2pat may be setting
4255 either i3dest or i2dest, so we must check it. */
4256 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4257 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4260 /* For i1, we need to compute both local elimination and global
4261 elimination information with respect to newi2pat because i1dest
4262 may be the same as i3dest, in which case newi2pat may be setting
4263 i1dest. Global information is used when distributing REG_DEAD
4264 note for i2 and i3, in which case it does matter if newi2pat sets
4267 Local information is used when distributing REG_DEAD note for i1,
4268 in which case it doesn't matter if newi2pat sets i1dest or not.
4269 See PR62151: if we have a four-insn combination:
4271 i1: r1 <- i1src (using r0)
4273 i2: r0 <- i2src (using r1)
4274 i3: r3 <- i3src (using r0)
4276 From i1's point of view, r0 is eliminated, no matter if it is set
4277 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4278 should be discarded.
4280 Note local information only affects cases in forms like "I1->I2->I3",
4281 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4282 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4284 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4287 rtx elim_i1 = (local_elim_i1 == 0
4288 || (newi2pat && reg_set_p (i1dest, newi2pat))
4290 /* Same case as i1. */
4291 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4293 rtx elim_i0 = (local_elim_i0 == 0
4294 || (newi2pat && reg_set_p (i0dest, newi2pat))
4297 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4299 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4300 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4302 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4304 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4306 /* Ensure that we do not have something that should not be shared but
4307 occurs multiple times in the new insns. Check this by first
4308 resetting all the `used' flags and then copying anything that is shared.  */
4310 reset_used_flags (i3notes);
4311 reset_used_flags (i2notes);
4312 reset_used_flags (i1notes);
4313 reset_used_flags (i0notes);
4314 reset_used_flags (newpat);
4315 reset_used_flags (newi2pat);
4316 if (undobuf.other_insn)
4317 reset_used_flags (PATTERN (undobuf.other_insn));
4319 i3notes = copy_rtx_if_shared (i3notes);
4320 i2notes = copy_rtx_if_shared (i2notes);
4321 i1notes = copy_rtx_if_shared (i1notes);
4322 i0notes = copy_rtx_if_shared (i0notes);
4323 newpat = copy_rtx_if_shared (newpat);
4324 newi2pat = copy_rtx_if_shared (newi2pat);
4325 if (undobuf.other_insn)
4326 reset_used_flags (PATTERN (undobuf.other_insn));
4328 INSN_CODE (i3) = insn_code_number;
4329 PATTERN (i3) = newpat;
4331 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4333 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4334 link = XEXP (link, 1))
4338 /* I2SRC must still be meaningful at this point. Some
4339 splitting operations can invalidate I2SRC, but those
4340 operations do not apply to calls. */
4342 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4346 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4349 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4354 if (undobuf.other_insn)
4355 INSN_CODE (undobuf.other_insn) = other_code_number;
4357 /* We had one special case above where I2 had more than one set and
4358 we replaced a destination of one of those sets with the destination
4359 of I3. In that case, we have to update LOG_LINKS of insns later
4360 in this basic block. Note that this (expensive) case is rare.
4362 Also, in this case, we must pretend that all REG_NOTEs for I2
4363 actually came from I3, so that REG_UNUSED notes from I2 will be
4364 properly handled. */
4366 if (i3_subst_into_i2)
4368 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4369 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4370 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4371 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4372 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4373 && ! find_reg_note (i2, REG_UNUSED,
4374 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4375 for (temp_insn = NEXT_INSN (i2);
4377 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4378 || BB_HEAD (this_basic_block) != temp_insn);
4379 temp_insn = NEXT_INSN (temp_insn))
4380 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4381 FOR_EACH_LOG_LINK (link, temp_insn)
4382 if (link->insn == i2)
4388 while (XEXP (link, 1))
4389 link = XEXP (link, 1);
4390 XEXP (link, 1) = i2notes;
4397 LOG_LINKS (i3) = NULL;
4399 LOG_LINKS (i2) = NULL;
4404 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4405 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4407 INSN_CODE (i2) = i2_code_number;
4408 PATTERN (i2) = newi2pat;
4412 if (MAY_HAVE_DEBUG_INSNS && i2src)
4413 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4415 SET_INSN_DELETED (i2);
4420 LOG_LINKS (i1) = NULL;
4422 if (MAY_HAVE_DEBUG_INSNS)
4423 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4425 SET_INSN_DELETED (i1);
4430 LOG_LINKS (i0) = NULL;
4432 if (MAY_HAVE_DEBUG_INSNS)
4433 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4435 SET_INSN_DELETED (i0);
4438 /* Get death notes for everything that is now used in either I3 or
4439 I2 and used to die in a previous insn. If we built two new
4440 patterns, move from I1 to I2 then I2 to I3 so that we get the
4441 proper movement on registers that I2 modifies. */
4444 from_luid = DF_INSN_LUID (i0);
4446 from_luid = DF_INSN_LUID (i1);
4448 from_luid = DF_INSN_LUID (i2);
4450 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4451 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4453 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4455 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4456 elim_i2, elim_i1, elim_i0);
4458 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4459 elim_i2, elim_i1, elim_i0);
4461 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4462 elim_i2, local_elim_i1, local_elim_i0);
4464 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4465 elim_i2, elim_i1, local_elim_i0);
4467 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4468 elim_i2, elim_i1, elim_i0);
4470 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4471 know these are REG_UNUSED and want them to go to the desired insn,
4472 so we always pass it as i3. */
4474 if (newi2pat && new_i2_notes)
4475 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4479 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4482 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4483 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4484 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4485 in that case, it might delete I2. Similarly for I2 and I1.
4486 Show an additional death due to the REG_DEAD note we make here. If
4487 we discard it in distribute_notes, we will decrement it again. */
4491 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4492 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4493 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4496 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4497 elim_i2, elim_i1, elim_i0);
4500 if (i2dest_in_i2src)
4502 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4503 if (newi2pat && reg_set_p (i2dest, newi2pat))
4504 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4505 NULL_RTX, NULL_RTX);
4507 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4508 NULL_RTX, NULL_RTX, NULL_RTX);
4511 if (i1dest_in_i1src)
4513 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4514 if (newi2pat && reg_set_p (i1dest, newi2pat))
4515 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4516 NULL_RTX, NULL_RTX);
4518 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4519 NULL_RTX, NULL_RTX, NULL_RTX);
4522 if (i0dest_in_i0src)
4524 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4525 if (newi2pat && reg_set_p (i0dest, newi2pat))
4526 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4527 NULL_RTX, NULL_RTX);
4529 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4530 NULL_RTX, NULL_RTX, NULL_RTX);
4533 distribute_links (i3links);
4534 distribute_links (i2links);
4535 distribute_links (i1links);
4536 distribute_links (i0links);
4540 struct insn_link *link;
4541 rtx_insn *i2_insn = 0;
4542 rtx i2_val = 0, set;
4544 /* The insn that used to set this register doesn't exist, and
4545 this life of the register may not exist either. See if one of
4546 I3's links points to an insn that sets I2DEST. If it does,
4547 that is now the last known value for I2DEST. If we don't update
4548 this and I2 set the register to a value that depended on its old
4549 contents, we will get confused.  If this insn is used, things
4550 will be set correctly in combine_instructions. */
4551 FOR_EACH_LOG_LINK (link, i3)
4552 if ((set = single_set (link->insn)) != 0
4553 && rtx_equal_p (i2dest, SET_DEST (set)))
4554 i2_insn = link->insn, i2_val = SET_SRC (set);
4556 record_value_for_reg (i2dest, i2_insn, i2_val);
4558 /* If the reg formerly set in I2 died only once and that was in I3,
4559 zero its use count so it won't make `reload' do any work. */
4561 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4562 && ! i2dest_in_i2src
4563 && REGNO (i2dest) < reg_n_sets_max)
4564 INC_REG_N_SETS (REGNO (i2dest), -1);
4567 if (i1 && REG_P (i1dest))
4569 struct insn_link *link;
4570 rtx_insn *i1_insn = 0;
4571 rtx i1_val = 0, set;
4573 FOR_EACH_LOG_LINK (link, i3)
4574 if ((set = single_set (link->insn)) != 0
4575 && rtx_equal_p (i1dest, SET_DEST (set)))
4576 i1_insn = link->insn, i1_val = SET_SRC (set);
4578 record_value_for_reg (i1dest, i1_insn, i1_val);
4581 && ! i1dest_in_i1src
4582 && REGNO (i1dest) < reg_n_sets_max)
4583 INC_REG_N_SETS (REGNO (i1dest), -1);
4586 if (i0 && REG_P (i0dest))
4588 struct insn_link *link;
4589 rtx_insn *i0_insn = 0;
4590 rtx i0_val = 0, set;
4592 FOR_EACH_LOG_LINK (link, i3)
4593 if ((set = single_set (link->insn)) != 0
4594 && rtx_equal_p (i0dest, SET_DEST (set)))
4595 i0_insn = link->insn, i0_val = SET_SRC (set);
4597 record_value_for_reg (i0dest, i0_insn, i0_val);
4600 && ! i0dest_in_i0src
4601 && REGNO (i0dest) < reg_n_sets_max)
4602 INC_REG_N_SETS (REGNO (i0dest), -1);
4605 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4606 been made to this insn. The order is important, because newi2pat
4607 can affect nonzero_bits of newpat. */
4609 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4610 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4613 if (undobuf.other_insn != NULL_RTX)
4617 fprintf (dump_file, "modifying other_insn ");
4618 dump_insn_slim (dump_file, undobuf.other_insn);
4620 df_insn_rescan (undobuf.other_insn);
4623 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4627 fprintf (dump_file, "modifying insn i0 ");
4628 dump_insn_slim (dump_file, i0);
4630 df_insn_rescan (i0);
4633 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4637 fprintf (dump_file, "modifying insn i1 ");
4638 dump_insn_slim (dump_file, i1);
4640 df_insn_rescan (i1);
4643 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4647 fprintf (dump_file, "modifying insn i2 ");
4648 dump_insn_slim (dump_file, i2);
4650 df_insn_rescan (i2);
4653 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4657 fprintf (dump_file, "modifying insn i3 ");
4658 dump_insn_slim (dump_file, i3);
4660 df_insn_rescan (i3);
4663 /* Set new_direct_jump_p if a new return or simple jump instruction
4664 has been created. Adjust the CFG accordingly. */
4665 if (returnjump_p (i3) || any_uncondjump_p (i3))
4667 *new_direct_jump_p = 1;
4668 mark_jump_label (PATTERN (i3), i3, 0);
4669 update_cfg_for_uncondjump (i3);
4672 if (undobuf.other_insn != NULL_RTX
4673 && (returnjump_p (undobuf.other_insn)
4674 || any_uncondjump_p (undobuf.other_insn)))
4676 *new_direct_jump_p = 1;
4677 update_cfg_for_uncondjump (undobuf.other_insn);
4680 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4681 && XEXP (PATTERN (i3), 0) == const1_rtx)
4683 basic_block bb = BLOCK_FOR_INSN (i3);
4685 remove_edge (split_block (bb, i3));
4686 emit_barrier_after_bb (bb);
4687 *new_direct_jump_p = 1;
4690 if (undobuf.other_insn
4691 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4692 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4694 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4696 remove_edge (split_block (bb, undobuf.other_insn));
4697 emit_barrier_after_bb (bb);
4698 *new_direct_jump_p = 1;
4701 /* A noop might also need cleaning up of the CFG, if it comes from the
4702 simplification of a jump. */
4704 && GET_CODE (newpat) == SET
4705 && SET_SRC (newpat) == pc_rtx
4706 && SET_DEST (newpat) == pc_rtx)
4708 *new_direct_jump_p = 1;
4709 update_cfg_for_uncondjump (i3);
4712 if (undobuf.other_insn != NULL_RTX
4713 && JUMP_P (undobuf.other_insn)
4714 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4715 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4716 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4718 *new_direct_jump_p = 1;
4719 update_cfg_for_uncondjump (undobuf.other_insn);
4722 combine_successes++;
4725 if (added_links_insn
4726 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4727 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4728 return added_links_insn;
4730 return newi2pat ? i2 : i3;
4733 /* Get a marker for undoing to the current state. */
4736 get_undo_marker (void)
4738 return undobuf.undos;
4741 /* Undo the modifications up to the marker. */
4744 undo_to_marker (void *marker)
4746 struct undo *undo, *next;
4748 for (undo = undobuf.undos; undo != marker; undo = next)
4756 *undo->where.r = undo->old_contents.r;
4759 *undo->where.i = undo->old_contents.i;
4762 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4765 *undo->where.l = undo->old_contents.l;
4771 undo->next = undobuf.frees;
4772 undobuf.frees = undo;
4775 undobuf.undos = (struct undo *) marker;
4778 /* Undo all the modifications recorded in undobuf. */
4786 /* We've committed to accepting the changes we made. Move all
4787 of the undos to the free list. */
4792 struct undo *undo, *next;
4794 for (undo = undobuf.undos; undo; undo = next)
4797 undo->next = undobuf.frees;
4798 undobuf.frees = undo;
4803 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4804 where we have an arithmetic expression and return that point.  LOC will be inside INSN.
4807 try_combine will call this function to see if an insn can be split into two insns.  */
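/* For example, given a hypothetical
   (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 0x12345678)))
   whose constant is not a legal immediate, the address of the
   constant operand may be returned so it can be computed in a
   separate insn.  */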
4811 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4814 enum rtx_code code = GET_CODE (x);
4816 unsigned HOST_WIDE_INT len = 0;
4817 HOST_WIDE_INT pos = 0;
4819 rtx inner = NULL_RTX;
4820 scalar_int_mode mode, inner_mode;
4822 /* First special-case some codes. */
4826 #ifdef INSN_SCHEDULING
4827 /* If we are making a paradoxical SUBREG invalid, it becomes a split point.  */
4829 if (MEM_P (SUBREG_REG (x)))
4832 return find_split_point (&SUBREG_REG (x), insn, false);
4835 /* If we have (mem (const ...)) or (mem (symbol_ref ...)), split it
4836 using LO_SUM and HIGH. */
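/* E.g. (mem (symbol_ref "sym")) is rewritten as
   (mem (lo_sum (high (symbol_ref "sym")) (symbol_ref "sym"))),
   with the HIGH part returned as the split point, mirroring the
   usual RISC high/lo_sum address decomposition.  */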
4837 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4838 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4840 machine_mode address_mode = get_address_mode (x);
4843 gen_rtx_LO_SUM (address_mode,
4844 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4846 return &XEXP (XEXP (x, 0), 0);
4849 /* If we have a PLUS whose second operand is a constant and the
4850 address is not valid, perhaps we can split it up using
4851 the machine-specific way to split large constants. We use
4852 the first pseudo-reg (one of the virtual regs) as a placeholder;
4853 it will not remain in the result. */
4854 if (GET_CODE (XEXP (x, 0)) == PLUS
4855 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4856 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4857 MEM_ADDR_SPACE (x)))
4859 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4860 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4863 /* This should have produced two insns, each of which sets our
4864 placeholder. If the source of the second is a valid address,
4865 we can put both sources together and make a split point in the middle.  */
4869 && NEXT_INSN (seq) != NULL_RTX
4870 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4871 && NONJUMP_INSN_P (seq)
4872 && GET_CODE (PATTERN (seq)) == SET
4873 && SET_DEST (PATTERN (seq)) == reg
4874 && ! reg_mentioned_p (reg,
4875 SET_SRC (PATTERN (seq)))
4876 && NONJUMP_INSN_P (NEXT_INSN (seq))
4877 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4878 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4879 && memory_address_addr_space_p
4880 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4881 MEM_ADDR_SPACE (x)))
4883 rtx src1 = SET_SRC (PATTERN (seq));
4884 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4886 /* Replace the placeholder in SRC2 with SRC1. If we can
4887 find where in SRC2 it was placed, that can become our
4888 split point and we can replace this address with SRC2.
4889 Just try two obvious places. */
4891 src2 = replace_rtx (src2, reg, src1);
4893 if (XEXP (src2, 0) == src1)
4894 split = &XEXP (src2, 0);
4895 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4896 && XEXP (XEXP (src2, 0), 0) == src1)
4897 split = &XEXP (XEXP (src2, 0), 0);
4901 SUBST (XEXP (x, 0), src2);
4906 /* If that didn't work, perhaps the first operand is complex and
4907 needs to be computed separately, so make a split point there.
4908 This will occur on machines that just support REG + CONST
4909 and have a constant moved through some previous computation. */
4911 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4912 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4913 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4914 return &XEXP (XEXP (x, 0), 0);
4917 /* If we have a PLUS whose first operand is complex, try computing it
4918 separately by making a split there. */
4919 if (GET_CODE (XEXP (x, 0)) == PLUS
4920 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4922 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4923 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4924 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4925 return &XEXP (XEXP (x, 0), 0);
4929 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4930 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4931 we need to put the operand into a register.  So split at that point.  */
4934 if (SET_DEST (x) == cc0_rtx
4935 && GET_CODE (SET_SRC (x)) != COMPARE
4936 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4937 && !OBJECT_P (SET_SRC (x))
4938 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4939 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4940 return &SET_SRC (x);
4942 /* See if we can split SET_SRC as it stands. */
4943 split = find_split_point (&SET_SRC (x), insn, true);
4944 if (split && split != &SET_SRC (x))
4947 /* See if we can split SET_DEST as it stands. */
4948 split = find_split_point (&SET_DEST (x), insn, false);
4949 if (split && split != &SET_DEST (x))
4952 /* See if this is a bitfield assignment with everything constant. If
4953 so, this is an IOR of an AND, so split it into that. */
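/* E.g. storing 3 into an 8-bit field at bit 0 of a hypothetical
   pseudo (and assuming !BITS_BIG_ENDIAN):
   (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
        (const_int 3))
   becomes
   (set (reg:SI 100) (ior:SI (and:SI (reg:SI 100) (const_int -256))
                             (const_int 3)))  */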
4954 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4955 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
4957 && HWI_COMPUTABLE_MODE_P (inner_mode)
4958 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4959 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4960 && CONST_INT_P (SET_SRC (x))
4961 && ((INTVAL (XEXP (SET_DEST (x), 1))
4962 + INTVAL (XEXP (SET_DEST (x), 2)))
4963 <= GET_MODE_PRECISION (inner_mode))
4964 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4966 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4967 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4968 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4969 rtx dest = XEXP (SET_DEST (x), 0);
4970 unsigned HOST_WIDE_INT mask
4971 = (HOST_WIDE_INT_1U << len) - 1;
4974 if (BITS_BIG_ENDIAN)
4975 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
4977 or_mask = gen_int_mode (src << pos, inner_mode);
4980 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
4983 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
4985 simplify_gen_binary (IOR, inner_mode,
4986 simplify_gen_binary (AND, inner_mode,
4991 SUBST (SET_DEST (x), dest);
4993 split = find_split_point (&SET_SRC (x), insn, true);
4994 if (split && split != &SET_SRC (x))
4998 /* Otherwise, see if this is an operation that we can split into two.
4999 If so, try to split that. */
5000 code = GET_CODE (SET_SRC (x));
5005 /* If we are AND'ing with a large constant that is only a single
5006 bit and the result is only being used in a context where we
5007 need to know if it is zero or nonzero, replace it with a bit
5008 extraction. This will avoid the large constant, which might
5009 have taken more than one insn to make. If the constant were
5010 not a valid argument to the AND but took only one insn to make,
5011 this is no worse, but if it took more than one insn, it will be better.  */
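/* E.g. if (and:SI (reg:SI 100) (const_int 0x1000)) is used only in
   a comparison against zero, it can be replaced by an extraction of
   bit 12 alone, avoiding the 0x1000 immediate (pseudo 100
   hypothetical).  */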
5014 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5015 && REG_P (XEXP (SET_SRC (x), 0))
5016 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5017 && REG_P (SET_DEST (x))
5018 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5019 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5020 && XEXP (*split, 0) == SET_DEST (x)
5021 && XEXP (*split, 1) == const0_rtx)
5023 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5024 XEXP (SET_SRC (x), 0),
5025 pos, NULL_RTX, 1, 1, 0, 0);
5026 if (extraction != 0)
5028 SUBST (SET_SRC (x), extraction);
5029 return find_split_point (loc, insn, false);
5035 /* If STORE_FLAG_VALUE is -1, this is (NE X 0), and if only one bit of X
5036 is known to be on, this can be converted into a NEG of a shift.  */
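/* E.g. if only bit 3 of X can be nonzero, (ne:SI X (const_int 0))
   can become (neg:SI (lshiftrt:SI X (const_int 3))), which evaluates
   to 0 or -1, matching STORE_FLAG_VALUE == -1.  */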
5037 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5038 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5039 && 1 <= (pos = exact_log2
5040 (nonzero_bits (XEXP (SET_SRC (x), 0),
5041 GET_MODE (XEXP (SET_SRC (x), 0))))))
5043 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5047 gen_rtx_LSHIFTRT (mode,
5048 XEXP (SET_SRC (x), 0),
5051 split = find_split_point (&SET_SRC (x), insn, true);
5052 if (split && split != &SET_SRC (x))
5058 inner = XEXP (SET_SRC (x), 0);
5060 /* We can't optimize if either mode is a partial integer
5061 mode, as we don't know how many bits are significant in those modes.  */
5063 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5064 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5068 len = GET_MODE_PRECISION (inner_mode);
5074 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5076 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5077 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5079 inner = XEXP (SET_SRC (x), 0);
5080 len = INTVAL (XEXP (SET_SRC (x), 1));
5081 pos = INTVAL (XEXP (SET_SRC (x), 2));
5083 if (BITS_BIG_ENDIAN)
5084 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5085 unsignedp = (code == ZERO_EXTRACT);
5094 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
5095 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5097 /* For unsigned, we have a choice of a shift followed by an
5098 AND or two shifts. Use two shifts for field sizes where the
5099 constant might be too large. We assume here that we can
5100 always at least get 8-bit constants in an AND insn, which is
5101 true for every current RISC. */
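/* Sketch for an unsigned LEN-bit field at bit POS of INNER in SImode:
   with LEN <= 8 we use
     (and:SI (lshiftrt:SI INNER (const_int POS)) (const_int MASK))
   where MASK is (1 << LEN) - 1, and otherwise the two-shift form
     (lshiftrt:SI (ashift:SI INNER (const_int 32 - POS - LEN))
                  (const_int 32 - LEN)).  */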
5103 if (unsignedp && len <= 8)
5105 unsigned HOST_WIDE_INT mask
5106 = (HOST_WIDE_INT_1U << len) - 1;
5110 (mode, gen_lowpart (mode, inner),
5112 gen_int_mode (mask, mode)));
5114 split = find_split_point (&SET_SRC (x), insn, true);
5115 if (split && split != &SET_SRC (x))
5122 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5123 gen_rtx_ASHIFT (mode,
5124 gen_lowpart (mode, inner),
5125 GEN_INT (GET_MODE_PRECISION (mode)
5127 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5129 split = find_split_point (&SET_SRC (x), insn, true);
5130 if (split && split != &SET_SRC (x))
5135 /* See if this is a simple operation with a constant as the second
5136 operand. It might be that this constant is out of range and hence
5137 could be used as a split point. */
5138 if (BINARY_P (SET_SRC (x))
5139 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5140 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5141 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5142 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5143 return &XEXP (SET_SRC (x), 1);
5145 /* Finally, see if this is a simple operation with its first operand
5146 not in a register. The operation might require this operand in a
5147 register, so return it as a split point. We can always do this
5148 because if the first operand were another operation, we would have
5149 already found it as a split point. */
5150 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5151 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5152 return &XEXP (SET_SRC (x), 0);
5158 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5159 it is better to write this as (not (ior A B)) so we can split it.
5160 Similarly for IOR. */
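/* That is, by De Morgan's laws, (and (not A) (not B)) becomes
   (not (ior A B)), and (ior (not A) (not B)) becomes
   (not (and A B)).  */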
5161 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5164 gen_rtx_NOT (GET_MODE (x),
5165 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5167 XEXP (XEXP (x, 0), 0),
5168 XEXP (XEXP (x, 1), 0))));
5169 return find_split_point (loc, insn, set_src);
5172 /* Many RISC machines have a large set of logical insns. If the
5173 second operand is a NOT, put it first so we will try to split the
5174 other operand first. */
5175 if (GET_CODE (XEXP (x, 1)) == NOT)
5177 rtx tem = XEXP (x, 0);
5178 SUBST (XEXP (x, 0), XEXP (x, 1));
5179 SUBST (XEXP (x, 1), tem);
5185 /* Canonicalization can produce (minus A (mult B C)), where C is a
5186 constant. It may be better to try splitting (plus (mult B -C) A)
5187 instead if this isn't a multiply by a power of two. */
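/* E.g. (minus A (mult B (const_int 3))) is retried as
   (plus (mult B (const_int -3)) A), which may match a
   multiply-accumulate pattern handled just below.  */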
5188 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5189 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5190 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5192 machine_mode mode = GET_MODE (x);
5193 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5194 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5195 SUBST (*loc, gen_rtx_PLUS (mode,
5197 XEXP (XEXP (x, 1), 0),
5198 gen_int_mode (other_int,
5201 return find_split_point (loc, insn, set_src);
5204 /* Split at a multiply-accumulate instruction. However if this is
5205 the SET_SRC, we likely do not have such an instruction and it's
5206 worthless to try this split. */
5208 && (GET_CODE (XEXP (x, 0)) == MULT
5209 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5210 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5217 /* Otherwise, select our actions depending on our rtx class. */
5218 switch (GET_RTX_CLASS (code))
5220 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5222 split = find_split_point (&XEXP (x, 2), insn, false);
5227 case RTX_COMM_ARITH:
5229 case RTX_COMM_COMPARE:
5230 split = find_split_point (&XEXP (x, 1), insn, false);
5235 /* Some machines have (and (shift ...) ...) insns. If X is not
5236 an AND, but XEXP (X, 0) is, use it as our split point. */
5237 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5238 return &XEXP (x, 0);
5240 split = find_split_point (&XEXP (x, 0), insn, false);
5246 /* Otherwise, we don't have a split point. */
5251 /* Throughout X, replace FROM with TO, and return the result.
5252 The result is TO if X is FROM;
5253 otherwise the result is X, but its contents may have been modified.
5254 If they were modified, a record was made in undobuf so that
5255 undo_all will (among other things) return X to its original state.
5257 If the number of changes necessary is too large to record for undoing,
5258 the excess changes are not made, so the result is invalid.
5259 The changes already made can still be undone.
5260 undobuf.num_undo is incremented for such changes, so by testing that,
5261 the caller can tell whether the result is valid.
5263 `n_occurrences' is incremented each time FROM is replaced.
5265 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5267 IN_COND is nonzero if we are at the top level of a condition.
5269 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5270 by copying if `n_occurrences' is nonzero. */
5273 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5275 enum rtx_code code = GET_CODE (x);
5276 machine_mode op0_mode = VOIDmode;
5281 /* Two expressions are equal if they are identical copies of a shared
5282 RTX or if they are both registers with the same register number and mode.  */
5285 #define COMBINE_RTX_EQUAL_P(X,Y) \
5287 || (REG_P (X) && REG_P (Y) \
5288 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5290 /* Do not substitute into clobbers of regs -- this will never result in valid RTL.  */
5292 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5295 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5298 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5301 /* If X and FROM are the same register but different modes, they
5302 will not have been seen as equal above. However, the log links code
5303 will make a LOG_LINKS entry for that case. If we do nothing, we
5304 will try to rerecognize our original insn and, when it succeeds,
5305 we will delete the feeding insn, which is incorrect.
5307 So force this insn not to match in this (rare) case. */
5308 if (! in_dest && code == REG && REG_P (from)
5309 && reg_overlap_mentioned_p (x, from))
5310 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5312 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5313 of which may contain things that can be combined. */
5314 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5317 /* It is possible to have a subexpression appear twice in the insn.
5318 Suppose that FROM is a register that appears within TO.
5319 Then, after that subexpression has been scanned once by `subst',
5320 the second time it is scanned, TO may be found. If we were
5321 to scan TO here, we would find FROM within it and create a
5322 self-referential rtl structure, which is completely wrong.  */
5323 if (COMBINE_RTX_EQUAL_P (x, to))
5326 /* Parallel asm_operands need special attention because all of the
5327 inputs are shared across the arms. Furthermore, unsharing the
5328 rtl results in recognition failures. Failure to handle this case
5329 specially can result in circular rtl.
5331 Solve this by doing a normal pass across the first entry of the
5332 parallel, and only processing the SET_DESTs of the subsequent entries.  */
5335 if (code == PARALLEL
5336 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5337 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5339 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5341 /* If this substitution failed, this whole thing fails. */
5342 if (GET_CODE (new_rtx) == CLOBBER
5343 && XEXP (new_rtx, 0) == const0_rtx)
5346 SUBST (XVECEXP (x, 0, 0), new_rtx);
5348 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5350 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5353 && GET_CODE (dest) != CC0
5354 && GET_CODE (dest) != PC)
5356 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5358 /* If this substitution failed, this whole thing fails. */
5359 if (GET_CODE (new_rtx) == CLOBBER
5360 && XEXP (new_rtx, 0) == const0_rtx)
5363 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5369 len = GET_RTX_LENGTH (code);
5370 fmt = GET_RTX_FORMAT (code);
5372 /* We don't need to process a SET_DEST that is a register, CC0,
5373 or PC, so set up to skip this common case. All other cases
5374 where we want to suppress replacing something inside a
5375 SET_SRC are handled via the IN_DEST operand. */
5377 && (REG_P (SET_DEST (x))
5378 || GET_CODE (SET_DEST (x)) == CC0
5379 || GET_CODE (SET_DEST (x)) == PC))
5382 /* Trying to simplify the operands of a widening MULT is not likely
5383 to create RTL matching a machine insn.  */
5385 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5386 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5387 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5388 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5389 && REG_P (XEXP (XEXP (x, 0), 0))
5390 && REG_P (XEXP (XEXP (x, 1), 0))
5395 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a constant.  */
5398 op0_mode = GET_MODE (XEXP (x, 0));
5400 for (i = 0; i < len; i++)
5405 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5407 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5409 new_rtx = (unique_copy && n_occurrences
5410 ? copy_rtx (to) : to);
5415 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5418 /* If this substitution failed, this whole thing fails.  */
5420 if (GET_CODE (new_rtx) == CLOBBER
5421 && XEXP (new_rtx, 0) == const0_rtx)
5425 SUBST (XVECEXP (x, i, j), new_rtx);
5428 else if (fmt[i] == 'e')
5430 /* If this is a register being set, ignore it. */
5431 new_rtx = XEXP (x, i);
5434 && (((code == SUBREG || code == ZERO_EXTRACT)
5436 || code == STRICT_LOW_PART))
5439 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5441 /* In general, don't install a subreg involving two
5442 modes not tieable. It can worsen register
5443 allocation, and can even make invalid reload
5444 insns, since the reg inside may need to be copied
5445 from in the outside mode, and that may be invalid
5446 if it is an fp reg copied in integer mode.
5448 We allow two exceptions to this: It is valid if
5449 it is inside another SUBREG and the mode of that
5450 SUBREG and the mode of the inside of TO is
5451 tieable, and it is valid if X is a SET that copies FROM to CC0.  */
5454 if (GET_CODE (to) == SUBREG
5455 && !targetm.modes_tieable_p (GET_MODE (to),
5456 GET_MODE (SUBREG_REG (to)))
5457 && ! (code == SUBREG
5458 && (targetm.modes_tieable_p
5459 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5463 && XEXP (x, 0) == cc0_rtx))))
5464 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5468 && REGNO (to) < FIRST_PSEUDO_REGISTER
5469 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5472 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5474 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5478 /* If we are in a SET_DEST, suppress most cases unless we
5479 have gone inside a MEM, in which case we want to
5480 simplify the address. We assume here that things that
5481 are actually part of the destination have their inner
5482 parts in the first expression. This is true for SUBREG,
5483 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5484 things aside from REG and MEM that should appear in a SET_DEST.  */
5486 new_rtx = subst (XEXP (x, i), from, to,
5488 && (code == SUBREG || code == STRICT_LOW_PART
5489 || code == ZERO_EXTRACT))
5492 code == IF_THEN_ELSE && i == 0,
5495 /* If we found that we will have to reject this combination,
5496 indicate that by returning the CLOBBER ourselves, rather than
5497 an expression containing it. This will speed things up as
5498 well as prevent accidents where two CLOBBERs are considered
5499 to be equal, thus producing an incorrect simplification. */
5501 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5504 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5506 machine_mode mode = GET_MODE (x);
5508 x = simplify_subreg (GET_MODE (x), new_rtx,
5509 GET_MODE (SUBREG_REG (x)),
5512 x = gen_rtx_CLOBBER (mode, const0_rtx);
5514 else if (CONST_SCALAR_INT_P (new_rtx)
5515 && GET_CODE (x) == ZERO_EXTEND)
5517 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5518 new_rtx, GET_MODE (XEXP (x, 0)));
5522 SUBST (XEXP (x, i), new_rtx);
5527 /* Check if we are loading something from the constant pool via float
5528 extension; in this case we would undo compress_float_constant
5529 optimization and degenerate constant load to an immediate value. */
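/* E.g. a hypothetical (float_extend:DF (mem/u:SF POOL)) that would
   fold to a (const_double:DF ...) is deliberately left alone, keeping
   the smaller SFmode constant-pool entry instead of reintroducing a
   DFmode immediate.  */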
5530 if (GET_CODE (x) == FLOAT_EXTEND
5531 && MEM_P (XEXP (x, 0))
5532 && MEM_READONLY_P (XEXP (x, 0)))
5534 rtx tmp = avoid_constant_pool_reference (x);
5539 /* Try to simplify X. If the simplification changed the code, it is likely
5540 that further simplification will help, so loop, but limit the number
5541 of repetitions that will be performed. */
5543 for (i = 0; i < 4; i++)
5545 /* If X is sufficiently simple, don't bother trying to do anything with it.  */
5547 if (code != CONST_INT && code != REG && code != CLOBBER)
5548 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5550 if (GET_CODE (x) == code)
5553 code = GET_CODE (x);
5555 /* We no longer know the original mode of operand 0 since we
5556 have changed the form of X.  */
5557 op0_mode = VOIDmode;
5563 /* If X is a commutative operation whose operands are not in the canonical
5564 order, use substitutions to swap them. */
5567 maybe_swap_commutative_operands (rtx x)
5569 if (COMMUTATIVE_ARITH_P (x)
5570 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5572 rtx temp = XEXP (x, 0);
5573 SUBST (XEXP (x, 0), XEXP (x, 1));
5574 SUBST (XEXP (x, 1), temp);
5578 /* Simplify X, a piece of RTL. We just operate on the expression at the
5579 outer level; call `subst' to simplify recursively.  Return the new expression.
5582 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5583 if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level of a condition.  */
5587 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5590 enum rtx_code code = GET_CODE (x);
5591 machine_mode mode = GET_MODE (x);
5592 scalar_int_mode int_mode;
5596 /* If this is a commutative operation, put a constant last and a complex
5597 expression first. We don't need to do this for comparisons here. */
5598 maybe_swap_commutative_operands (x);
5600 /* Try to fold this expression in case we have constants that weren't present before.  */
5603 switch (GET_RTX_CLASS (code))
5606 if (op0_mode == VOIDmode)
5607 op0_mode = GET_MODE (XEXP (x, 0));
5608 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5611 case RTX_COMM_COMPARE:
5613 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5614 if (cmp_mode == VOIDmode)
5616 cmp_mode = GET_MODE (XEXP (x, 1));
5617 if (cmp_mode == VOIDmode)
5618 cmp_mode = op0_mode;
5620 temp = simplify_relational_operation (code, mode, cmp_mode,
5621 XEXP (x, 0), XEXP (x, 1));
5624 case RTX_COMM_ARITH:
5626 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5628 case RTX_BITFIELD_OPS:
5630 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5631 XEXP (x, 1), XEXP (x, 2));
5640 code = GET_CODE (temp);
5641 op0_mode = VOIDmode;
5642 mode = GET_MODE (temp);
5645 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5646 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5648 things.  Check for cases where both arms are testing the same condition.
5650 Don't do anything if all operands are very simple.  */
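/* E.g. (plus (if_then_else COND A B) C) can become
   (if_then_else COND (plus A C) (plus B C)); when A and B are
   constants, each arm then folds to a constant.  */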
5653 && ((!OBJECT_P (XEXP (x, 0))
5654 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5655 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5656 || (!OBJECT_P (XEXP (x, 1))
5657 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5658 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5660 && (!OBJECT_P (XEXP (x, 0))
5661 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5662 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5664 rtx cond, true_rtx, false_rtx;
5666 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5668 /* If everything is a comparison, what we have is highly unlikely
5669 to be simpler, so don't use it. */
5670 && ! (COMPARISON_P (x)
5671 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5673 rtx cop1 = const0_rtx;
5674 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5676 if (cond_code == NE && COMPARISON_P (cond))
5679 /* Simplify the alternative arms; this may collapse the true and
5680 false arms to store-flag values. Be careful to use copy_rtx
5681 here since true_rtx or false_rtx might share RTL with x as a
5682 result of the if_then_else_cond call above. */
5683 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5684 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5686 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5687 is unlikely to be simpler. */
5688 if (general_operand (true_rtx, VOIDmode)
5689 && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);
	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }

  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }
  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
	{
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);
	  rtx inner;

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	    std::swap (inner_op0, inner_op1);
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	    {
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1));
	    }

	  if (inner)
	    return simplify_gen_binary (code, mode, other, inner);
	}
    }
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;
    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
	  /* Don't call gen_lowpart if the inner mode
	     is VOIDmode and we cannot simplify it, as SUBREG without
	     inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;
      {
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;

	/* If op is known to have all lower bits zero, the result is zero.  */
	scalar_int_mode int_mode, int_op0_mode;
	if (!in_dest
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && (subreg_lowpart_offset (int_mode, int_op0_mode)
		== SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
	    && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
		& GET_MODE_MASK (int_mode)) == 0)
	  return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));
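
      /* Worked example (illustrative): in SImode with C == 31,
	 (ashiftrt X 31) is 0 or -1, so its negation is 0 or 1, which is
	 exactly (lshiftrt X 31): the sign bit moved down to bit 0.  */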
      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, int_mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
				   GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
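
      /* Worked example (illustrative): if only bit 3 of X can be nonzero
	 in SImode, then I == 3 and C-I == 28.  For X == 8, (ashift X 28)
	 gives 0x80000000 and (ashiftrt ... 28) gives 0xfffffff8 == -8,
	 which is (neg X); for X == 0 both sides are 0.  */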
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (HWI_COMPUTABLE_MODE_P (mode))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      break;
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;
    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (int_mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
		      == (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));
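
      /* Worked example (illustrative): with i == 7, the pattern
	 (plus (xor (and X 255) 128) -128) computes
	 ((X & 0xff) ^ 0x80) - 0x80, the classic idiom for sign-extending
	 an 8-bit field, and becomes (ashiftrt (ashift X 24) 24)
	 in SImode.  */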
      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 gen_rtx_XOR (int_mode, XEXP (x, 0),
					      const1_rtx),
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);
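
      /* Worked example (illustrative): if X is known to be 0 or 1, then
	 (plus X -1) is -1 or 0, and in SImode
	 (ashiftrt (ashift (xor X 1) 31) 31) flips bit 0, moves it into
	 the sign bit, and replicates it, giving the same -1 or 0.  */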
      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
	    return temp;
	}
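
      /* Worked example (illustrative): for ((a & 1) + (a & 2)), the
	 nonzero_bits masks of the operands are 1 and 2, which do not
	 overlap, so the PLUS becomes (ior (and a 1) (and a 2)) and then
	 simplifies further to (and a 3).  */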
      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
	  && !side_effects_p (XEXP (x, 0)))
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
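
      /* Worked example (illustrative): (minus X (and X -8)) subtracts
	 X-with-its-low-three-bits-cleared from X, leaving just those low
	 bits, i.e. (and X 7); here -(-8) == 8 is a power of 2 and
	 -(-8) - 1 == 7 is the mask passed above.  */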
      break;

    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
	}
      break;
    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
				     XEXP (x, 0), i);
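
      /* Worked example (illustrative): (udiv (lshiftrt X 2) 4) is treated
	 as (lshiftrt (lshiftrt X 2) 2), which simplify_shift_const then
	 merges into the single shift (lshiftrt X 4).  */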
      break;

    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */

	  if (in_cond)
	    ;
	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    return gen_lowpart (int_mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, int_mode,
					  gen_lowpart (int_mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    return gen_lowpart (int_mode, expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
	    }
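
	  /* Summary of the conversions above (illustrative), writing SFV
	     for STORE_FLAG_VALUE:
	       SFV ==  1, X in {0,1}:   (ne X 0) -> X     (eq X 0) -> X^1
	       SFV ==  1, X in {0,-1}:  (ne X 0) -> -X    (eq X 0) -> X+1
	       SFV == -1, X in {0,-1}:  (ne X 0) -> X     (eq X 0) -> ~X
	       SFV == -1, X in {0,1}:   (ne X 0) -> -X    (eq X 0) -> X-1  */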
	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE
	      && is_int_mode (mode, &int_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode)
	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	      && op1 == const0_rtx
	      && int_mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (int_mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }
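
	  /* Worked example (illustrative): if STORE_FLAG_VALUE is the
	     SImode sign bit 0x80000000 and only bit 3 of X can be nonzero,
	     (ne X 0) becomes (ashift X 28), which places that bit exactly
	     where the flag value is tested.  */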
	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;
    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      (HOST_WIDE_INT_1U
			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
			      - 1, 0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode, inner_mode;
  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;
  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  std::swap (true_rtx, false_rtx);
	}
      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
	{
	  /* If we are comparing against zero and the expression being
	     tested has only a single bit that might be nonzero, that is
	     its value when it is not equal to zero.  Similarly if it is
	     known to be -1 or 0.  */
	  if (true_code == EQ
	      && true_val == const0_rtx
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
	    {
	      false_code = EQ;
	      false_val = gen_int_mode (nzb, from_mode);
	    }
	  else if (true_code == EQ
		   && true_val == const0_rtx
		   && (num_sign_bit_copies (from, from_mode)
		       == GET_MODE_PRECISION (from_mode)))
	    {
	      false_code = EQ;
	      false_val = constm1_rtx;
	    }
	}
      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }
  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;
  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }
  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
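
  /* Worked example (illustrative): (if_then_else (ge A B) A B) selects
     the larger of A and B under a signed comparison and so becomes
     (smax A B); the unsigned comparisons map to umax/umin likewise.  */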
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      scalar_int_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;
      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}
      if (z)
	{
	  temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (MULT, m, temp,
				      simplify_gen_binary (MULT, m, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);

	  return temp;
	}
    }
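
  /* Worked example (illustrative): with STORE_FLAG_VALUE == 1,
     (if_then_else (ne A 0) (plus Z 4) Z) becomes
     (plus Z (mult (ne A 0) 4)): when A != 0 the store-flag value is 1
     and 4 is added; when A == 0 the product is 0 and Z is unchanged.  */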
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
			    gen_lowpart (int_mode, XEXP (cond, 0)), i);
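
  /* Worked example (illustrative): if A is known to be 0 or 1,
     (if_then_else (ne A 0) 8 0) is A scaled by 8 and becomes
     (ashift A 3); with A known to be 0 or -1 and C1 == -8, the same
     shift of the 0/-1 value produces 0 or -8.  */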
  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
      && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
	 == nonzero_bits (XEXP (cond, 0), inner_mode)
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      if (inner_mode == int_mode)
	return val;
      else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
	return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
    }

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;
  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }
  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);
      if (GET_CODE (src) == COMPARE)
	{
	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
	    {
	      inner_compare = op0;
	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
	    }
	}
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}
      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else if (inner_compare
	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
	       && new_code == old_code
	       && op0 == XEXP (inner_compare, 0)
	       && op1 == XEXP (inner_compare, 1))
	compare_mode = GET_MODE (inner_compare);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is the only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif /* SELECT_CC_MODE */
      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;
      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, if we didn't previously have the same COMPARE we
	 want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machines where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
	   / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
				       GET_MODE (SUBREG_REG (src)),
				       GET_MODE (src)))
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }
  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && partial_subreg_p (src)
      && subreg_lowpart_p (src))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
				     nonzero_bits (inner, inner_mode)))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
	  || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
	  == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, int_mode,
				  simplify_gen_binary (IOR, int_mode,
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }
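
  /* Worked example (illustrative): with A known to be 0 or -1,
     (if_then_else (ne A 0) B C) selects B when A is all ones and C when
     A is zero, so it can be computed branch-free as
     (ior (and A B) (and (not A) C)).  */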
  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
	  && CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;
  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* FALLTHRU */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded.  If not for that, MEM's would very rarely be safe.

	 Reject modes bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
	return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
	  || !CONST_INT_P (XEXP (x, 2)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }
  /* We've rejected non-scalar operations by now.  */
  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (mode)
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
	  == 0))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
	return temp;
      else
	return x;
    }
  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when MODE is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));
    }
  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */

  modewidth = GET_MODE_PRECISION (mode);
  if (modewidth >= pos + len)
    {
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
	return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
				  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
				  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, mode,
				  simplify_shift_const (NULL_RTX, LSHIFTRT,
							mode, XEXP (x, 0),
							pos),
				  (HOST_WIDE_INT_1U << len) - 1);
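
  /* Worked example (illustrative): (sign_extract:SI X 8 4), i.e.
     LEN == 8 and POS == 4 with MODEWIDTH == 32, becomes
     (ashiftrt (ashift X 20) 24): the left shift places the field's top
     bit in the sign bit, and the arithmetic right shift sign-extends it
     while bringing the field down to bit 0.  */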
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */
static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
	{
	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
	  len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
	}
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
	{
	  inner = XEXP (SET_DEST (x), 0);
	  len = INTVAL (XEXP (SET_DEST (x), 1));
	  pos = XEXP (SET_DEST (x), 2);

	  /* A constant position should stay within the width of INNER.  */
	  if (CONST_INT_P (pos)
	      && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
	    break;

	  if (BITS_BIG_ENDIAN)
	    {
	      if (CONST_INT_P (pos))
		pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
			       - INTVAL (pos));
	      else if (GET_CODE (pos) == MINUS
		       && CONST_INT_P (XEXP (pos, 1))
		       && (INTVAL (XEXP (pos, 1))
			   == GET_MODE_PRECISION (GET_MODE (inner)) - len))
		/* If position is ADJUST - X, new position is X.  */
		pos = XEXP (pos, 0);
	      else
		{
		  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
		  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					     gen_int_mode (prec - len,
							   GET_MODE (pos)),
					     pos);
		}
	    }
	}
      /* A SUBREG between two modes that occupy the same numbers of words
	 can be done by moving the SUBREG to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
	       /* We need SUBREGs to compute nonzero_bits properly.  */
	       && nonzero_sign_valid
	       && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
		     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
			+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	{
	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
			   gen_lowpart
			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
			    SET_SRC (x)));
	  continue;
	}
      else
	break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
	inner = SUBREG_REG (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
	{
	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (GET_MODE (inner)))
	    break;

	  /* Try to find an integral mode to pun with.  */
	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
	      .exists (&compute_mode))
	    break;

	  inner = gen_lowpart (compute_mode, inner);
	}
      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
	break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
	break;

      /* Now compute the equivalent expression.  Make a copy of INNER
	 for the SET_DEST in case it is a MEM into which we will substitute;
	 we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
			   compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (
				      AND, compute_mode,
				      gen_lowpart (compute_mode, SET_SRC (x)),
				      mask),
				    pos);

      x = gen_rtx_SET (copy_rtx (inner),
		       simplify_gen_binary (IOR, compute_mode,
					    cleared, masked));
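
      /* Worked example (illustrative): storing SRC into an 8-bit field at
	 bit 8 of INNER computes
	   mask    = 0xff
	   cleared = INNER & ~(0xff << 8)
	   masked  = (SRC & 0xff) << 8
	 and the new SET stores cleared | masked back into INNER.  */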
    }

  return x;
}

/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */

static rtx
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
		 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  scalar_int_mode wanted_inner_mode;
  scalar_int_mode wanted_inner_reg_mode = word_mode;
  scalar_int_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;
  if (GET_CODE (inner) == SUBREG
      && subreg_lowpart_p (inner)
      && (paradoxical_subreg_p (inner)
	  /* If trying or potentially trying to extract
	     bits outside of is_mode, don't look through
	     non-paradoxical SUBREGs.  See PR82192.  */
	  || (pos_rtx == NULL_RTX
	      && pos + len <= GET_MODE_PRECISION (is_mode))))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
	 consider just the QI as the memory to extract from.
	 The subreg adds or removes high bits; its mode is
	 irrelevant to the meaning of this extraction,
	 since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
	is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
	   && CONST_INT_P (XEXP (inner, 1))
	   && pos_rtx == 0 && pos == 0
	   && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
	 (ashift X (const_int C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
				 0, 0, len - INTVAL (XEXP (inner, 1)),
				 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE
	   /* If trying or potentially trying to extract
	      bits outside of is_mode, don't look through
	      TRUNCATE.  See PR82192.  */
	   && pos_rtx == NULL_RTX
	   && pos + len <= GET_MODE_PRECISION (is_mode))
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);
  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode.  For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && !paradoxical_subreg_p (tmode, inner_mode)
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0),
						  MEM_ADDR_SPACE (inner))
		      && ! MEM_VOLATILE_P (inner))))))
    {
7526 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7527 field. If the original and current mode are the same, we need not
7528 adjust the offset. Otherwise, we do if bytes big endian.
7530 If INNER is not a MEM, get a piece consisting of just the field
7531 of interest (in this case POS % BITS_PER_WORD must be 0). */
HOST_WIDE_INT offset;
/* POS counts from lsb, but make OFFSET count in memory order. */
if (BYTES_BIG_ENDIAN)
offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
offset = pos / BITS_PER_UNIT;
new_rtx = adjust_address_nv (inner, tmode, offset);
else if (REG_P (inner))
if (tmode != inner_mode)
/* We can't call gen_lowpart in a DEST since we
always want a SUBREG (see below) and it would sometimes
return a new hard register. */
= subreg_offset_from_lsb (tmode, inner_mode, pos);
/* Avoid creating invalid subregs, for example when
simplifying (x>>32)&255. */
if (!validate_subreg (tmode, inner_mode, inner, offset))
new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
new_rtx = gen_lowpart (tmode, inner);
new_rtx = force_to_mode (inner, tmode,
len >= HOST_BITS_PER_WIDE_INT
: (HOST_WIDE_INT_1U << len) - 1, 0);
/* If this extraction is going into the destination of a SET,
make a STRICT_LOW_PART unless we made a MEM. */
return (MEM_P (new_rtx) ? new_rtx
: (GET_CODE (new_rtx) != SUBREG
? gen_rtx_CLOBBER (tmode, const0_rtx)
: gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
if (CONST_SCALAR_INT_P (new_rtx))
return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
mode, new_rtx, tmode);
/* If we know that no extraneous bits are set, and that the high
bit is not set, convert the extraction to the cheaper of
sign and zero extension, which are equivalent in these cases. */
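/* Illustration (editorial): if TMODE is QImode and bit 7 of NEW_RTX is
known to be clear, (zero_extend:SI NEW_RTX) and (sign_extend:SI
NEW_RTX) compute the same value, so whichever is cheaper may be
chosen. */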
if (flag_expensive_optimizations
&& (HWI_COMPUTABLE_MODE_P (tmode)
&& ((nonzero_bits (new_rtx, tmode)
& ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
/* Prefer ZERO_EXTENSION, since it gives more information to
backends. */
if (set_src_cost (temp, mode, optimize_this_for_speed_p)
<= set_src_cost (temp1, mode, optimize_this_for_speed_p))
/* Otherwise, sign- or zero-extend unless we already are in the
proper mode. */
return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
/* Unless this is a COMPARE or we have a funny memory reference,
don't do anything with zero-extending field extracts starting at
the low-order bit since they are simple AND operations. */
if (pos_rtx == 0 && pos == 0 && ! in_dest
&& ! in_compare && unsignedp)
/* If INNER is a MEM, reject this if we would be spanning bytes or
if the position is not a constant and the length is not 1. In all
other cases, we would only be going outside our object in cases when
an original shift would have been undefined. */
&& ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
|| (pos_rtx != 0 && len != 1)))
enum extraction_pattern pattern = (in_dest ? EP_insv
: unsignedp ? EP_extzv : EP_extv);
/* If INNER is not from memory, we want it to have the mode of a register
extraction pattern's structure operand, or word_mode if there is no
such pattern. The same applies to extraction_mode and pos_mode
and their respective operands.
For memory, assume that the desired extraction_mode and pos_mode
are the same as for a register operation, since at present we don't
have named patterns for aligned memory structures. */
struct extraction_insn insn;
if (get_best_reg_extraction_insn (&insn, pattern,
GET_MODE_BITSIZE (inner_mode), mode))
wanted_inner_reg_mode = insn.struct_mode.require ();
pos_mode = insn.pos_mode;
extraction_mode = insn.field_mode;
/* Never narrow an object, since that might not be safe. */
if (mode != VOIDmode
&& partial_subreg_p (extraction_mode, mode))
extraction_mode = mode;
wanted_inner_mode = wanted_inner_reg_mode;
/* Be careful not to go beyond the extracted object and maintain the
natural alignment of the memory. */
wanted_inner_mode = smallest_int_mode_for_size (len);
while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
> GET_MODE_BITSIZE (wanted_inner_mode))
wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
if (BITS_BIG_ENDIAN)
/* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
BITS_BIG_ENDIAN style. If position is constant, compute new
position. Otherwise, build subtraction.
Note that POS is relative to the mode of the original argument.
If it's a MEM we need to recompute POS relative to that.
However, if we're extracting from (or inserting into) a register,
we want to recompute POS relative to wanted_inner_mode. */
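/* Worked example (editorial): within a 32-bit object and LEN == 8, a
little-endian POS of 5 becomes 32 - 8 - 5 == 19 when renumbered in
BITS_BIG_ENDIAN style, which is what the code below computes. */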
int width = (MEM_P (inner)
? GET_MODE_BITSIZE (is_mode)
: GET_MODE_BITSIZE (wanted_inner_mode));
pos = width - len - pos;
= gen_rtx_MINUS (GET_MODE (pos_rtx),
gen_int_mode (width - len, GET_MODE (pos_rtx)),
/* POS may be less than 0 now, but we check for that below.
Note that it can only be less than 0 if !MEM_P (inner). */
/* If INNER has a wider mode, and this is a constant extraction, try to
make it smaller and adjust the byte to point to the byte containing
the value. */
if (wanted_inner_mode != VOIDmode
&& inner_mode != wanted_inner_mode
&& partial_subreg_p (wanted_inner_mode, is_mode)
&& ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
&& ! MEM_VOLATILE_P (inner))
/* The computations below will be correct if the machine is big
endian in both bits and bytes or little endian in bits and bytes.
If it is mixed, we must adjust. */
/* If bytes are big endian and we had a paradoxical SUBREG, we must
adjust OFFSET to compensate. */
if (BYTES_BIG_ENDIAN
&& paradoxical_subreg_p (is_mode, inner_mode))
offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
/* We can now move to the desired byte. */
offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
* GET_MODE_SIZE (wanted_inner_mode);
pos %= GET_MODE_BITSIZE (wanted_inner_mode);
if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
&& is_mode != wanted_inner_mode)
offset = (GET_MODE_SIZE (is_mode)
- GET_MODE_SIZE (wanted_inner_mode) - offset);
inner = adjust_address_nv (inner, wanted_inner_mode, offset);
/* If INNER is not memory, get it into the proper mode. If we are changing
its mode, POS must be a constant and smaller than the size of the new
mode. */
else if (!MEM_P (inner))
/* On the LHS, don't create paradoxical subregs implicitly truncating
the register unless TARGET_TRULY_NOOP_TRUNCATION. */
&& !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
if (GET_MODE (inner) != wanted_inner_mode
|| orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
inner = force_to_mode (inner, wanted_inner_mode,
|| len + orig_pos >= HOST_BITS_PER_WIDE_INT
: (((HOST_WIDE_INT_1U << len) - 1)
/* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
have to zero extend. Otherwise, we can just use a SUBREG.
We dealt with constant rtxes earlier, so pos_rtx cannot
have VOIDmode at this point. */
&& (GET_MODE_SIZE (pos_mode)
> GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
GET_MODE (pos_rtx));
/* If we know that no extraneous bits are set, and that the high
bit is not set, convert the extraction to the cheaper one - either
SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
cases. */
if (flag_expensive_optimizations
&& (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
&& ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
& ~(((unsigned HOST_WIDE_INT)
GET_MODE_MASK (GET_MODE (pos_rtx)))
rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
GET_MODE (pos_rtx));
/* Prefer ZERO_EXTENSION, since it gives more information to
backends. */
if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
< set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
/* Make POS_RTX unless we already have it and it is correct. If we don't
have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
be a CONST_INT. */
if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
pos_rtx = orig_pos_rtx;
else if (pos_rtx == 0)
pos_rtx = GEN_INT (pos);
/* Make the required operation. See if we can use existing rtx. */
new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
extraction_mode, inner, GEN_INT (len), pos_rtx);
new_rtx = gen_lowpart (mode, new_rtx);
/* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
can be commuted with any other operations in X. Return X without
that shift if so. */
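/* Editorial sketch of the idea: with COUNT == 2,
(plus:SI (ashift:SI X (const_int 2)) (const_int 12)) equals
(ashift:SI (plus:SI X (const_int 3)) (const_int 2)), so the routine
returns (plus:SI X (const_int 3)) with the shift peeled off. */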
extract_left_shift (scalar_int_mode mode, rtx x, int count)
enum rtx_code code = GET_CODE (x);
/* This is the shift itself. If it is wide enough, we will return
either the value being shifted if the shift count is equal to
COUNT or a shift for the difference. */
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= count)
return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
INTVAL (XEXP (x, 1)) - count);
if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
return simplify_gen_unary (code, mode, tem, mode);
case PLUS: case IOR: case XOR: case AND:
/* If we can safely shift this constant and we find the inner shift,
make a new operation. */
if (CONST_INT_P (XEXP (x, 1))
&& (UINTVAL (XEXP (x, 1))
& ((HOST_WIDE_INT_1U << count) - 1)) == 0
&& (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
return simplify_gen_binary (code, mode, tem,
gen_int_mode (val, mode));
/* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
level of the expression and MODE is its mode. IN_CODE is as for
make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
that should be used when recursing on operands of *X_PTR.
There are two possible actions:
- Return null. This tells the caller to recurse on *X_PTR with IN_CODE
equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
- Return a new rtx, which the caller returns directly. */
make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
enum rtx_code in_code,
enum rtx_code *next_code_ptr)
enum rtx_code next_code = *next_code_ptr;
enum rtx_code code = GET_CODE (x);
int mode_width = GET_MODE_PRECISION (mode);
scalar_int_mode inner_mode;
bool equality_comparison = false;
equality_comparison = true;
/* Process depending on the code of this operation. If NEW_RTX is set
nonzero, it will be returned. */
/* Convert shifts by constants into multiplications if inside
an address. */
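/* Editorial example: inside a MEM, (ashift:SI X (const_int 2)) is
rewritten as (mult:SI X (const_int 4)), multiplication being the
canonical form for scaling inside addresses. */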
if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
&& INTVAL (XEXP (x, 1)) >= 0)
HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
new_rtx = make_compound_operation (XEXP (x, 0), next_code);
if (GET_CODE (new_rtx) == NEG)
new_rtx = XEXP (new_rtx, 0);
multval = trunc_int_for_mode (multval, mode);
new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
lhs = make_compound_operation (lhs, next_code);
rhs = make_compound_operation (rhs, next_code);
if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
else if (GET_CODE (lhs) == MULT
&& (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
simplify_gen_unary (NEG, mode,
new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
SUBST (XEXP (x, 0), lhs);
SUBST (XEXP (x, 1), rhs);
maybe_swap_commutative_operands (x);
lhs = make_compound_operation (lhs, next_code);
rhs = make_compound_operation (rhs, next_code);
if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
return simplify_gen_binary (PLUS, mode, tem, lhs);
else if (GET_CODE (rhs) == MULT
&& (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
simplify_gen_unary (NEG, mode,
return simplify_gen_binary (PLUS, mode, tem, lhs);
SUBST (XEXP (x, 0), lhs);
SUBST (XEXP (x, 1), rhs);
/* If the second operand is not a constant, we can't do anything
with it. */
if (!CONST_INT_P (XEXP (x, 1)))
/* If the constant is a power of two minus one and the first operand
is a logical right shift, make an extraction. */
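/* Classic case (editorial example): (and:SI (lshiftrt:SI X (const_int 8))
(const_int 255)) selects an 8-bit field starting at bit 8, i.e.
(zero_extract:SI X (const_int 8) (const_int 8)). */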
if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
&& (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
i, 1, 0, in_code == COMPARE);
/* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
else if (GET_CODE (XEXP (x, 0)) == SUBREG
&& subreg_lowpart_p (XEXP (x, 0))
&& is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
&& GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
&& (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
new_rtx = make_extraction (inner_mode, new_rtx, 0,
i, 1, 0, in_code == COMPARE);
/* If we narrowed the mode when dropping the subreg, then we lose. */
if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
/* If that didn't give anything, see if the AND simplifies on
its own. */
if (!new_rtx && i >= 0)
new_rtx = make_compound_operation (XEXP (x, 0), next_code);
new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
0, in_code == COMPARE);
/* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
else if ((GET_CODE (XEXP (x, 0)) == XOR
|| GET_CODE (XEXP (x, 0)) == IOR)
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
&& (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
/* Apply the distributive law, and then try to make extractions. */
new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
new_rtx = make_compound_operation (new_rtx, in_code);
/* If we have (and (rotate X C) M) and C is larger than the number
of bits in M, this is an extraction. */
else if (GET_CODE (XEXP (x, 0)) == ROTATE
&& CONST_INT_P (XEXP (XEXP (x, 0), 1))
&& (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
&& i <= INTVAL (XEXP (XEXP (x, 0), 1)))
new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
new_rtx = make_extraction (mode, new_rtx,
(GET_MODE_PRECISION (mode)
- INTVAL (XEXP (XEXP (x, 0), 1))),
NULL_RTX, i, 1, 0, in_code == COMPARE);
/* On machines without logical shifts, if the operand of the AND is
a logical shift and our mask turns off all the propagated sign
bits, we can replace the logical shift with an arithmetic shift. */
else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
&& !have_insn_for (LSHIFTRT, mode)
&& have_insn_for (ASHIFTRT, mode)
&& CONST_INT_P (XEXP (XEXP (x, 0), 1))
&& INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
&& INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
&& mode_width <= HOST_BITS_PER_WIDE_INT)
unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
gen_rtx_ASHIFTRT (mode,
make_compound_operation (XEXP (XEXP (x,
XEXP (XEXP (x, 0), 1)));
/* If the constant is one less than a power of two, this might be
representable by an extraction even if no shift is present.
If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
we are in a COMPARE. */
else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
new_rtx = make_extraction (mode,
make_compound_operation (XEXP (x, 0),
0, NULL_RTX, i, 1, 0, in_code == COMPARE);
/* If we are in a comparison and this is an AND with a power of two,
convert this into the appropriate bit extract. */
else if (in_code == COMPARE
&& (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
&& (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
new_rtx = make_extraction (mode,
make_compound_operation (XEXP (x, 0),
i, NULL_RTX, 1, 1, 0, 1);
/* If one operand is a paradoxical subreg of a register or memory and
the constant (limited to the smaller mode) has only zero bits where
the sub expression has known zero bits, this can be expressed as
a zero extension. */
else if (GET_CODE (XEXP (x, 0)) == SUBREG)
sub = XEXP (XEXP (x, 0), 0);
machine_mode sub_mode = GET_MODE (sub);
if ((REG_P (sub) || MEM_P (sub))
&& GET_MODE_PRECISION (sub_mode) < mode_width)
unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
unsigned HOST_WIDE_INT mask;
/* The original AND constant with all the known zero bits set. */
mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
if ((mask & mode_mask) == mode_mask)
new_rtx = make_compound_operation (sub, next_code);
new_rtx = make_extraction (mode, new_rtx, 0, 0,
GET_MODE_PRECISION (sub_mode),
1, 0, in_code == COMPARE);
/* If the sign bit is known to be zero, replace this with an
arithmetic shift. */
if (have_insn_for (ASHIFTRT, mode)
&& ! have_insn_for (LSHIFTRT, mode)
&& mode_width <= HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (XEXP (x, 0), mode)
& (HOST_WIDE_INT_1U << (mode_width - 1))) == 0)
new_rtx = gen_rtx_ASHIFTRT (mode,
make_compound_operation (XEXP (x, 0),
/* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
this is a SIGN_EXTRACT. */
if (CONST_INT_P (rhs)
&& GET_CODE (lhs) == ASHIFT
&& CONST_INT_P (XEXP (lhs, 1))
&& INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
&& INTVAL (XEXP (lhs, 1)) >= 0
&& INTVAL (rhs) < mode_width)
new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
new_rtx = make_extraction (mode, new_rtx,
INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
NULL_RTX, mode_width - INTVAL (rhs),
code == LSHIFTRT, 0, in_code == COMPARE);
/* See if we have operations between an ASHIFTRT and an ASHIFT.
If so, try to merge the shifts into a SIGN_EXTEND. We could
also do this for some cases of SIGN_EXTRACT, but it doesn't
seem worth the effort; the case checked for occurs on Alpha. */
&& ! (GET_CODE (lhs) == SUBREG
&& (OBJECT_P (SUBREG_REG (lhs))))
&& CONST_INT_P (rhs)
&& INTVAL (rhs) >= 0
&& INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
&& INTVAL (rhs) < mode_width
&& (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
0, NULL_RTX, mode_width - INTVAL (rhs),
code == LSHIFTRT, 0, in_code == COMPARE);
/* Call ourselves recursively on the inner expression. If we are
narrowing the object and it has a different RTL code from
what it originally did, do this SUBREG as a force_to_mode. */
rtx inner = SUBREG_REG (x), simplified;
enum rtx_code subreg_code = in_code;
/* If the SUBREG is masking of a logical right shift,
make an extraction. */
if (GET_CODE (inner) == LSHIFTRT
&& is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
&& GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
&& CONST_INT_P (XEXP (inner, 1))
&& UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
&& subreg_lowpart_p (x))
new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
int width = GET_MODE_PRECISION (inner_mode)
- INTVAL (XEXP (inner, 1));
if (width > mode_width)
new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
width, 1, 0, in_code == COMPARE);
/* If in_code is COMPARE, it isn't always safe to pass it through
to the recursive make_compound_operation call. */
if (subreg_code == COMPARE
&& (!subreg_lowpart_p (x)
|| GET_CODE (inner) == SUBREG
/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
is (const_int 0), rather than
(subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
for non-equality comparisons against 0 is not equivalent
to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
|| (GET_CODE (inner) == AND
&& CONST_INT_P (XEXP (inner, 1))
&& partial_subreg_p (x)
&& exact_log2 (UINTVAL (XEXP (inner, 1)))
>= GET_MODE_BITSIZE (mode) - 1)))
tem = make_compound_operation (inner, subreg_code);
= simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
if (GET_CODE (tem) != GET_CODE (inner)
&& partial_subreg_p (x)
&& subreg_lowpart_p (x))
= force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
/* If we have something other than a SUBREG, we might have
done an expansion, so rerun ourselves. */
if (GET_CODE (newer) != SUBREG)
newer = make_compound_operation (newer, in_code);
/* force_to_mode can expand compounds. If it just re-expanded
the compound, use gen_lowpart to convert to the desired
mode. */
if (rtx_equal_p (newer, x)
/* Likewise if it re-expanded the compound only partially.
This happens for SUBREG of ZERO_EXTRACT if they extract
the same number of bits. */
|| (GET_CODE (newer) == SUBREG
&& (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
&& GET_CODE (inner) == AND
&& rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
return gen_lowpart (GET_MODE (x), tem);
*x_ptr = gen_lowpart (mode, new_rtx);
*next_code_ptr = next_code;
/* Look at the expression rooted at X. Look for expressions
equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
Form these expressions.
Return the new rtx, usually just X.
Also, for machines like the VAX that don't have logical shift insns,
try to convert logical to arithmetic shift operations in cases where
they are equivalent. This undoes the canonicalizations to logical
shifts done elsewhere.
We try, as much as possible, to re-use rtl expressions to save memory.
IN_CODE says what kind of expression we are processing. Normally, it is
SET. In a memory address it is MEM. When processing the arguments of
a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
precisely it is an equality comparison against zero. */
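/* Editorial note: e.g. (lshiftrt:SI X (const_int 4)) may become
(ashiftrt:SI X (const_int 4)) when the sign bit of X is known to be
zero, since the two shifts then agree on every result bit. */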
make_compound_operation (rtx x, enum rtx_code in_code)
enum rtx_code code = GET_CODE (x);
enum rtx_code next_code;
/* Select the code to be used in recursive calls. Once we are inside an
address, we stay there. If we have a comparison, set to COMPARE,
but once inside, go back to our default of SET. */
next_code = (code == MEM ? MEM
: ((code == COMPARE || COMPARISON_P (x))
&& XEXP (x, 1) == const0_rtx) ? COMPARE
: in_code == COMPARE || in_code == EQ ? SET : in_code);
scalar_int_mode mode;
if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
code = GET_CODE (x);
/* Now recursively process each operand of this operation. We need to
handle ZERO_EXTEND specially so that we don't lose track of the
inner mode. */
if (code == ZERO_EXTEND)
new_rtx = make_compound_operation (XEXP (x, 0), next_code);
tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
new_rtx, GET_MODE (XEXP (x, 0)));
SUBST (XEXP (x, 0), new_rtx);
fmt = GET_RTX_FORMAT (code);
for (i = 0; i < GET_RTX_LENGTH (code); i++)
new_rtx = make_compound_operation (XEXP (x, i), next_code);
SUBST (XEXP (x, i), new_rtx);
else if (fmt[i] == 'E')
for (j = 0; j < XVECLEN (x, i); j++)
new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
SUBST (XVECEXP (x, i, j), new_rtx);
maybe_swap_commutative_operands (x);
/* Given M see if it is a value that would select a field of bits
within an item, but not the entire word. Return -1 if not.
Otherwise, return the starting position of the field, where 0 is the
low-order bit.
*PLEN is set to the length of the field. */
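/* Worked example (editorial): M == 0x0ff0 has its lowest set bit at
position 4, and (M >> 4) + 1 == 0x100 is a power of two, so the
function returns 4 and sets *PLEN to 8. */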
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
/* Get the bit number of the first 1 bit from the right, -1 if none. */
int pos = m ? ctz_hwi (m) : -1;
/* Now shift off the low-order zero bits and see if we have a
power of two minus 1. */
len = exact_log2 ((m >> pos) + 1);
/* If X refers to a register that equals REG in value, replace these
references with REG. */
canon_reg_for_combine (rtx x, rtx reg)
enum rtx_code code = GET_CODE (x);
switch (GET_RTX_CLASS (code))
op0 = canon_reg_for_combine (XEXP (x, 0), reg);
if (op0 != XEXP (x, 0))
return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
case RTX_COMM_ARITH:
op0 = canon_reg_for_combine (XEXP (x, 0), reg);
op1 = canon_reg_for_combine (XEXP (x, 1), reg);
if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
case RTX_COMM_COMPARE:
op0 = canon_reg_for_combine (XEXP (x, 0), reg);
op1 = canon_reg_for_combine (XEXP (x, 1), reg);
if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
GET_MODE (op0), op0, op1);
case RTX_BITFIELD_OPS:
op0 = canon_reg_for_combine (XEXP (x, 0), reg);
op1 = canon_reg_for_combine (XEXP (x, 1), reg);
op2 = canon_reg_for_combine (XEXP (x, 2), reg);
if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
GET_MODE (op0), op0, op1, op2);
if (rtx_equal_p (get_last_value (reg), x)
|| rtx_equal_p (reg, get_last_value (x)))
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
rtx op = canon_reg_for_combine (XEXP (x, i), reg);
if (op != XEXP (x, i))
else if (fmt[i] == 'E')
for (j = 0; j < XVECLEN (x, i); j++)
rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
if (op != XVECEXP (x, i, j))
XVECEXP (x, i, j) = op;
/* Return X converted to MODE. If the value is already truncated to
MODE we can just return a subreg even though in the general case we
would need an explicit truncation. */
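/* Editorial example: if X is a DImode REG that reg_truncated_to_mode
reports as already truncated to SImode, the result is a simple
lowpart SUBREG of X rather than an explicit (truncate:SI ...). */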
gen_lowpart_or_truncate (machine_mode mode, rtx x)
if (!CONST_INT_P (x)
&& partial_subreg_p (mode, GET_MODE (x))
&& !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
&& !(REG_P (x) && reg_truncated_to_mode (mode, x)))
/* Bit-cast X into an integer mode. */
if (!SCALAR_INT_MODE_P (GET_MODE (x)))
x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
return gen_lowpart (mode, x);
/* See if X can be simplified knowing that we will only refer to it in
MODE and will only refer to those bits that are nonzero in MASK.
If other bits are being computed or if masking operations are done
that select a superset of the bits in MASK, they can sometimes be
ignored.
Return a possibly simplified expression, but always convert X to
MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
are all off in X. This is used when X will be complemented, by either
NOT, NEG, or XOR. */
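/* Editorial examples: forcing (const_int 0x1234) under MASK 0xff
yields (const_int 0x34); and (and:SI Y (const_int 0xff)) under MASK
0xff can return just Y, the AND being redundant. */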
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
enum rtx_code code = GET_CODE (x);
int next_select = just_select || code == XOR || code == NOT || code == NEG;
machine_mode op_mode;
unsigned HOST_WIDE_INT nonzero;
/* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
code below will do the wrong thing since the mode of such an
expression is VOIDmode.
Also do nothing if X is a CLOBBER; this can happen if X was
the return value from a call to gen_lowpart. */
if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
/* We want to perform the operation in its present mode unless we know
that the operation is valid in MODE, in which case we do the operation
in MODE. */
op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
&& have_insn_for (code, mode))
? mode : GET_MODE (x));
/* It is not valid to do a right-shift in a narrower mode
than the one it came in with. */
if ((code == LSHIFTRT || code == ASHIFTRT)
&& partial_subreg_p (mode, GET_MODE (x)))
op_mode = GET_MODE (x);
/* Truncate MASK to fit OP_MODE. */
mask &= GET_MODE_MASK (op_mode);
/* Determine what bits of X are guaranteed to be (non)zero. */
nonzero = nonzero_bits (x, mode);
/* If none of the bits in X are needed, return a zero. */
if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
/* If X is a CONST_INT, return a new one. Do this here since the
test below will fail. */
if (CONST_INT_P (x))
if (SCALAR_INT_MODE_P (mode))
return gen_int_mode (INTVAL (x) & mask, mode);
x = GEN_INT (INTVAL (x) & mask);
return gen_lowpart_common (mode, x);
/* If X is narrower than MODE and we want all the bits in X's mode, just
get X in the proper mode. */
if (paradoxical_subreg_p (mode, GET_MODE (x))
&& (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
return gen_lowpart (mode, x);
/* We can ignore the effect of a SUBREG if it narrows the mode or
if the constant masks to zero all the bits the mode doesn't have. */
if (GET_CODE (x) == SUBREG
&& subreg_lowpart_p (x)
&& (partial_subreg_p (x)
& GET_MODE_MASK (GET_MODE (x))
& ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
scalar_int_mode int_mode, xmode;
if (is_a <scalar_int_mode> (mode, &int_mode)
&& is_a <scalar_int_mode> (GET_MODE (x), &xmode))
/* OP_MODE is either MODE or XMODE, so it must be a scalar
integer mode. */
return force_int_to_mode (x, int_mode, xmode,
as_a <scalar_int_mode> (op_mode),
return gen_lowpart_or_truncate (mode, x);
/* Subroutine of force_to_mode that handles cases in which both X and
the result are scalar integers. MODE is the mode of the result,
XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
is preferred for simplified versions of X. The other arguments
are as for force_to_mode. */
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
enum rtx_code code = GET_CODE (x);
int next_select = just_select || code == XOR || code == NOT || code == NEG;
unsigned HOST_WIDE_INT fuller_mask;
/* When we have an arithmetic operation, or a shift whose count we
do not know, we need to assume that all bits up to the highest-order
bit in MASK will be needed. This is how we form such a mask. */
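/* Worked example (editorial): for MASK == 0x14 the highest set bit is
bit 4, so FULLER_MASK becomes (1 << 5) - 1 == 0x1f, keeping every
lower bit whose carries could reach the bits in MASK. */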
if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
fuller_mask = HOST_WIDE_INT_M1U;
fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
/* If X is a (clobber (const_int)), return it since we know we are
generating something that won't match. */
x = expand_compound_operation (x);
if (GET_CODE (x) != code)
return force_to_mode (x, mode, mask, next_select);
/* Similarly for a truncate. */
return force_to_mode (XEXP (x, 0), mode, mask, next_select);
/* If this is an AND with a constant, convert it into an AND
whose constant is the AND of that constant with MASK. If it
remains an AND of MASK, delete it since it is redundant. */
if (CONST_INT_P (XEXP (x, 1)))
x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
mask & INTVAL (XEXP (x, 1)));
/* If X is still an AND, see if it is an AND with a mask that
is just some low-order bits. If so, and it is MASK, we don't
need it. */
if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
&& (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
/* If it remains an AND, try making another AND with the bits
in the mode mask that aren't in MASK turned on. If the
constant in the AND is wide enough, this might make a
cheaper constant. */
if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
&& GET_MODE_MASK (xmode) != mask
&& HWI_COMPUTABLE_MODE_P (xmode))
unsigned HOST_WIDE_INT cval
= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
gen_int_mode (cval, xmode));
if (set_src_cost (y, xmode, optimize_this_for_speed_p)
< set_src_cost (x, xmode, optimize_this_for_speed_p))
/* In (and (plus FOO C1) M), if M is a mask that just turns off
low-order bits (as in an alignment operation) and FOO is already
aligned to that boundary, mask C1 to that boundary as well.
This may eliminate that PLUS and, later, the AND. */
unsigned int width = GET_MODE_PRECISION (mode);
unsigned HOST_WIDE_INT smask = mask;
/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
number, sign extend it. */
if (width < HOST_BITS_PER_WIDE_INT
&& (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
smask |= HOST_WIDE_INT_M1U << width;
if (CONST_INT_P (XEXP (x, 1))
&& pow2p_hwi (- smask)
&& (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
&& (INTVAL (XEXP (x, 1)) & ~smask) != 0)
return force_to_mode (plus_constant (xmode, XEXP (x, 0),
(INTVAL (XEXP (x, 1)) & smask)),
mode, smask, next_select);
/* Substituting into the operands of a widening MULT is not likely to
create RTL matching a machine insn. */
&& (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
|| GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
&& (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
|| GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
&& REG_P (XEXP (XEXP (x, 0), 0))
&& REG_P (XEXP (XEXP (x, 1), 0)))
return gen_lowpart_or_truncate (mode, x);
/* For PLUS, MINUS and MULT, we need any bits less significant than the
most significant bit in MASK since carries from those bits will
affect the bits we are interested in. */
/* If X is (minus C Y) where C's least set bit is larger than any bit
in the mask, then we may replace with (neg Y). */
if (CONST_INT_P (XEXP (x, 0))
&& least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
return force_to_mode (x, mode, mask, next_select);
/* Similarly, if C contains every bit in the fuller_mask, then we may
replace with (not Y). */
if (CONST_INT_P (XEXP (x, 0))
&& ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
return force_to_mode (x, mode, mask, next_select);
/* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
operation which may be a bitfield extraction. Ensure that the
constant we form is not wider than the mode of X. */
if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
&& CONST_INT_P (XEXP (XEXP (x, 0), 1))
&& INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
&& INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
&& CONST_INT_P (XEXP (x, 1))
&& ((INTVAL (XEXP (XEXP (x, 0), 1))
+ floor_log2 (INTVAL (XEXP (x, 1))))
< GET_MODE_PRECISION (xmode))
&& (UINTVAL (XEXP (x, 1))
& ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
<< INTVAL (XEXP (XEXP (x, 0), 1)),
temp = simplify_gen_binary (GET_CODE (x), xmode,
XEXP (XEXP (x, 0), 0), temp);
x = simplify_gen_binary (LSHIFTRT, xmode, temp,
XEXP (XEXP (x, 0), 1));
return force_to_mode (x, mode, mask, next_select);
/* For most binary operations, just propagate into the operation and
change the mode if we have an operation of that mode. */
op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
/* If we ended up truncating both operands, truncate the result of the
operation instead. */
if (GET_CODE (op0) == TRUNCATE
&& GET_CODE (op1) == TRUNCATE)
op0 = XEXP (op0, 0);
op1 = XEXP (op1, 0);
op0 = gen_lowpart_or_truncate (op_mode, op0);
op1 = gen_lowpart_or_truncate (op_mode, op1);
if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
x = simplify_gen_binary (code, op_mode, op0, op1);
/* For left shifts, do the same, but just for the first operand.
However, we cannot do anything with shifts where we cannot
guarantee that the counts are smaller than the size of the mode
because such a count will have a different meaning in a
wider mode. */
if (! (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
&& ! (GET_MODE (XEXP (x, 1)) != VOIDmode
&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
< (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
/* If the shift count is a constant and we can do arithmetic in
the mode of the shift, refine which bits we need. Otherwise, use the
conservative form of the mask. */
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
&& HWI_COMPUTABLE_MODE_P (op_mode))
mask >>= INTVAL (XEXP (x, 1));
op0 = gen_lowpart_or_truncate (op_mode,
force_to_mode (XEXP (x, 0), op_mode,
mask, next_select));
if (op_mode != xmode || op0 != XEXP (x, 0))
x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
/* Here we can only do something if the shift count is a constant,
this shift constant is valid for the host, and we can do arithmetic
in OP_MODE. */
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
&& HWI_COMPUTABLE_MODE_P (op_mode))
rtx inner = XEXP (x, 0);
unsigned HOST_WIDE_INT inner_mask;
/* Select the mask of the bits we need for the shift operand. */
inner_mask = mask << INTVAL (XEXP (x, 1));
/* We can only change the mode of the shift if we can do arithmetic
in the mode of the shift and INNER_MASK is no wider than the
width of X's mode. */
if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
inner = force_to_mode (inner, op_mode, inner_mask, next_select);
if (xmode != op_mode || inner != XEXP (x, 0))
x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
/* If we have (and (lshiftrt FOO C1) C2) where the combination of the
shift and AND produces only copies of the sign bit (C2 is one less
than a power of two), we can do this with just a shift. */
if (GET_CODE (x) == LSHIFTRT
&& CONST_INT_P (XEXP (x, 1))
/* The shift puts one of the sign bit copies in the least significant
bit. */
&& ((INTVAL (XEXP (x, 1))
+ num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
>= GET_MODE_PRECISION (xmode))
&& pow2p_hwi (mask + 1)
/* Number of bits left after the shift must be more than the mask
needs. */
&& ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
<= GET_MODE_PRECISION (xmode))
/* Must be more sign bit copies than the mask needs. */
&& ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
>= exact_log2 (mask + 1)))
x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
GEN_INT (GET_MODE_PRECISION (xmode)
- exact_log2 (mask + 1)));
/* If we are just looking for the sign bit, we don't need this shift at
all, even if it has a variable count. */
if (val_signbit_p (xmode, mask))
return force_to_mode (XEXP (x, 0), mode, mask, next_select);
/* If this is a shift by a constant, get a mask that contains those bits
that are not copies of the sign bit. We then have two cases: If
MASK only includes those bits, this can be a logical shift, which may
allow simplifications. If MASK is a single-bit field not within
those bits, we are requesting a copy of the sign bit and hence can
shift the sign bit to the appropriate location. */
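/* Editorial example: for (ashiftrt:SI X (const_int 24)) only the low
8 result bits are not sign-bit copies, so under MASK 0xff the shift
may be done as (lshiftrt:SI X (const_int 24)) instead. */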
if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
&& INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
unsigned HOST_WIDE_INT nonzero;
/* If the considered data is wider than HOST_WIDE_INT, we can't
represent a mask for all its bits in a single scalar.
But we only care about the lower bits, so calculate these. */
if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
nonzero = HOST_WIDE_INT_M1U;
/* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
is the number of bits a full-width mask would have set.
We need only shift if these are fewer than nonzero can
hold. If not, we must keep all bits set in nonzero. */
if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
< HOST_BITS_PER_WIDE_INT)
nonzero >>= INTVAL (XEXP (x, 1))
+ HOST_BITS_PER_WIDE_INT
- GET_MODE_PRECISION (xmode);
nonzero = GET_MODE_MASK (xmode);
nonzero >>= INTVAL (XEXP (x, 1));
if ((mask & ~nonzero) == 0)
x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
XEXP (x, 0), INTVAL (XEXP (x, 1)));
if (GET_CODE (x) != ASHIFTRT)
return force_to_mode (x, mode, mask, next_select);
else if ((i = exact_log2 (mask)) >= 0)
x = simplify_shift_const
(NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
GET_MODE_PRECISION (xmode) - 1 - i);
if (GET_CODE (x) != ASHIFTRT)
return force_to_mode (x, mode, mask, next_select);
/* If MASK is 1, convert this to an LSHIFTRT. This can be done
even if the shift count isn't a constant. */
x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
/* If this is a zero- or sign-extension operation that just affects bits
we don't care about, remove it. Be sure the call above returned
something that is still a shift. */
if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
&& CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& (INTVAL (XEXP (x, 1))
<= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
&& GET_CODE (XEXP (x, 0)) == ASHIFT
&& XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
/* If the shift count is constant and we can do computations
in the mode of X, compute where the bits we care about are.
Otherwise, we can't do anything. Don't change the mode of
the shift or propagate MODE into the shift, though. */
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0)
temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
xmode, gen_int_mode (mask, xmode),
if (temp && CONST_INT_P (temp))
x = simplify_gen_binary (code, xmode,
force_to_mode (XEXP (x, 0), xmode,
INTVAL (temp), next_select),
/* If we just want the low-order bit, the NEG isn't needed since it
won't change the low-order bit. */
return force_to_mode (XEXP (x, 0), mode, mask, just_select);
/* We need any bits less significant than the most significant bit in
MASK since carries from those bits will affect the bits we are
interested in. */
/* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
same as the XOR case above. Ensure that the constant we form is not
wider than the mode of X. */
if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
&& CONST_INT_P (XEXP (XEXP (x, 0), 1))
&& INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
&& (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
< GET_MODE_PRECISION (xmode))
&& INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
x = simplify_gen_binary (LSHIFTRT, xmode,
temp, XEXP (XEXP (x, 0), 1));
return force_to_mode (x, mode, mask, next_select);
/* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
use the full mask inside the NOT. */
op0 = gen_lowpart_or_truncate (op_mode,
force_to_mode (XEXP (x, 0), mode, mask,
if (op_mode != xmode || op0 != XEXP (x, 0))
x = simplify_gen_unary (code, op_mode, op0, op_mode);
/* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
which is equal to STORE_FLAG_VALUE. */
if ((mask & ~STORE_FLAG_VALUE) == 0
&& XEXP (x, 1) == const0_rtx
&& GET_MODE (XEXP (x, 0)) == mode
&& pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
&& (nonzero_bits (XEXP (x, 0), mode)
== (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
return force_to_mode (XEXP (x, 0), mode, mask, next_select);
/* We have no way of knowing if the IF_THEN_ELSE can itself be
written in a narrower mode. We play it safe and do not do so. */
op0 = gen_lowpart_or_truncate (xmode,
force_to_mode (XEXP (x, 1), mode,
mask, next_select));
op1 = gen_lowpart_or_truncate (xmode,
force_to_mode (XEXP (x, 2), mode,
mask, next_select));
if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
GET_MODE (XEXP (x, 0)), XEXP (x, 0),
/* Ensure we return a value of the proper mode. */
return gen_lowpart_or_truncate (mode, x);
/* Return nonzero if X is an expression that has one of two values depending on
whether some other value is zero or nonzero. In that case, we return the
value that is being tested, *PTRUE is set to the value if the rtx being
returned has a nonzero value, and *PFALSE is set to the other alternative.
If we return zero, we set *PTRUE and *PFALSE to X. */
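/* Editorial example: for X == (ne:SI (reg A) (const_int 0)) this
returns (reg A) with *PTRUE = const_true_rtx and *PFALSE =
const0_rtx, per the first case below. */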
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
machine_mode mode = GET_MODE (x);
enum rtx_code code = GET_CODE (x);
rtx cond0, cond1, true0, true1, false0, false1;
unsigned HOST_WIDE_INT nz;
scalar_int_mode int_mode;
/* If we are comparing a value against zero, we are done. */
if ((code == NE || code == EQ)
&& XEXP (x, 1) == const0_rtx)
*ptrue = (code == NE) ? const_true_rtx : const0_rtx;
*pfalse = (code == NE) ? const0_rtx : const_true_rtx;
/* If this is a unary operation whose operand has one of two values, apply
our opcode to compute those values. */
else if (UNARY_P (x)
&& (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
*ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
*pfalse = simplify_gen_unary (code, mode, false0,
GET_MODE (XEXP (x, 0)));
/* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
make can't possibly match and would suppress other optimizations. */
else if (code == COMPARE)
/* If this is a binary operation, see if either side has only one of two
values. If either one does or if both do and they are conditional on
the same value, compute the new true and false values. */
else if (BINARY_P (x))
rtx op0 = XEXP (x, 0);
rtx op1 = XEXP (x, 1);
cond0 = if_then_else_cond (op0, &true0, &false0);
cond1 = if_then_else_cond (op1, &true1, &false1);
if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
&& (REG_P (op0) || REG_P (op1)))
/* Try to enable a simplification by undoing work done by
if_then_else_cond if it converted a REG into something more
complex. */
true0 = false0 = op0;
true1 = false1 = op1;
if ((cond0 != 0 || cond1 != 0)
&& ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
/* If if_then_else_cond returned zero, then true/false are the
same rtl. We must copy one of them to prevent invalid rtl
sharing. */
true0 = copy_rtx (true0);
else if (cond1 == 0)
true1 = copy_rtx (true1);
if (COMPARISON_P (x))
*ptrue = simplify_gen_relational (code, mode, VOIDmode,
*pfalse = simplify_gen_relational (code, mode, VOIDmode,
*ptrue = simplify_gen_binary (code, mode, true0, true1);
*pfalse = simplify_gen_binary (code, mode, false0, false1);
return cond0 ? cond0 : cond1;
/* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
operands is zero when the other is nonzero, and vice-versa,
and STORE_FLAG_VALUE is 1 or -1. */
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& (code == PLUS || code == IOR || code == XOR || code == MINUS
&& GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
rtx op0 = XEXP (XEXP (x, 0), 1);
rtx op1 = XEXP (XEXP (x, 1), 1);
cond0 = XEXP (XEXP (x, 0), 0);
cond1 = XEXP (XEXP (x, 1), 0);
if (COMPARISON_P (cond0)
&& COMPARISON_P (cond1)
&& ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
|| ((swap_condition (GET_CODE (cond0))
== reversed_comparison_code (cond1, NULL))
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
&& ! side_effects_p (x))
*ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
*pfalse = simplify_gen_binary (MULT, mode,
? simplify_gen_unary (NEG, mode,
/* Similarly for MULT, AND and UMIN, except that for these the result
is always zero. */
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& (code == MULT || code == AND || code == UMIN)
&& GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
cond0 = XEXP (XEXP (x, 0), 0);
cond1 = XEXP (XEXP (x, 1), 0);
if (COMPARISON_P (cond0)
&& COMPARISON_P (cond1)
&& ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
|| ((swap_condition (GET_CODE (cond0))
== reversed_comparison_code (cond1, NULL))
&& rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
&& rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
&& ! side_effects_p (x))
*ptrue = *pfalse = const0_rtx;
else if (code == IF_THEN_ELSE)
/* If we have IF_THEN_ELSE already, extract the condition and
canonicalize it if it is NE or EQ. */
cond0 = XEXP (x, 0);
*ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
return XEXP (cond0, 0);
else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
*ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
return XEXP (cond0, 0);
/* If X is a SUBREG, we can narrow both the true and false values
of the inner expression, if there is a condition. */
else if (code == SUBREG
&& 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
true0 = simplify_gen_subreg (mode, true0,
GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
false0 = simplify_gen_subreg (mode, false0,
GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
if (true0 && false0)
/* If X is a constant, this isn't special and will cause confusion
if we treat it as such. Likewise if it is equivalent to a constant. */
else if (CONSTANT_P (x)
|| ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
/* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
will be least confusing to the rest of the compiler. */
else if (mode == BImode)
*ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
/* If X is known to be either 0 or -1, those are the true and
false values when testing X. */
else if (x == constm1_rtx || x == const0_rtx
|| (is_a <scalar_int_mode> (mode, &int_mode)
&& (num_sign_bit_copies (x, int_mode)
== GET_MODE_PRECISION (int_mode))))
*ptrue = constm1_rtx, *pfalse = const0_rtx;
/* Likewise for 0 or a single bit. */
else if (HWI_COMPUTABLE_MODE_P (mode)
&& pow2p_hwi (nz = nonzero_bits (x, mode)))
*ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
/* Otherwise fail; show no condition with true and false values the same. */
*ptrue = *pfalse = x;
/* Return the value of expression X given the fact that condition COND
is known to be true when applied to REG as its first operand and VAL
as its second. X is known to not be shared and so can be modified in
place.
We only handle the simplest cases, and specifically those cases that
arise with IF_THEN_ELSE expressions. */
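/* Editorial examples: knowing (eq REG VAL), an X equal to REG may be
replaced by VAL; and knowing REG >= 0 against VAL == const0_rtx,
(abs REG) simplifies to REG, or to (neg REG) when REG <= 0 is known
instead, as handled below. */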
9342 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9344 enum rtx_code code = GET_CODE (x);
9348 if (side_effects_p (x))
9351 /* If either operand of the condition is a floating point value,
9352 then we have to avoid collapsing an EQ comparison. */
9354 && rtx_equal_p (x, reg)
9355 && ! FLOAT_MODE_P (GET_MODE (x))
9356 && ! FLOAT_MODE_P (GET_MODE (val)))
9359 if (cond == UNEQ && rtx_equal_p (x, reg))
9362 /* If X is (abs REG) and we know something about REG's relationship
9363 with zero, we may be able to simplify this. */
9365 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9368 case GE: case GT: case EQ:
9371 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9373 GET_MODE (XEXP (x, 0)));
9378 /* The only other cases we handle are MIN, MAX, and comparisons if the
9379 operands are the same as REG and VAL. */
9381 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9383 if (rtx_equal_p (XEXP (x, 0), val))
9385 std::swap (val, reg);
9386 cond = swap_condition (cond);
9389 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9391 if (COMPARISON_P (x))
9393 if (comparison_dominates_p (cond, code))
9394 return const_true_rtx;
9396 code = reversed_comparison_code (x, NULL);
9398 && comparison_dominates_p (cond, code))
9403 else if (code == SMAX || code == SMIN
9404 || code == UMIN || code == UMAX)
9406 int unsignedp = (code == UMIN || code == UMAX);
9408 /* Do not reverse the condition when it is NE or EQ.
9409 This is because we cannot conclude anything about
9410 the value of 'SMAX (x, y)' when x is not equal to y,
9411 but we can when x equals y. */
9412 if ((code == SMAX || code == UMAX)
9413 && ! (cond == EQ || cond == NE))
9414 cond = reverse_condition (cond);
9419 return unsignedp ? x : XEXP (x, 1);
9421 return unsignedp ? x : XEXP (x, 0);
9423 return unsignedp ? XEXP (x, 1) : x;
9425 return unsignedp ? XEXP (x, 0) : x;
9432 else if (code == SUBREG)
9434 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9435 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9437 if (SUBREG_REG (x) != r)
9439 /* We must simplify subreg here, before we lose track of the
9440 original inner_mode. */
9441 new_rtx = simplify_subreg (GET_MODE (x), r,
9442 inner_mode, SUBREG_BYTE (x));
9446 SUBST (SUBREG_REG (x), r);
9451 /* We don't have to handle SIGN_EXTEND here, because even in the
9452 case of replacing something with a modeless CONST_INT, a
9453 CONST_INT is already (supposed to be) a valid sign extension for
9454 its narrower mode, which implies it's already properly
9455 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9456 story is different. */
9457 else if (code == ZERO_EXTEND)
9459 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9460 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9462 if (XEXP (x, 0) != r)
9464 /* We must simplify the zero_extend here, before we lose
9465 track of the original inner_mode. */
9466 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9471 SUBST (XEXP (x, 0), r);
9477 fmt = GET_RTX_FORMAT (code);
9478 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9481 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9482 else if (fmt[i] == 'E')
9483 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9484 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9491 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9492 assignment as a field assignment. */
9495 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9497 if (widen_x && GET_MODE (x) != GET_MODE (y))
9499 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9501 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9503 /* For big endian, adjust the memory offset. */
9504 if (BYTES_BIG_ENDIAN)
9505 x = adjust_address_nv (x, GET_MODE (y),
9506 -subreg_lowpart_offset (GET_MODE (x),
9509 x = adjust_address_nv (x, GET_MODE (y), 0);
9512 if (x == y || rtx_equal_p (x, y))
9515 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9518 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9519 Note that all SUBREGs of MEM are paradoxical; otherwise they
9520 would have been rewritten. */
9521 if (MEM_P (x) && GET_CODE (y) == SUBREG
9522 && MEM_P (SUBREG_REG (y))
9523 && rtx_equal_p (SUBREG_REG (y),
9524 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9527 if (MEM_P (y) && GET_CODE (x) == SUBREG
9528 && MEM_P (SUBREG_REG (x))
9529 && rtx_equal_p (SUBREG_REG (x),
9530 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9533 /* We used to see if get_last_value of X and Y were the same but that's
9534 not correct. In one direction, we'll cause the assignment to have
9535 the wrong destination and in the other, we'll import a register into this
9536 insn that might already have been dead. So fail if none of the
9537 above cases are true. */
9541 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9542 Return that assignment if so.
9544 We only handle the most common cases. */
9547 make_field_assignment (rtx x)
9549 rtx dest = SET_DEST (x);
9550 rtx src = SET_SRC (x);
9555 unsigned HOST_WIDE_INT len;
9558 /* All the rules in this function are specific to scalar integers. */
9559 scalar_int_mode mode;
9560 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9563 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9564 a clear of a one-bit field. We will have changed it to
9565 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9566 for a SUBREG. */
9568 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9569 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9570 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9571 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9573 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9576 return gen_rtx_SET (assign, const0_rtx);
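/* Worked instance of the rule above: clearing one bit with
   DEST &= ~(1 << POS) reaches combine as
   (and (rotate (const_int -2) POS) DEST), and the SET built here is
   (set (zero_extract DEST (const_int 1) POS) (const_int 0)). */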
9580 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9581 && subreg_lowpart_p (XEXP (src, 0))
9582 && partial_subreg_p (XEXP (src, 0))
9583 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9584 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9585 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9586 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9588 assign = make_extraction (VOIDmode, dest, 0,
9589 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9592 return gen_rtx_SET (assign, const0_rtx);
9596 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a one-bit field. */
9598 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9599 && XEXP (XEXP (src, 0), 0) == const1_rtx
9600 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9602 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9605 return gen_rtx_SET (assign, const1_rtx);
9609 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9610 SRC is an AND with all bits of that field set, then we can discard the AND. */
9612 if (GET_CODE (dest) == ZERO_EXTRACT
9613 && CONST_INT_P (XEXP (dest, 1))
9614 && GET_CODE (src) == AND
9615 && CONST_INT_P (XEXP (src, 1)))
9617 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9618 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9619 unsigned HOST_WIDE_INT ze_mask;
9621 if (width >= HOST_BITS_PER_WIDE_INT)
9624 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9626 /* Complete overlap. We can remove the source AND. */
9627 if ((and_mask & ze_mask) == ze_mask)
9628 return gen_rtx_SET (dest, XEXP (src, 0));
9630 /* Partial overlap. We can reduce the source AND. */
9631 if ((and_mask & ze_mask) != and_mask)
9633 src = gen_rtx_AND (mode, XEXP (src, 0),
9634 gen_int_mode (and_mask & ze_mask, mode));
9635 return gen_rtx_SET (dest, src);
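/* Numeric sketch of the two overlap tests (hypothetical masks): for an
   8-bit field, ze_mask == 0xff. An and_mask of 0x3ff covers the field
   completely (0x3ff & 0xff == 0xff == ze_mask), so the AND is dropped;
   an and_mask of 0x1f0 overlaps only partially, so it is narrowed to
   0x1f0 & 0xff == 0xf0. */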
9639 /* The other case we handle is assignments into a constant-position
9640 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9641 a mask that has all one bits except for a group of zero bits and
9642 OTHER is known to have zeros where C1 has ones, this is such an
9643 assignment. Compute the position and length from C1. Shift OTHER
9644 to the appropriate position, force it to the required mode, and
9645 make the extraction. Check for the AND in both operands. */
9647 /* One or more SUBREGs might obscure the constant-position field
9648 assignment. The first one we are likely to encounter is an outer
9649 narrowing SUBREG, which we can just strip for the purposes of
9650 identifying the constant-field assignment. */
9651 scalar_int_mode src_mode = mode;
9652 if (GET_CODE (src) == SUBREG
9653 && subreg_lowpart_p (src)
9654 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9655 src = SUBREG_REG (src);
9657 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9660 rhs = expand_compound_operation (XEXP (src, 0));
9661 lhs = expand_compound_operation (XEXP (src, 1));
9663 if (GET_CODE (rhs) == AND
9664 && CONST_INT_P (XEXP (rhs, 1))
9665 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9666 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9667 /* The second SUBREG that might get in the way is a paradoxical
9668 SUBREG around the first operand of the AND. We want to
9669 pretend the operand is as wide as the destination here. We
9670 do this by adjusting the MEM to wider mode for the sole
9671 purpose of the call to rtx_equal_for_field_assignment_p. Also
9672 note this trick only works for MEMs. */
9673 else if (GET_CODE (rhs) == AND
9674 && paradoxical_subreg_p (XEXP (rhs, 0))
9675 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9676 && CONST_INT_P (XEXP (rhs, 1))
9677 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9679 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9680 else if (GET_CODE (lhs) == AND
9681 && CONST_INT_P (XEXP (lhs, 1))
9682 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9683 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9684 /* The second SUBREG that might get in the way is a paradoxical
9685 SUBREG around the first operand of the AND. We want to
9686 pretend the operand is as wide as the destination here. We
9687 do this by adjusting the MEM to wider mode for the sole
9688 purpose of the call to rtx_equal_for_field_assignment_p. Also
9689 note this trick only works for MEMs. */
9690 else if (GET_CODE (lhs) == AND
9691 && paradoxical_subreg_p (XEXP (lhs, 0))
9692 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9693 && CONST_INT_P (XEXP (lhs, 1))
9694 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9696 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9700 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9702 || pos + len > GET_MODE_PRECISION (mode)
9703 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9704 || (c1 & nonzero_bits (other, mode)) != 0)
9707 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9711 /* The mode to use for the source is the mode of the assignment, or of
9712 what is inside a possible STRICT_LOW_PART. */
9713 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9714 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9716 /* Shift OTHER right POS places and make it the source, restricting it
9717 to the proper length and mode. */
9719 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9720 src_mode, other, pos),
9722 src = force_to_mode (src, new_mode,
9723 len >= HOST_BITS_PER_WIDE_INT
9725 : (HOST_WIDE_INT_1U << len) - 1,
9728 /* If SRC is masked by an AND that does not make a difference in
9729 the value being stored, strip it. */
9730 if (GET_CODE (assign) == ZERO_EXTRACT
9731 && CONST_INT_P (XEXP (assign, 1))
9732 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9733 && GET_CODE (src) == AND
9734 && CONST_INT_P (XEXP (src, 1))
9735 && UINTVAL (XEXP (src, 1))
9736 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9737 src = XEXP (src, 0);
9739 return gen_rtx_SET (assign, src);
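/* End-to-end sketch of the constant-position case (hypothetical operands,
   32-bit mode): in (set R (ior (and R (const_int 0xffffff0f)) OTHER)),
   ~C1 == 0xf0 gives pos == 4 and len == 4. Provided nonzero_bits shows
   OTHER is zero wherever C1 is one, the result is a SET of
   (zero_extract R (const_int 4) (const_int 4)) from OTHER shifted right
   by 4 and masked to 4 bits. */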
9742 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c) if so. */
9746 apply_distributive_law (rtx x)
9748 enum rtx_code code = GET_CODE (x);
9749 enum rtx_code inner_code;
9750 rtx lhs, rhs, other;
9753 /* Distributivity is not true for floating point as it can change the
9754 value. So we don't do it unless -funsafe-math-optimizations. */
9755 if (FLOAT_MODE_P (GET_MODE (x))
9756 && ! flag_unsafe_math_optimizations)
9759 /* The outer operation can only be one of the following: */
9760 if (code != IOR && code != AND && code != XOR
9761 && code != PLUS && code != MINUS)
9767 /* If either operand is a primitive we can't do anything, so get out fast. */
9769 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9772 lhs = expand_compound_operation (lhs);
9773 rhs = expand_compound_operation (rhs);
9774 inner_code = GET_CODE (lhs);
9775 if (inner_code != GET_CODE (rhs))
9778 /* See if the inner and outer operations distribute. */
9785 /* These all distribute except over PLUS. */
9786 if (code == PLUS || code == MINUS)
9791 if (code != PLUS && code != MINUS)
9796 /* This is also a multiply, so it distributes over everything. */
9799 /* This used to handle SUBREG, but this turned out to be counter-
9800 productive, since (subreg (op ...)) usually is not handled by
9801 insn patterns, and this "optimization" therefore transformed
9802 recognizable patterns into unrecognizable ones. Therefore the
9803 SUBREG case was removed from here.
9805 It is possible that distributing SUBREG over arithmetic operations
9806 leads to an intermediate result that can then be optimized further,
9807 e.g. by moving the outer SUBREG to the other side of a SET as done
9808 in simplify_set. This seems to have been the original intent of
9809 handling SUBREGs here.
9811 However, with current GCC this does not appear to actually happen,
9812 at least on major platforms. If some case is found where removing
9813 the SUBREG case here prevents follow-on optimizations, distributing
9814 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9820 /* Set LHS and RHS to the inner operands (A and B in the example
9821 above) and set OTHER to the common operand (C in the example).
9822 There is only one way to do this unless the inner operation is commutative. */
9824 if (COMMUTATIVE_ARITH_P (lhs)
9825 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9826 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9827 else if (COMMUTATIVE_ARITH_P (lhs)
9828 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9829 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9830 else if (COMMUTATIVE_ARITH_P (lhs)
9831 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9832 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9833 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9834 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9838 /* Form the new inner operation, seeing if it simplifies first. */
9839 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9841 /* There is one exception to the general way of distributing:
9842 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9843 if (code == XOR && inner_code == IOR)
9846 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9849 /* We may be able to continue distributing the result, so call
9850 ourselves recursively on the inner operation before forming the
9851 outer operation, which we return. */
9852 return simplify_gen_binary (inner_code, GET_MODE (x),
9853 apply_distributive_law (tem), other);
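/* Two sketches of the law as coded above (illustrative only):
   (ior (and A C) (and B C)) becomes (and (ior A B) C), while the XOR/IOR
   exception rewrites (xor (ior A C) (ior B C)) as (and (xor A B) (not C));
   checking the cases C == 1 and C == 0 bitwise confirms both sides agree. */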
9856 /* See if X is of the form (* (+ A B) C), and if so convert to
9857 (+ (* A C) (* B C)) and try to simplify.
9859 Most of the time, this results in no change. However, if some of
9860 the operands are the same or inverses of each other, simplifications
9863 For example, (and (ior A B) (not B)) can occur as the result of
9864 expanding a bit field assignment. When we apply the distributive
9865 law to this, we get (ior (and A (not B)) (and B (not B))),
9866 which then simplifies to (and A (not B)).
9868 Note that no checks happen on the validity of applying the inverse
9869 distributive law. Such checks would be pointless, since we can do them in the
9870 few places where this routine is called.
9872 N is the index of the term that is decomposed (the arithmetic operation,
9873 i.e. (+ A B) in the first example above). !N is the index of the term that
9874 is distributed, i.e. of C in the first example above. */
9876 distribute_and_simplify_rtx (rtx x, int n)
9879 enum rtx_code outer_code, inner_code;
9880 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9882 /* Distributivity is not true for floating point as it can change the
9883 value. So we don't do it unless -funsafe-math-optimizations. */
9884 if (FLOAT_MODE_P (GET_MODE (x))
9885 && ! flag_unsafe_math_optimizations)
9888 decomposed = XEXP (x, n);
9889 if (!ARITHMETIC_P (decomposed))
9892 mode = GET_MODE (x);
9893 outer_code = GET_CODE (x);
9894 distributed = XEXP (x, !n);
9896 inner_code = GET_CODE (decomposed);
9897 inner_op0 = XEXP (decomposed, 0);
9898 inner_op1 = XEXP (decomposed, 1);
9900 /* Special case (and (xor B C) (not A)), which is equivalent to
9901 (xor (ior A B) (ior A C)) */
9902 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9904 distributed = XEXP (distributed, 0);
9910 /* Distribute the second term. */
9911 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9912 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9916 /* Distribute the first term. */
9917 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9918 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9921 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9923 if (GET_CODE (tmp) != outer_code
9924 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9925 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9931 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9932 in MODE. Return an equivalent form, if different from (and VAROP
9933 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9936 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
9937 unsigned HOST_WIDE_INT constop)
9939 unsigned HOST_WIDE_INT nonzero;
9940 unsigned HOST_WIDE_INT orig_constop;
9945 orig_constop = constop;
9946 if (GET_CODE (varop) == CLOBBER)
9949 /* Simplify VAROP knowing that we will only be looking at some of the bits in MODE.
9952 Note by passing in CONSTOP, we guarantee that the bits not set in
9953 CONSTOP are not significant and will never be examined. We must
9954 ensure that is the case by explicitly masking out those bits
9955 before returning. */
9956 varop = force_to_mode (varop, mode, constop, 0);
9958 /* If VAROP is a CLOBBER, we will fail so return it. */
9959 if (GET_CODE (varop) == CLOBBER)
9962 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9963 to VAROP and return the new constant. */
9964 if (CONST_INT_P (varop))
9965 return gen_int_mode (INTVAL (varop) & constop, mode);
9967 /* See what bits may be nonzero in VAROP. Unlike the general case of
9968 a call to nonzero_bits, here we don't care about bits outside MODE. */
9971 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9973 /* Turn off all bits in the constant that are known to already be zero.
9974 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9975 which is tested below. */
9979 /* If we don't have any bits left, return zero. */
9983 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9984 a power of two, we can replace this with an ASHIFT. */
9985 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9986 && (i = exact_log2 (constop)) >= 0)
9987 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
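  /* Concrete instance (assuming X is known to be 0 or 1):
     (and (neg X) (const_int 4)) becomes (ashift X (const_int 2)), since
     (neg X) is either 0 or all ones and the mask keeps exactly bit 2;
     exact_log2 (4) == 2 supplies the shift count. */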
9989 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9990 or XOR, then try to apply the distributive law. This may eliminate
9991 operations if either branch can be simplified because of the AND.
9992 It may also make some cases more complex, but those cases probably
9993 won't match a pattern either with or without this. */
9995 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9997 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10001 apply_distributive_law
10002 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10003 simplify_and_const_int (NULL_RTX, varop_mode,
10006 simplify_and_const_int (NULL_RTX, varop_mode,
10011 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10012 the AND and see if one of the operands simplifies to zero. If so, we
10013 may eliminate it. */
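  /* Illustration (hypothetical operands): with CONSTOP == 15,
     (and (plus X (const_int 16)) 15) distributes into the AND of each
     arm; 16 & 15 == 0, so the PLUS disappears and only (and X 15)
     remains. */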
10015 if (GET_CODE (varop) == PLUS
10016 && pow2p_hwi (constop + 1))
10020 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10021 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10022 if (o0 == const0_rtx)
10024 if (o1 == const0_rtx)
10028 /* Make a SUBREG if necessary. If we can't make it, fail. */
10029 varop = gen_lowpart (mode, varop);
10030 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10033 /* If we are only masking insignificant bits, return VAROP. */
10034 if (constop == nonzero)
10037 if (varop == orig_varop && constop == orig_constop)
10040 /* Otherwise, return an AND. */
10041 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10045 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done in MODE.
10048 Return an equivalent form, if different from X. Otherwise, return X. If
10049 X is zero, we are to always construct the equivalent form. */
10052 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10053 unsigned HOST_WIDE_INT constop)
10055 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10060 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10061 gen_int_mode (constop, mode));
10062 if (GET_MODE (x) != mode)
10063 x = gen_lowpart (mode, x);
10067 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10068 We don't care about bits outside of those defined in MODE.
10070 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
10071 a shift, AND, or zero_extract, we can do better. */
10074 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10075 scalar_int_mode mode,
10076 unsigned HOST_WIDE_INT *nonzero)
10079 reg_stat_type *rsp;
10081 /* If X is a register whose nonzero bits value is current, use it.
10082 Otherwise, if X is a register whose value we can find, use that
10083 value. Otherwise, use the previously-computed global nonzero bits
10084 for this register. */
10086 rsp = &reg_stat[REGNO (x)];
10087 if (rsp->last_set_value != 0
10088 && (rsp->last_set_mode == mode
10089 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10090 && GET_MODE_CLASS (mode) == MODE_INT))
10091 && ((rsp->last_set_label >= label_tick_ebb_start
10092 && rsp->last_set_label < label_tick)
10093 || (rsp->last_set_label == label_tick
10094 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10095 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10096 && REGNO (x) < reg_n_sets_max
10097 && REG_N_SETS (REGNO (x)) == 1
10098 && !REGNO_REG_SET_P
10099 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10102 /* Note that, even if the precision of last_set_mode is lower than that
10103 of mode, record_value_for_reg invoked nonzero_bits on the register
10104 with nonzero_bits_mode (because last_set_mode is necessarily integral
10105 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10106 are all valid, hence in mode too since nonzero_bits_mode is defined
10107 to the largest HWI_COMPUTABLE_MODE_P mode. */
10108 *nonzero &= rsp->last_set_nonzero_bits;
10112 tem = get_last_value (x);
10115 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10116 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10121 if (nonzero_sign_valid && rsp->nonzero_bits)
10123 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10125 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10126 /* We don't know anything about the upper bits. */
10127 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10135 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10136 end of X that are known to be equal to the sign bit. X will be used
10137 in mode MODE; the returned value will always be between 1 and the
10138 number of bits in MODE. */
10141 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10142 scalar_int_mode mode,
10143 unsigned int *result)
10146 reg_stat_type *rsp;
10148 rsp = &reg_stat[REGNO (x)];
10149 if (rsp->last_set_value != 0
10150 && rsp->last_set_mode == mode
10151 && ((rsp->last_set_label >= label_tick_ebb_start
10152 && rsp->last_set_label < label_tick)
10153 || (rsp->last_set_label == label_tick
10154 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10155 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10156 && REGNO (x) < reg_n_sets_max
10157 && REG_N_SETS (REGNO (x)) == 1
10158 && !REGNO_REG_SET_P
10159 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10162 *result = rsp->last_set_sign_bit_copies;
10166 tem = get_last_value (x);
10170 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10171 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10172 *result = rsp->sign_bit_copies;
10177 /* Return the number of "extended" bits there are in X, when interpreted
10178 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10179 unsigned quantities, this is the number of high-order zero bits.
10180 For signed quantities, this is the number of copies of the sign bit
10181 minus 1. In both cases, this function returns the number of "spare"
10182 bits. For example, if two quantities for which this function returns
10183 at least 1 are added, the addition is known not to overflow.
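   (Illustration, not from the original text: an unsigned value in an
   8-bit mode whose nonzero_bits mask is 0x3f has floor_log2 (0x3f) == 5,
   so the unsigned branch below reports 8 - 1 - 5 == 2 spare bits.)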
10185 This function will always return 0 unless called during combine, which
10186 implies that it must be called from a define_split. */
10189 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10191 if (nonzero_sign_valid == 0)
10194 scalar_int_mode int_mode;
10196 ? (is_a <scalar_int_mode> (mode, &int_mode)
10197 && HWI_COMPUTABLE_MODE_P (int_mode)
10198 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10199 - floor_log2 (nonzero_bits (x, int_mode)))
10201 : num_sign_bit_copies (x, mode) - 1);
10204 /* This function is called from `simplify_shift_const' to merge two
10205 outer operations. Specifically, we have already found that we need
10206 to perform operation *POP0 with constant *PCONST0 at the outermost
10207 position. We would now like to also perform OP1 with constant CONST1
10208 (with *POP0 being done last).
10210 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10211 the resulting operation. *PCOMP_P is set to 1 if we would need to
10212 complement the innermost operand, otherwise it is unchanged.
10214 MODE is the mode in which the operation will be done. No bits outside
10215 the width of this mode matter. It is assumed that the width of this mode
10216 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10218 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10219 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10220 result is simply *PCONST0.
10222 If the resulting operation cannot be expressed as one operation, we
10223 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10226 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
10227 HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10228 enum rtx_code op0 = *pop0;
10229 HOST_WIDE_INT const0 = *pconst0;
10231 const0 &= GET_MODE_MASK (mode);
10232 const1 &= GET_MODE_MASK (mode);
10234 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10238 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10239 if OP0 is SET. */
10241 if (op1 == UNKNOWN || op0 == SET)
10244 else if (op0 == UNKNOWN)
10245 op0 = op1, const0 = const1;
10247 else if (op0 == op1)
10271 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10272 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10275 /* If the two constants aren't the same, we can't do anything. The
10276 remaining six cases can all be done. */
10277 else if (const0 != const1)
10285 /* (a & b) | b == b */
10287 else /* op1 == XOR */
10288 /* (a ^ b) | b == a | b */
10294 /* (a & b) ^ b == (~a) & b */
10295 op0 = AND, *pcomp_p = 1;
10296 else /* op1 == IOR */
10297 /* (a | b) ^ b == a & ~b */
10298 op0 = AND, const0 = ~const0;
10303 /* (a | b) & b == b */
10305 else /* op1 == XOR */
10306 /* (a ^ b) & b == (~a) & b */
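	  /* Bitwise spot-check of the rule above (illustrative): with
	     a == 0b1100 and b == 0b1010, (a ^ b) & b == 0b0010 and
	     (~a) & b == 0b0010 as well, so folding the XOR into a
	     complemented AND is safe. */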
10313 /* Check for NO-OP cases. */
10314 const0 &= GET_MODE_MASK (mode);
10316 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10318 else if (const0 == 0 && op0 == AND)
10320 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10326 /* ??? Slightly redundant with the above mask, but not entirely.
10327 Moving this above means we'd have to sign-extend the mode mask
10328 for the final test. */
10329 if (op0 != UNKNOWN && op0 != NEG)
10330 *pconst0 = trunc_int_for_mode (const0, mode);
10335 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10336 the shift in. The original shift operation CODE is performed on OP in
10337 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10338 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10339 result of the shift is subject to operation OUTER_CODE with operand OUTER_CONST. */
10342 static scalar_int_mode
10343 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10344 scalar_int_mode orig_mode, scalar_int_mode mode,
10345 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10347 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10349 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
10353 /* We can still widen if the bits brought in from the left are identical
10354 to the sign bit of ORIG_MODE. */
10355 if (num_sign_bit_copies (op, mode)
10356 > (unsigned) (GET_MODE_PRECISION (mode)
10357 - GET_MODE_PRECISION (orig_mode)))
10362 /* Similarly here but with zero bits. */
10363 if (HWI_COMPUTABLE_MODE_P (mode)
10364 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10367 /* We can also widen if the bits brought in will be masked off. This
10368 operation is performed in ORIG_MODE. */
10369 if (outer_code == AND)
10371 int care_bits = low_bitmask_len (orig_mode, outer_const);
10374 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10383 gcc_unreachable ();
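/* Example of the widening tests above (illustrative): an LSHIFTRT written
   in QImode may be performed in SImode when nonzero_bits proves bits 8..31
   of OP are already zero, or, for ASHIFTRT, when OP has more sign-bit
   copies than the 24 bits of difference between the two modes. */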
10390 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10391 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10392 if we cannot simplify it. Otherwise, return a simplified value.
10394 The shift is normally computed in the widest mode we find in VAROP, as
10395 long as it isn't a different number of words than RESULT_MODE. Exceptions
10396 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10399 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10400 rtx varop, int orig_count)
10402 enum rtx_code orig_code = code;
10403 rtx orig_varop = varop;
10405 machine_mode mode = result_mode;
10406 machine_mode shift_mode;
10407 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10408 unsigned int mode_words
10409 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10410 /* We form (outer_op (code varop count) (outer_const)). */
10411 enum rtx_code outer_op = UNKNOWN;
10412 HOST_WIDE_INT outer_const = 0;
10413 int complement_p = 0;
10416 /* Make sure to truncate the "natural" shift on the way in. We don't
10417 want to do this inside the loop as it makes it more difficult to
10418 reset 'count'. */
10419 if (SHIFT_COUNT_TRUNCATED)
10420 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10422 /* If we were given an invalid count, don't do anything except exactly
10423 what was requested. */
10425 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10428 count = orig_count;
10430 /* Unless one of the branches of the `if' in this loop does a `continue',
10431 we will `break' the loop after the `if'. */
10435 /* If we have an operand of (clobber (const_int 0)), fail. */
10436 if (GET_CODE (varop) == CLOBBER)
10439 /* Convert ROTATERT to ROTATE. */
10440 if (code == ROTATERT)
10442 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10444 count = bitsize - count;
10447 shift_mode = result_mode;
10448 if (shift_mode != mode)
10450 /* We only change the modes of scalar shifts. */
10451 int_mode = as_a <scalar_int_mode> (mode);
10452 int_result_mode = as_a <scalar_int_mode> (result_mode);
10453 shift_mode = try_widen_shift_mode (code, varop, count,
10454 int_result_mode, int_mode,
10455 outer_op, outer_const);
10458 scalar_int_mode shift_unit_mode
10459 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10461 /* Handle cases where the count is greater than the size of the mode
10462 minus 1. For ASHIFT, use the size minus one as the count (this can
10463 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10464 take the count modulo the size. For other shifts, the result is zero.
10467 Since these shifts are being produced by the compiler by combining
10468 multiple operations, each of which are defined, we know what the
10469 result is supposed to be. */
10471 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10473 if (code == ASHIFTRT)
10474 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10475 else if (code == ROTATE || code == ROTATERT)
10476 count %= GET_MODE_PRECISION (shift_unit_mode);
10479 /* We can't simply return zero because there may be an outer op. */
10481 varop = const0_rtx;
10487 /* If we discovered we had to complement VAROP, leave. Making a NOT
10488 here would cause an infinite loop. */
10492 if (shift_mode == shift_unit_mode)
10494 /* An arithmetic right shift of a quantity known to be -1 or 0 is a no-op. */
10496 if (code == ASHIFTRT
10497 && (num_sign_bit_copies (varop, shift_unit_mode)
10498 == GET_MODE_PRECISION (shift_unit_mode)))
10504 /* If we are doing an arithmetic right shift and discarding all but
10505 the sign bit copies, this is equivalent to doing a shift by the
10506 bitsize minus one. Convert it into that shift because it will
10507 often allow other simplifications. */
10509 if (code == ASHIFTRT
10510 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10511 >= GET_MODE_PRECISION (shift_unit_mode)))
10512 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
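	  /* For instance (an SImode sketch): if VAROP has at least three
	     sign-bit copies, then for (ashiftrt VAROP 30) we have
	     30 + 3 >= 32, so every result bit is a sign copy and the shift
	     is rewritten as (ashiftrt VAROP 31), a form other rules
	     recognize more often. */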
10514 /* We simplify the tests below and elsewhere by converting
10515 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10516 `make_compound_operation' will convert it to an ASHIFTRT for
10517 those machines (such as VAX) that don't have an LSHIFTRT. */
10518 if (code == ASHIFTRT
10519 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10520 && val_signbit_known_clear_p (shift_unit_mode,
10521 nonzero_bits (varop,
10525 if (((code == LSHIFTRT
10526 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10527 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10529 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10530 && !((nonzero_bits (varop, shift_unit_mode) << count)
10531 & GET_MODE_MASK (shift_unit_mode))))
10532 && !side_effects_p (varop))
10533 varop = const0_rtx;
10536 switch (GET_CODE (varop))
10542 new_rtx = expand_compound_operation (varop);
10543 if (new_rtx != varop)
10551 /* The following rules apply only to scalars. */
10552 if (shift_mode != shift_unit_mode)
10554 int_mode = as_a <scalar_int_mode> (mode);
10556 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10557 minus the width of a smaller mode, we can do this with a
10558 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10559 if ((code == ASHIFTRT || code == LSHIFTRT)
10560 && ! mode_dependent_address_p (XEXP (varop, 0),
10561 MEM_ADDR_SPACE (varop))
10562 && ! MEM_VOLATILE_P (varop)
10563 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10566 new_rtx = adjust_address_nv (varop, tmode,
10567 BYTES_BIG_ENDIAN ? 0
10568 : count / BITS_PER_UNIT);
10570 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10571 : ZERO_EXTEND, int_mode, new_rtx);
10578 /* The following rules apply only to scalars. */
10579 if (shift_mode != shift_unit_mode)
10581 int_mode = as_a <scalar_int_mode> (mode);
10582 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10584 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10585 the same number of words as what we've seen so far. Then store
10586 the widest mode in MODE. */
10587 if (subreg_lowpart_p (varop)
10588 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10589 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10590 && (unsigned int) ((GET_MODE_SIZE (inner_mode)
10591 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10593 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10595 varop = SUBREG_REG (varop);
10596 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10603 /* Some machines use MULT instead of ASHIFT because MULT
10604 is cheaper. But it is still better on those machines to
10605 merge two shifts into one. */
10606 if (CONST_INT_P (XEXP (varop, 1))
10607 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10610 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10612 GEN_INT (exact_log2 (
10613 UINTVAL (XEXP (varop, 1)))));
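	  /* E.g. (mult X (const_int 8)) is rewritten here as
	     (ashift X (const_int 3)), since exact_log2 (8) == 3, so the
	     multiplication can merge with the shift being simplified. */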
10619 /* Similar, for when divides are cheaper. */
10620 if (CONST_INT_P (XEXP (varop, 1))
10621 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10624 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10626 GEN_INT (exact_log2 (
10627 UINTVAL (XEXP (varop, 1)))));
10633 /* If we are extracting just the sign bit of an arithmetic
10634 right shift, that shift is not needed. However, the sign
10635 bit of a wider mode may be different from what would be
10636 interpreted as the sign bit in a narrower mode, so, if
10637 the result is narrower, don't discard the shift. */
10638 if (code == LSHIFTRT
10639 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10640 && (GET_MODE_UNIT_BITSIZE (result_mode)
10641 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10643 varop = XEXP (varop, 0);
10652 /* The following rules apply only to scalars. */
10653 if (shift_mode != shift_unit_mode)
10655 int_mode = as_a <scalar_int_mode> (mode);
10656 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10657 int_result_mode = as_a <scalar_int_mode> (result_mode);
10659 /* Here we have two nested shifts. The result is usually the
10660 AND of a new shift with a mask. We compute the result below. */
10661 if (CONST_INT_P (XEXP (varop, 1))
10662 && INTVAL (XEXP (varop, 1)) >= 0
10663 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10664 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10665 && HWI_COMPUTABLE_MODE_P (int_mode))
10667 enum rtx_code first_code = GET_CODE (varop);
10668 unsigned int first_count = INTVAL (XEXP (varop, 1));
10669 unsigned HOST_WIDE_INT mask;
10672 /* We have one common special case. We can't do any merging if
10673 the inner code is an ASHIFTRT of a smaller mode. However, if
10674 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10675 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10676 we can convert it to
10677 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10678 This simplifies certain SIGN_EXTEND operations. */
10679 if (code == ASHIFT && first_code == ASHIFTRT
10680 && count == (GET_MODE_PRECISION (int_result_mode)
10681 - GET_MODE_PRECISION (int_varop_mode)))
10683 /* C3 has the low-order C1 bits zero. */
10685 mask = GET_MODE_MASK (int_mode)
10686 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10688 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10689 XEXP (varop, 0), mask);
10690 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10691 int_result_mode, varop, count);
10692 count = first_count;
10697 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10698 than C1 high-order bits equal to the sign bit, we can convert
10699 this to either an ASHIFT or an ASHIFTRT depending on the two counts.
10702 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10704 if (code == ASHIFTRT && first_code == ASHIFT
10705 && int_varop_mode == shift_unit_mode
10706 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10709 varop = XEXP (varop, 0);
10710 count -= first_count;
10720 /* There are some cases we can't do. If CODE is ASHIFTRT,
10721 we can only do this if FIRST_CODE is also ASHIFTRT.
10723 We can't do the case when CODE is ROTATE and FIRST_CODE is
10726 If the mode of this shift is not the mode of the outer shift,
10727 we can't do this if either shift is a right shift or ROTATE.
10729 Finally, we can't do any of these if the mode is too wide
10730 unless the codes are the same.
10732 Handle the case where the shift codes are the same first. */
10735 if (code == first_code)
10737 if (int_varop_mode != int_result_mode
10738 && (code == ASHIFTRT || code == LSHIFTRT
10739 || code == ROTATE))
10742 count += first_count;
10743 varop = XEXP (varop, 0);
10747 if (code == ASHIFTRT
10748 || (code == ROTATE && first_code == ASHIFTRT)
10749 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10750 || (int_varop_mode != int_result_mode
10751 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10752 || first_code == ROTATE
10753 || code == ROTATE)))
10756 /* To compute the mask to apply after the shift, shift the
10757 nonzero bits of the inner shift the same way the
10758 outer shift will. */
10760 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10764 = simplify_const_binary_operation (code, int_result_mode,
10765 mask_rtx, GEN_INT (count));
10767 /* Give up if we can't compute an outer operation to use. */
10769 || !CONST_INT_P (mask_rtx)
10770 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10772 int_result_mode, &complement_p))
10775 /* If the shifts are in the same direction, we add the
10776 counts. Otherwise, we subtract them. */
10777 if ((code == ASHIFTRT || code == LSHIFTRT)
10778 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10779 count += first_count;
10781 count -= first_count;
10783 /* If COUNT is positive, the new shift is usually CODE,
10784 except for the two exceptions below, in which case it is
10785 FIRST_CODE. If the count is negative, FIRST_CODE should
10786 always be used. */
10787 if (count > 0
10788 && ((first_code == ROTATE && code == ASHIFT)
10789 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10791 else if (count < 0)
10792 code = first_code, count = -count;
10794 varop = XEXP (varop, 0);
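	      /* Sketch of the same-direction case (illustrative):
		 simplifying (lshiftrt (lshiftrt X 2) 3) arrives here with
		 first_count == 2 and count == 3; the counts add, giving a
		 single (lshiftrt X 5), and the mask computed above supplies
		 the outer AND. */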
10798 /* If we have (A << B << C) for any shift, we can convert this to
10799 (A << C << B). This wins if A is a constant. Only try this if
10800 B is not a constant. */
10802 else if (GET_CODE (varop) == code
10803 && CONST_INT_P (XEXP (varop, 0))
10804 && !CONST_INT_P (XEXP (varop, 1)))
10806 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10807 sure the result will be masked. See PR70222. */
10808 if (code == LSHIFTRT
10809 && int_mode != int_result_mode
10810 && !merge_outer_ops (&outer_op, &outer_const, AND,
10811 GET_MODE_MASK (int_result_mode)
10812 >> orig_count, int_result_mode,
10815 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10816 up outer sign extension (often left and right shift) is
10817 hardly more efficient than the original. See PR70429. */
10818 if (code == ASHIFTRT && int_mode != int_result_mode)
10821 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10824 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10831 /* The following rules apply only to scalars. */
10832 if (shift_mode != shift_unit_mode)
10835 /* Make this fit the case below. */
10836 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10842 /* The following rules apply only to scalars. */
10843 if (shift_mode != shift_unit_mode)
10845 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10846 int_result_mode = as_a <scalar_int_mode> (result_mode);
10848 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10849 with C the size of VAROP - 1 and the shift is logical if
10850 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10851 we have an (le X 0) operation. If we have an arithmetic shift
10852 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10853 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10855 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10856 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10857 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10858 && (code == LSHIFTRT || code == ASHIFTRT)
10859 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10860 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10863 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10866 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10867 varop = gen_rtx_NEG (int_varop_mode, varop);
10872 /* If we have (shift (logical)), move the logical to the outside
10873 to allow it to possibly combine with another logical and the
10874 shift to combine with another shift. This also canonicalizes to
10875 what a ZERO_EXTRACT looks like. Also, some machines have
10876 (and (shift)) insns. */
10878 if (CONST_INT_P (XEXP (varop, 1))
10879 /* We can't do this if we have (ashiftrt (xor)) and the
10880 constant has its sign bit set in shift_unit_mode with
10881 shift_unit_mode wider than result_mode. */
10882 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10883 && int_result_mode != shift_unit_mode
10884 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10886 && (new_rtx = simplify_const_binary_operation
10887 (code, int_result_mode,
10888 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10889 GEN_INT (count))) != 0
10890 && CONST_INT_P (new_rtx)
10891 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10892 INTVAL (new_rtx), int_result_mode,
10895 varop = XEXP (varop, 0);
10899 /* If we can't do that, try to simplify the shift in each arm of the
10900 logical expression, make a new logical expression, and apply
10901 the inverse distributive law. This also can't be done for
10902 (ashiftrt (xor)) where we've widened the shift and the constant
10903 changes the sign bit. */
10904 if (CONST_INT_P (XEXP (varop, 1))
10905 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10906 && int_result_mode != shift_unit_mode
10907 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10910 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10911 XEXP (varop, 0), count);
10912 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10913 XEXP (varop, 1), count);
10915 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
10917 varop = apply_distributive_law (varop);
10925 /* The following rules apply only to scalars. */
10926 if (shift_mode != shift_unit_mode)
10928 int_result_mode = as_a <scalar_int_mode> (result_mode);
10930 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10931 says that the sign bit can be tested, FOO has mode MODE, C is
10932 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10933 that may be nonzero. */
10934 if (code == LSHIFTRT
10935 && XEXP (varop, 1) == const0_rtx
10936 && GET_MODE (XEXP (varop, 0)) == int_result_mode
10937 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10938 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10939 && STORE_FLAG_VALUE == -1
10940 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10941 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10942 int_result_mode, &complement_p))
10944 varop = XEXP (varop, 0);
10951 /* The following rules apply only to scalars. */
10952 if (shift_mode != shift_unit_mode)
10954 int_result_mode = as_a <scalar_int_mode> (result_mode);
10956 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10957 than the number of bits in the mode is equivalent to A. */
10958 if (code == LSHIFTRT
10959 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10960 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
10962 varop = XEXP (varop, 0);
10967 /* NEG commutes with ASHIFT since it is multiplication. Move the
10968 NEG outside to allow shifts to combine. */
10970 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
10971 int_result_mode, &complement_p))
10973 varop = XEXP (varop, 0);
10979 /* The following rules apply only to scalars. */
10980 if (shift_mode != shift_unit_mode)
10982 int_result_mode = as_a <scalar_int_mode> (result_mode);
10984 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10985 is one less than the number of bits in the mode is
10986 equivalent to (xor A 1). */
10987 if (code == LSHIFTRT
10988 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10989 && XEXP (varop, 1) == constm1_rtx
10990 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10991 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10992 int_result_mode, &complement_p))
10995 varop = XEXP (varop, 0);
10999 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11000 that might be nonzero in BAR are those being shifted out and those
11001 bits are known zero in FOO, we can replace the PLUS with FOO.
11002 Similarly in the other operand order. This code occurs when
11003 we are computing the size of a variable-size array. */
11005 if ((code == ASHIFTRT || code == LSHIFTRT)
11006 && count < HOST_BITS_PER_WIDE_INT
11007 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11008 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11009 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11011 varop = XEXP (varop, 0);
11014 else if ((code == ASHIFTRT || code == LSHIFTRT)
11015 && count < HOST_BITS_PER_WIDE_INT
11016 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11017 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11019 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11020 & nonzero_bits (XEXP (varop, 1), int_result_mode)))
11022 varop = XEXP (varop, 1);
11026 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11028 && CONST_INT_P (XEXP (varop, 1))
11029 && (new_rtx = simplify_const_binary_operation
11030 (ASHIFT, int_result_mode,
11031 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11032 GEN_INT (count))) != 0
11033 && CONST_INT_P (new_rtx)
11034 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11035 INTVAL (new_rtx), int_result_mode,
11038 varop = XEXP (varop, 0);
11042 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11043 signbit', and attempt to change the PLUS to an XOR and move it to
11044 the outer operation as is done above in the AND/IOR/XOR case
11045 leg for shift (logical). See details in the logical handling above
11046 for the reasoning behind doing so. */
11047 if (code == LSHIFTRT
11048 && CONST_INT_P (XEXP (varop, 1))
11049 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11050 && (new_rtx = simplify_const_binary_operation
11051 (code, int_result_mode,
11052 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11053 GEN_INT (count))) != 0
11054 && CONST_INT_P (new_rtx)
11055 && merge_outer_ops (&outer_op, &outer_const, XOR,
11056 INTVAL (new_rtx), int_result_mode,
11059 varop = XEXP (varop, 0);
11066 /* The following rules apply only to scalars. */
11067 if (shift_mode != shift_unit_mode)
11069 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11071 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11072 with C the size of VAROP - 1 and the shift is logical if
11073 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11074 we have a (gt X 0) operation. If the shift is arithmetic with
11075 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11076 we have a (neg (gt X 0)) operation. */
11078 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11079 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11080 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11081 && (code == LSHIFTRT || code == ASHIFTRT)
11082 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11083 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11084 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11087 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11090 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11091 varop = gen_rtx_NEG (int_varop_mode, varop);
11098 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11099 if the truncate does not affect the value. */
11100 if (code == LSHIFTRT
11101 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11102 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11103 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11104 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11105 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11107 rtx varop_inner = XEXP (varop, 0);
11110 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11111 XEXP (varop_inner, 0),
11113 (count + INTVAL (XEXP (varop_inner, 1))));
11114 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
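	      /* E.g. with a DImode inner value truncated to SImode:
		 (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3) becomes
		 (truncate:SI (lshiftrt:DI X 35)), because the inner shift
		 already discarded every bit the truncation could affect. */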
11127 shift_mode = result_mode;
11128 if (shift_mode != mode)
11130 /* We only change the modes of scalar shifts. */
11131 int_mode = as_a <scalar_int_mode> (mode);
11132 int_result_mode = as_a <scalar_int_mode> (result_mode);
11133 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11134 int_mode, outer_op, outer_const);
11137 /* We have now finished analyzing the shift. The result should be
11138 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11139 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11140 to the result of the shift. OUTER_CONST is the relevant constant,
11141 but we must turn off all bits turned off in the shift. */
11143 if (outer_op == UNKNOWN
11144 && orig_code == code && orig_count == count
11145 && varop == orig_varop
11146 && shift_mode == GET_MODE (varop))
11149 /* Make a SUBREG if necessary. If we can't make it, fail. */
11150 varop = gen_lowpart (shift_mode, varop);
11151 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11154 /* If we have an outer operation and we just made a shift, it is
11155 possible that we could have simplified the shift were it not
11156 for the outer operation. So try to do the simplification
11157 recursively. */
11159 if (outer_op != UNKNOWN)
11160 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11165 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11167 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11168 turn off all the bits that the shift would have turned off. */
11169 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11170 /* We only change the modes of scalar shifts. */
11171 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11172 x, GET_MODE_MASK (result_mode) >> orig_count);
11174 /* Do the remainder of the processing in RESULT_MODE. */
11175 x = gen_lowpart_or_truncate (result_mode, x);
11177 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11178 operation. */
11179 if (complement_p)
11180 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11182 if (outer_op != UNKNOWN)
11184 int_result_mode = as_a <scalar_int_mode> (result_mode);
11186 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11187 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11188 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11190 if (outer_op == AND)
11191 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11192 else if (outer_op == SET)
11194 /* This means that we have determined that the result is
11195 equivalent to a constant. This should be rare. */
11196 if (!side_effects_p (x))
11197 x = GEN_INT (outer_const);
11199 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11200 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11202 x = simplify_gen_binary (outer_op, int_result_mode, x,
11203 GEN_INT (outer_const));
11209 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11210 The result of the shift is RESULT_MODE. If we cannot simplify it,
11211 return X or, if it is NULL, synthesize the expression with
11212 simplify_gen_binary. Otherwise, return a simplified value.
11214 The shift is normally computed in the widest mode we find in VAROP, as
11215 long as it isn't a different number of words than RESULT_MODE. Exceptions
11216 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11219 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11220 rtx varop, int count)
11222 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11227 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11228 if (GET_MODE (x) != result_mode)
11229 x = gen_lowpart (result_mode, x);
11234 /* A subroutine of recog_for_combine. See there for arguments and return value. */
11238 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11240 rtx pat = *pnewpat;
11241 rtx pat_without_clobbers;
11242 int insn_code_number;
11243 int num_clobbers_to_add = 0;
11245 rtx notes = NULL_RTX;
11246 rtx old_notes, old_pat;
11249 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11250 we use to indicate that something didn't match. If we find such a
11251 thing, force rejection. */
11252 if (GET_CODE (pat) == PARALLEL)
11253 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11254 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11255 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11258 old_pat = PATTERN (insn);
11259 old_notes = REG_NOTES (insn);
11260 PATTERN (insn) = pat;
11261 REG_NOTES (insn) = NULL_RTX;
11263 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11264 if (dump_file && (dump_flags & TDF_DETAILS))
11266 if (insn_code_number < 0)
11267 fputs ("Failed to match this instruction:\n", dump_file);
11269 fputs ("Successfully matched this instruction:\n", dump_file);
11270 print_rtl_single (dump_file, pat);
11273 /* If it isn't, there is the possibility that we previously had an insn
11274 that clobbered some register as a side effect, but the combined
11275 insn doesn't need to do that. So try once more without the clobbers
11276 unless this represents an ASM insn. */
11278 if (insn_code_number < 0 && ! check_asm_operands (pat)
11279 && GET_CODE (pat) == PARALLEL)
11283 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11284 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11287 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11291 SUBST_INT (XVECLEN (pat, 0), pos);
11294 pat = XVECEXP (pat, 0, 0);
11296 PATTERN (insn) = pat;
11297 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11298 if (dump_file && (dump_flags & TDF_DETAILS))
11300 if (insn_code_number < 0)
11301 fputs ("Failed to match this instruction:\n", dump_file);
11303 fputs ("Successfully matched this instruction:\n", dump_file);
11304 print_rtl_single (dump_file, pat);
11308 pat_without_clobbers = pat;
11310 PATTERN (insn) = old_pat;
11311 REG_NOTES (insn) = old_notes;
11313 /* Recognize all noop sets; these will be killed by a followup pass. */
11314 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11315 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11317 /* If we had any clobbers to add, make a new pattern that contains
11318 them. Then check to make sure that all of them are dead. */
11319 if (num_clobbers_to_add)
11321 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11322 rtvec_alloc (GET_CODE (pat) == PARALLEL
11323 ? (XVECLEN (pat, 0)
11324 + num_clobbers_to_add)
11325 : num_clobbers_to_add + 1));
11327 if (GET_CODE (pat) == PARALLEL)
11328 for (i = 0; i < XVECLEN (pat, 0); i++)
11329 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11331 XVECEXP (newpat, 0, 0) = pat;
11333 add_clobbers (newpat, insn_code_number);
11335 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11336 i < XVECLEN (newpat, 0); i++)
11338 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11339 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11341 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11343 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11344 notes = alloc_reg_note (REG_UNUSED,
11345 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11351 if (insn_code_number >= 0
11352 && insn_code_number != NOOP_MOVE_INSN_CODE)
11354 old_pat = PATTERN (insn);
11355 old_notes = REG_NOTES (insn);
11356 old_icode = INSN_CODE (insn);
11357 PATTERN (insn) = pat;
11358 REG_NOTES (insn) = notes;
11359 INSN_CODE (insn) = insn_code_number;
11361 /* Allow targets to reject combined insn. */
11362 if (!targetm.legitimate_combined_insn (insn))
11364 if (dump_file && (dump_flags & TDF_DETAILS))
11365 fputs ("Instruction not appropriate for target.",
11366 dump_file);
11368 /* Callers expect recog_for_combine to strip
11369 clobbers from the pattern on failure. */
11370 pat = pat_without_clobbers;
11371 notes = NULL_RTX;
11373 insn_code_number = -1;
11376 PATTERN (insn) = old_pat;
11377 REG_NOTES (insn) = old_notes;
11378 INSN_CODE (insn) = old_icode;
11381 *pnewpat = pat;
11382 *pnotes = notes;
11384 return insn_code_number;
11387 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11388 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11389 Return whether anything was so changed. */
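/* Illustrative examples, assuming 32-bit SImode, 8-bit QImode and
   !BITS_BIG_ENDIAN:

     (zero_extend:SI (subreg:QI (reg:SI 100) 0))
       -> (and:SI (reg:SI 100) (const_int 255))

     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4))
       -> (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4))
                  (const_int 255))

   The AND is omitted when an LSHIFTRT already guarantees that the high
   bits are zero.  */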
11391 static bool
11392 change_zero_ext (rtx pat)
11394 bool changed = false;
11395 rtx *src = &SET_SRC (pat);
11397 subrtx_ptr_iterator::array_type array;
11398 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11400 rtx x = **iter;
11401 scalar_int_mode mode, inner_mode;
11402 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11403 continue;
11404 int size;
11406 if (GET_CODE (x) == ZERO_EXTRACT
11407 && CONST_INT_P (XEXP (x, 1))
11408 && CONST_INT_P (XEXP (x, 2))
11409 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11410 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11412 size = INTVAL (XEXP (x, 1));
11414 int start = INTVAL (XEXP (x, 2));
11415 if (BITS_BIG_ENDIAN)
11416 start = GET_MODE_PRECISION (inner_mode) - size - start;
11418 if (start)
11419 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
11420 else
11421 x = XEXP (x, 0);
11422 if (mode != inner_mode)
11423 x = gen_lowpart_SUBREG (mode, x);
11425 else if (GET_CODE (x) == ZERO_EXTEND
11426 && GET_CODE (XEXP (x, 0)) == SUBREG
11427 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11428 && !paradoxical_subreg_p (XEXP (x, 0))
11429 && subreg_lowpart_p (XEXP (x, 0)))
11431 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11432 size = GET_MODE_PRECISION (inner_mode);
11433 x = SUBREG_REG (XEXP (x, 0));
11434 if (GET_MODE (x) != mode)
11435 x = gen_lowpart_SUBREG (mode, x);
11437 else if (GET_CODE (x) == ZERO_EXTEND
11438 && REG_P (XEXP (x, 0))
11439 && HARD_REGISTER_P (XEXP (x, 0))
11440 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11442 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11443 size = GET_MODE_PRECISION (inner_mode);
11444 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11446 else
11447 continue;
11449 if (!(GET_CODE (x) == LSHIFTRT
11450 && CONST_INT_P (XEXP (x, 1))
11451 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11453 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11454 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11457 SUBST (**iter, x);
11458 changed = true;
11461 if (changed)
11462 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11463 maybe_swap_commutative_operands (**iter);
11465 rtx *dst = &SET_DEST (pat);
11466 scalar_int_mode mode;
11467 if (GET_CODE (*dst) == ZERO_EXTRACT
11468 && REG_P (XEXP (*dst, 0))
11469 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11470 && CONST_INT_P (XEXP (*dst, 1))
11471 && CONST_INT_P (XEXP (*dst, 2)))
11473 rtx reg = XEXP (*dst, 0);
11474 int width = INTVAL (XEXP (*dst, 1));
11475 int offset = INTVAL (XEXP (*dst, 2));
11476 int reg_width = GET_MODE_PRECISION (mode);
11477 if (BITS_BIG_ENDIAN)
11478 offset = reg_width - width - offset;
11480 rtx x, y, z, w;
11481 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11482 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11483 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11485 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11488 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11489 w = gen_rtx_IOR (mode, x, z);
11490 SUBST (SET_DEST (pat), reg);
11491 SUBST (SET_SRC (pat), w);
11492 return true;
11495 return changed;
11499 /* Like recog, but we receive the address of a pointer to a new pattern.
11500 We try to match the rtx that the pointer points to.
11501 If that fails, we may try to modify or replace the pattern,
11502 storing the replacement into the same pointer object.
11504 Modifications include deletion or addition of CLOBBERs. If the
11505 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11506 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11507 (and undo if that fails).
11509 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11510 the CLOBBERs are placed.
11512 The value is the final insn code from the pattern ultimately matched,
11513 otherwise -1.  */
11515 static int
11516 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11518 rtx pat = *pnewpat;
11519 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11520 if (insn_code_number >= 0 || check_asm_operands (pat))
11521 return insn_code_number;
11523 void *marker = get_undo_marker ();
11524 bool changed = false;
11526 if (GET_CODE (pat) == SET)
11527 changed = change_zero_ext (pat);
11528 else if (GET_CODE (pat) == PARALLEL)
11530 int i;
11531 for (i = 0; i < XVECLEN (pat, 0); i++)
11533 rtx set = XVECEXP (pat, 0, i);
11534 if (GET_CODE (set) == SET)
11535 changed |= change_zero_ext (set);
11539 if (changed)
11541 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11543 if (insn_code_number < 0)
11544 undo_to_marker (marker);
11547 return insn_code_number;
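/* Minimal usage sketch (hypothetical caller, for illustration only):

     rtx notes = NULL_RTX;
     int icode = recog_for_combine (&newpat, i3, &notes);
     if (icode < 0)
       ... undo the attempted combination ...

   On success NEWPAT may have been rewritten in place (clobbers stripped
   or added, ZERO_EXTEND/ZERO_EXTRACT recast) and NOTES carries any
   REG_UNUSED notes created for added clobbers.  */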
11550 /* Like gen_lowpart_general but for use by combine. In combine it
11551 is not possible to create any new pseudoregs. However, it is
11552 safe to create invalid memory addresses, because combine will
11553 try to recognize them and all they will do is make the combine
11556 If for some reason this cannot do its job, an rtx
11557 (clobber (const_int 0)) is returned.
11558 An insn containing that will not be recognized. */
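/* For instance (illustrative), gen_lowpart_for_combine (QImode,
   (reg:SI 100)) normally yields (subreg:QI (reg:SI 100) 0), while an
   unsupported request, say a multiword lowpart of an unrelated value,
   comes back as (clobber (const_int 0)) and simply makes the enclosing
   recognition attempt fail.  */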
11560 static rtx
11561 gen_lowpart_for_combine (machine_mode omode, rtx x)
11563 machine_mode imode = GET_MODE (x);
11564 unsigned int osize = GET_MODE_SIZE (omode);
11565 unsigned int isize = GET_MODE_SIZE (imode);
11566 rtx result;
11568 if (omode == imode)
11569 return x;
11571 /* We can only support MODE being wider than a word if X is a
11572 constant integer or has a mode the same size. */
11573 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11574 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11575 goto fail;
11577 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11578 won't know what to do. So we will strip off the SUBREG here and
11579 process normally. */
11580 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11582 x = SUBREG_REG (x);
11584 /* For use in case we fall down into the address adjustments
11585 further below, we need to adjust the known mode and size of
11586 x; imode and isize, since we just adjusted x. */
11587 imode = GET_MODE (x);
11589 if (imode == omode)
11590 return x;
11592 isize = GET_MODE_SIZE (imode);
11595 result = gen_lowpart_common (omode, x);
11597 if (result)
11598 return result;
11600 if (MEM_P (x))
11602 int offset = 0;
11604 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11605    address.  */
11606 if (MEM_VOLATILE_P (x)
11607 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11608 goto fail;
11610 /* If we want to refer to something bigger than the original memref,
11611 generate a paradoxical subreg instead. That will force a reload
11612 of the original memref X. */
11613 if (paradoxical_subreg_p (omode, imode))
11614 return gen_rtx_SUBREG (omode, x, 0);
11616 if (WORDS_BIG_ENDIAN)
11617 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
11619 /* Adjust the address so that the address-after-the-data is
11621 if (BYTES_BIG_ENDIAN)
11622 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
11624 return adjust_address_nv (x, omode, offset);
11627 /* If X is a comparison operator, rewrite it in a new mode. This
11628 probably won't match, but may allow further simplifications. */
11629 else if (COMPARISON_P (x))
11630 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11632 /* If we couldn't simplify X any other way, just enclose it in a
11633 SUBREG. Normally, this SUBREG won't match, but some patterns may
11634 include an explicit SUBREG or we may simplify it further in combine. */
11635 else
11637 rtx res;
11639 if (imode == VOIDmode)
11641 imode = int_mode_for_mode (omode).require ();
11642 x = gen_lowpart_common (imode, x);
11643 if (x == NULL)
11644 goto fail;
11646 res = lowpart_subreg (omode, x, imode);
11647 if (res)
11648 return res;
11650 fail:
11652 return gen_rtx_CLOBBER (omode, const0_rtx);
11655 /* Try to simplify a comparison between OP0 and a constant OP1,
11656 where CODE is the comparison code that will be tested, into a
11657 (CODE OP0 const0_rtx) form.
11659 The result is a possibly different comparison code to use.
11660 *POP1 may be updated. */
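/* Examples of the canonicalizations performed here (illustrative):
   (LTU x (const_int 1)) becomes (EQ x (const_int 0)),
   (GEU x (const_int 1)) becomes (NE x (const_int 0)), and a constant may
   simply shrink, as in (LT x (const_int 256)) becoming
   (LE x (const_int 255)).  */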
11662 static enum rtx_code
11663 simplify_compare_const (enum rtx_code code, machine_mode mode,
11664 rtx op0, rtx *pop1)
11666 scalar_int_mode int_mode;
11667 HOST_WIDE_INT const_op = INTVAL (*pop1);
11669 /* Get the constant we are comparing against and turn off all bits
11670 not on in our mode. */
11671 if (mode != VOIDmode)
11672 const_op = trunc_int_for_mode (const_op, mode);
11674 /* If we are comparing against a constant power of two and the value
11675 being compared can only have that single bit nonzero (e.g., it was
11676 `and'ed with that bit), we can replace this with a comparison
11677 with zero.  */
11678 if (const_op
11679 && (code == EQ || code == NE || code == GE || code == GEU
11680 || code == LT || code == LTU)
11681 && is_a <scalar_int_mode> (mode, &int_mode)
11682 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11683 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11684 && (nonzero_bits (op0, int_mode)
11685 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11687 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11688 const_op = 0;
11691 /* Similarly, if we are comparing a value known to be either -1 or
11692 0 with -1, change it to the opposite comparison against zero. */
11693 if (const_op == -1
11694 && (code == EQ || code == NE || code == GT || code == LE
11695 || code == GEU || code == LTU)
11696 && is_a <scalar_int_mode> (mode, &int_mode)
11697 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11699 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11700 const_op = 0;
11703 /* Do some canonicalizations based on the comparison code. We prefer
11704 comparisons against zero and then prefer equality comparisons.
11705 If we can reduce the size of a constant, we will do that too. */
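/* Two concrete instances of the rules applied in this switch:
   (LT x (const_int 5)) -> (LE x (const_int 4)) and
   (GTU x (const_int 0)) -> (NE x (const_int 0)).  */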
11706 switch (code)
11708 case LT:
11709 /* < C is equivalent to <= (C - 1) */
11710 if (const_op > 0)
11712 const_op -= 1;
11713 code = LE;
11714 /* ... fall through to LE case below. */
11715 gcc_fallthrough ();
11717 else
11718 break;
11720 case LE:
11721 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11722 if (const_op < 0)
11724 const_op += 1;
11725 code = LT;
11728 /* If we are doing a <= 0 comparison on a value known to have
11729 a zero sign bit, we can replace this with == 0. */
11730 else if (const_op == 0
11731 && is_a <scalar_int_mode> (mode, &int_mode)
11732 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11733 && (nonzero_bits (op0, int_mode)
11734 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11735 == 0)
11736 code = EQ;
11737 break;
11739 case GE:
11740 /* >= C is equivalent to > (C - 1). */
11741 if (const_op > 0)
11743 const_op -= 1;
11744 code = GT;
11745 /* ... fall through to GT below. */
11746 gcc_fallthrough ();
11748 else
11749 break;
11751 case GT:
11752 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11753 if (const_op < 0)
11755 const_op += 1;
11756 code = GE;
11759 /* If we are doing a > 0 comparison on a value known to have
11760 a zero sign bit, we can replace this with != 0. */
11761 else if (const_op == 0
11762 && is_a <scalar_int_mode> (mode, &int_mode)
11763 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11764 && (nonzero_bits (op0, int_mode)
11765 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11766 == 0)
11767 code = NE;
11768 break;
11770 case LTU:
11771 /* < C is equivalent to <= (C - 1). */
11772 if (const_op > 0)
11774 const_op -= 1;
11775 code = LEU;
11776 /* ... fall through ... */
11778 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11779 else if (is_a <scalar_int_mode> (mode, &int_mode)
11780 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11781 && ((unsigned HOST_WIDE_INT) const_op
11782 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1))
11784 const_op = 0;
11785 code = GE;
11786 break;
11788 else
11789 break;
11791 case LEU:
11792 /* unsigned <= 0 is equivalent to == 0 */
11793 if (const_op == 0)
11794 code = EQ;
11795 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11796 else if (is_a <scalar_int_mode> (mode, &int_mode)
11797 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11798 && ((unsigned HOST_WIDE_INT) const_op
11799 == ((HOST_WIDE_INT_1U
11800 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11802 const_op = 0;
11803 code = GE;
11805 break;
11807 case GEU:
11808 /* >= C is equivalent to > (C - 1). */
11809 if (const_op > 1)
11811 const_op -= 1;
11812 code = GTU;
11813 /* ... fall through ... */
11814 gcc_fallthrough ();
11816 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11817 else if (is_a <scalar_int_mode> (mode, &int_mode)
11818 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11819 && ((unsigned HOST_WIDE_INT) const_op
11820 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1))
11822 const_op = 0;
11823 code = LT;
11824 break;
11826 else
11827 break;
11829 case GTU:
11830 /* unsigned > 0 is equivalent to != 0 */
11831 if (const_op == 0)
11832 code = NE;
11833 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11834 else if (is_a <scalar_int_mode> (mode, &int_mode)
11835 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11836 && ((unsigned HOST_WIDE_INT) const_op
11837 == (HOST_WIDE_INT_1U
11838 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11840 const_op = 0;
11841 code = LT;
11843 break;
11845 default:
11846 break;
11849 *pop1 = GEN_INT (const_op);
11850 return code;
11853 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11854 comparison code that will be tested.
11856 The result is a possibly different comparison code to use. *POP0 and
11857 *POP1 may be updated.
11859 It is possible that we might detect that a comparison is either always
11860 true or always false. However, we do not perform general constant
11861 folding in combine, so this knowledge isn't useful. Such tautologies
11862 should have been detected earlier. Hence we ignore all such cases. */
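/* Illustrative rewrites: comparing (and:SI x (const_int 255)) with
   (and:SI y (const_int 255)) for equality can become a QImode comparison
   of the unmasked values, and comparing (not x) with (not y) drops both
   NOTs and swaps the condition.  */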
11864 static enum rtx_code
11865 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11867 rtx op0 = *pop0;
11868 rtx op1 = *pop1;
11869 rtx tem, tem1;
11870 int i;
11871 scalar_int_mode mode, inner_mode, tmode;
11872 opt_scalar_int_mode tmode_iter;
11874 /* Try a few ways of applying the same transformation to both operands. */
11875 while (1)
11877 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11878 so check specially. */
11879 if (!WORD_REGISTER_OPERATIONS
11880 && code != GTU && code != GEU && code != LTU && code != LEU
11881 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11882 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11883 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11884 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11885 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11886 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11887 && (is_a <scalar_int_mode>
11888 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11889 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11890 && CONST_INT_P (XEXP (op0, 1))
11891 && XEXP (op0, 1) == XEXP (op1, 1)
11892 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11893 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11894 && (INTVAL (XEXP (op0, 1))
11895 == (GET_MODE_PRECISION (mode)
11896 - GET_MODE_PRECISION (inner_mode))))
11898 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11899 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11902 /* If both operands are the same constant shift, see if we can ignore the
11903 shift. We can if the shift is a rotate or if the bits shifted out of
11904 this shift are known to be zero for both inputs and if the type of
11905 comparison is compatible with the shift. */
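/* E.g. (illustrative): (lshiftrt:SI a (const_int 2)) compared with
   (lshiftrt:SI b (const_int 2)) for equality can ignore the shifts when
   the two low bits of both A and B are known to be zero, since the
   shift then discards nothing that could distinguish the operands.  */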
11906 if (GET_CODE (op0) == GET_CODE (op1)
11907 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11908 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11909 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11910 && (code != GT && code != LT && code != GE && code != LE))
11911 || (GET_CODE (op0) == ASHIFTRT
11912 && (code != GTU && code != LTU
11913 && code != GEU && code != LEU)))
11914 && CONST_INT_P (XEXP (op0, 1))
11915 && INTVAL (XEXP (op0, 1)) >= 0
11916 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11917 && XEXP (op0, 1) == XEXP (op1, 1))
11919 machine_mode mode = GET_MODE (op0);
11920 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11921 int shift_count = INTVAL (XEXP (op0, 1));
11923 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11924 mask &= (mask >> shift_count) << shift_count;
11925 else if (GET_CODE (op0) == ASHIFT)
11926 mask = (mask & (mask << shift_count)) >> shift_count;
11928 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11929 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11930 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11935 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11936 SUBREGs are of the same mode, and, in both cases, the AND would
11937 be redundant if the comparison was done in the narrower mode,
11938 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11939 and the operand's possibly nonzero bits are 0xffffff01; in that case
11940 if we only care about QImode, we don't need the AND). This case
11941 occurs if the output mode of an scc insn is not SImode and
11942 STORE_FLAG_VALUE == 1 (e.g., the 386).
11944 Similarly, check for a case where the AND's are ZERO_EXTEND
11945 operations from some narrower mode even though a SUBREG is not
11946 present.  */
11948 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11949 && CONST_INT_P (XEXP (op0, 1))
11950 && CONST_INT_P (XEXP (op1, 1)))
11952 rtx inner_op0 = XEXP (op0, 0);
11953 rtx inner_op1 = XEXP (op1, 0);
11954 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11955 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11956 int changed = 0;
11958 if (paradoxical_subreg_p (inner_op0)
11959 && GET_CODE (inner_op1) == SUBREG
11960 && (GET_MODE (SUBREG_REG (inner_op0))
11961 == GET_MODE (SUBREG_REG (inner_op1)))
11962 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11963 <= HOST_BITS_PER_WIDE_INT)
11964 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11965 GET_MODE (SUBREG_REG (inner_op0)))))
11966 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11967 GET_MODE (SUBREG_REG (inner_op1))))))
11969 op0 = SUBREG_REG (inner_op0);
11970 op1 = SUBREG_REG (inner_op1);
11972 /* The resulting comparison is always unsigned since we masked
11973 off the original sign bit. */
11974 code = unsigned_condition (code);
11976 changed = 1;
11979 else if (c0 == c1)
11980 FOR_EACH_MODE_UNTIL (tmode,
11981 as_a <scalar_int_mode> (GET_MODE (op0)))
11982 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11984 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
11985 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
11986 code = unsigned_condition (code);
11987 changed = 1;
11988 break;
11991 if (! changed)
11992 break;
11995 /* If both operands are NOT, we can strip off the outer operation
11996 and adjust the comparison code for swapped operands; similarly for
11997 NEG, except that this must be an equality comparison. */
11998 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11999 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12000 && (code == EQ || code == NE)))
12001 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12003 else
12004 break;
12007 /* If the first operand is a constant, swap the operands and adjust the
12008 comparison code appropriately, but don't do this if the second operand
12009 is already a constant integer. */
12010 if (swap_commutative_operands_p (op0, op1))
12012 std::swap (op0, op1);
12013 code = swap_condition (code);
12016 /* We now enter a loop during which we will try to simplify the comparison.
12017 For the most part, we are only concerned with comparisons with zero,
12018 but some things may really be comparisons with zero but not start
12019 out looking that way. */
12021 while (CONST_INT_P (op1))
12023 machine_mode raw_mode = GET_MODE (op0);
12024 scalar_int_mode int_mode;
12025 int equality_comparison_p;
12026 int sign_bit_comparison_p;
12027 int unsigned_comparison_p;
12028 HOST_WIDE_INT const_op;
12030 /* We only want to handle integral modes. This catches VOIDmode,
12031 CCmode, and the floating-point modes. An exception is that we
12032 can handle VOIDmode if OP0 is a COMPARE or a comparison
12033 operation.  */
12035 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12036 && ! (raw_mode == VOIDmode
12037 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12038 break;
12040 /* Try to simplify the compare to constant, possibly changing the
12041 comparison op, and/or changing op1 to zero. */
12042 code = simplify_compare_const (code, raw_mode, op0, &op1);
12043 const_op = INTVAL (op1);
12045 /* Compute some predicates to simplify code below. */
12047 equality_comparison_p = (code == EQ || code == NE);
12048 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12049 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12050 || code == GEU);
12052 /* If this is a sign bit comparison and we can do arithmetic in
12053 MODE, say that we will only be needing the sign bit of OP0. */
12054 if (sign_bit_comparison_p
12055 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12056 && HWI_COMPUTABLE_MODE_P (int_mode))
12057 op0 = force_to_mode (op0, int_mode,
12058 HOST_WIDE_INT_1U
12059 << (GET_MODE_PRECISION (int_mode) - 1),
12060 0);
12062 if (COMPARISON_P (op0))
12064 /* We can't do anything if OP0 is a condition code value, rather
12065 than an actual data value. */
12066 if (const_op != 0
12067 || CC0_P (XEXP (op0, 0))
12068 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12069 break;
12071 /* Get the two operands being compared. */
12072 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12073 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12074 else
12075 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12077 /* Check for the cases where we simply want the result of the
12078 earlier test or the opposite of that result. */
12079 if (code == NE || code == EQ
12080 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12081 && (code == LT || code == GE)))
12083 enum rtx_code new_code;
12084 if (code == LT || code == NE)
12085 new_code = GET_CODE (op0);
12087 new_code = reversed_comparison_code (op0, NULL);
12089 if (new_code != UNKNOWN)
12091 code = new_code;
12092 op0 = tem;
12093 op1 = tem1;
12094 continue;
12097 break;
12100 if (raw_mode == VOIDmode)
12101 break;
12102 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12104 /* Now try cases based on the opcode of OP0. If none of the cases
12105 does a "continue", we exit this loop immediately after the
12106 switch.  */
12108 unsigned int mode_width = GET_MODE_PRECISION (mode);
12109 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12110 switch (GET_CODE (op0))
12112 case ZERO_EXTRACT:
12113 /* If we are extracting a single bit from a variable position in
12114 a constant that has only a single bit set and are comparing it
12115 with zero, we can convert this into an equality comparison
12116 between the position and the location of the single bit. */
12117 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12118 have already reduced the shift count modulo the word size. */
12119 if (!SHIFT_COUNT_TRUNCATED
12120 && CONST_INT_P (XEXP (op0, 0))
12121 && XEXP (op0, 1) == const1_rtx
12122 && equality_comparison_p && const_op == 0
12123 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12125 if (BITS_BIG_ENDIAN)
12126 i = BITS_PER_WORD - 1 - i;
12128 op0 = XEXP (op0, 2);
12129 op1 = GEN_INT (i);
12130 const_op = i;
12132 /* Result is nonzero iff shift count is equal to I. */
12133 code = reverse_condition (code);
12134 continue;
12137 /* Fall through. */
12139 case SIGN_EXTRACT:
12140 tem = expand_compound_operation (op0);
12141 if (tem != op0)
12143 op0 = tem;
12144 continue;
12146 break;
12148 case NOT:
12149 /* If testing for equality, we can take the NOT of the constant. */
12150 if (equality_comparison_p
12151 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12153 op0 = XEXP (op0, 0);
12154 op1 = tem;
12155 continue;
12158 /* If just looking at the sign bit, reverse the sense of the
12159    comparison.  */
12160 if (sign_bit_comparison_p)
12162 op0 = XEXP (op0, 0);
12163 code = (code == GE ? LT : GE);
12164 continue;
12166 break;
12168 case NEG:
12169 /* If testing for equality, we can take the NEG of the constant. */
12170 if (equality_comparison_p
12171 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12173 op0 = XEXP (op0, 0);
12174 op1 = tem;
12175 continue;
12178 /* The remaining cases only apply to comparisons with zero. */
12179 if (const_op != 0)
12180 break;
12182 /* When X is ABS or is known positive,
12183 (neg X) is < 0 if and only if X != 0. */
12185 if (sign_bit_comparison_p
12186 && (GET_CODE (XEXP (op0, 0)) == ABS
12187 || (mode_width <= HOST_BITS_PER_WIDE_INT
12188 && (nonzero_bits (XEXP (op0, 0), mode)
12189 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12190 == 0)))
12192 op0 = XEXP (op0, 0);
12193 code = (code == LT ? NE : EQ);
12194 continue;
12197 /* If we have NEG of something whose two high-order bits are the
12198 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12199 if (num_sign_bit_copies (op0, mode) >= 2)
12201 op0 = XEXP (op0, 0);
12202 code = swap_condition (code);
12203 continue;
12205 break;
12207 case ROTATE:
12208 /* If we are testing equality and our count is a constant, we
12209 can perform the inverse operation on our RHS. */
12210 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12211 && (tem = simplify_binary_operation (ROTATERT, mode,
12212 op1, XEXP (op0, 1))) != 0)
12214 op0 = XEXP (op0, 0);
12215 op1 = tem;
12216 continue;
12219 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12220 a particular bit. Convert it to an AND of a constant of that
12221 bit. This will be converted into a ZERO_EXTRACT. */
12222 if (const_op == 0 && sign_bit_comparison_p
12223 && CONST_INT_P (XEXP (op0, 1))
12224 && mode_width <= HOST_BITS_PER_WIDE_INT)
12226 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12227 (HOST_WIDE_INT_1U
12228 << (mode_width - 1
12229 - INTVAL (XEXP (op0, 1)))));
12230 code = (code == LT ? NE : EQ);
12231 continue;
12234 /* Fall through. */
12236 case ABS:
12237 /* ABS is ignorable inside an equality comparison with zero. */
12238 if (const_op == 0 && equality_comparison_p)
12240 op0 = XEXP (op0, 0);
12241 continue;
12243 break;
12245 case SIGN_EXTEND:
12246 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12247 (compare FOO CONST) if CONST fits in FOO's mode and we
12248 are either testing inequality or have an unsigned
12249 comparison with ZERO_EXTEND or a signed comparison with
12250 SIGN_EXTEND. But don't do it if we don't have a compare
12251 insn of the given mode, since we'd have to revert it
12252 later on, and then we wouldn't know whether to sign- or
12253 zero-extend.  */
12254 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12255 && ! unsigned_comparison_p
12256 && HWI_COMPUTABLE_MODE_P (mode)
12257 && trunc_int_for_mode (const_op, mode) == const_op
12258 && have_insn_for (COMPARE, mode))
12260 op0 = XEXP (op0, 0);
12261 continue;
12263 break;
12265 case SUBREG:
12266 /* Check for the case where we are comparing A - C1 with C2, that is
12268 (subreg:MODE (plus (A) (-C1))) op (C2)
12270 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12271 comparison in the wider mode. One of the following two conditions
12272 must be true in order for this to be valid:
12274 1. The mode extension results in the same bit pattern being added
12275 on both sides and the comparison is equality or unsigned. As
12276 C2 has been truncated to fit in MODE, the pattern can only be
12277 all 0s or all 1s.
12279 2. The mode extension results in the sign bit being copied on
12280 each side.
12282 The difficulty here is that we have predicates for A but not for
12283 (A - C1) so we need to check that C1 is within proper bounds so
12284 as to perturb A as little as possible.  */
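/* Illustrative instance with QImode inside SImode: comparing
   (subreg:QI (plus:SI A (const_int -1))) with 5 for equality has C1 = 1
   and C2 = 5, and when A's bits above QImode are known zero the SUBREG
   can usually be lifted so the whole comparison is done in SImode.  */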
12286 if (mode_width <= HOST_BITS_PER_WIDE_INT
12287 && subreg_lowpart_p (op0)
12288 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12290 && GET_MODE_PRECISION (inner_mode) > mode_width
12291 && GET_CODE (SUBREG_REG (op0)) == PLUS
12292 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12294 rtx a = XEXP (SUBREG_REG (op0), 0);
12295 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12297 if ((c1 > 0
12298 && (unsigned HOST_WIDE_INT) c1
12299 < HOST_WIDE_INT_1U << (mode_width - 1)
12300 && (equality_comparison_p || unsigned_comparison_p)
12301 /* (A - C1) zero-extends if it is positive and sign-extends
12302 if it is negative, C2 both zero- and sign-extends. */
12303 && ((0 == (nonzero_bits (a, inner_mode)
12304 & ~GET_MODE_MASK (mode))
12305 && c1 >= 0)
12306 /* (A - C1) sign-extends if it is positive and 1-extends
12307 if it is negative, C2 both sign- and 1-extends. */
12308 || (num_sign_bit_copies (a, inner_mode)
12309 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12310 - mode_width)
12311 && const_op < 0)))
12312 || ((unsigned HOST_WIDE_INT) c1
12313 < HOST_WIDE_INT_1U << (mode_width - 2)
12314 /* (A - C1) always sign-extends, like C2. */
12315 && num_sign_bit_copies (a, inner_mode)
12316 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12317 - (mode_width - 1))))
12319 op0 = SUBREG_REG (op0);
12320 continue;
12324 /* If the inner mode is narrower and we are extracting the low part,
12325 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12326 if (paradoxical_subreg_p (op0))
12327 ;
12328 else if (subreg_lowpart_p (op0)
12329 && GET_MODE_CLASS (mode) == MODE_INT
12330 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12331 && (code == NE || code == EQ)
12332 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12333 && !paradoxical_subreg_p (op0)
12334 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12335 & ~GET_MODE_MASK (mode)) == 0)
12337 /* Remove outer subregs that don't do anything. */
12338 tem = gen_lowpart (inner_mode, op1);
12340 if ((nonzero_bits (tem, inner_mode)
12341 & ~GET_MODE_MASK (mode)) == 0)
12343 op0 = SUBREG_REG (op0);
12344 op1 = tem;
12345 continue;
12349 break;
12354 case ZERO_EXTEND:
12355 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12356 && (unsigned_comparison_p || equality_comparison_p)
12357 && HWI_COMPUTABLE_MODE_P (mode)
12358 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12359 && const_op >= 0
12360 && have_insn_for (COMPARE, mode))
12362 op0 = XEXP (op0, 0);
12363 continue;
12365 break;
12367 case PLUS:
12368 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12369 this for equality comparisons due to pathological cases involving
12370 overflows.  */
12371 if (equality_comparison_p
12372 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12373 op1, XEXP (op0, 1))))
12375 op0 = XEXP (op0, 0);
12376 op1 = tem;
12377 continue;
12380 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12381 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12382 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12384 op0 = XEXP (XEXP (op0, 0), 0);
12385 code = (code == LT ? EQ : NE);
12386 continue;
12388 break;
12390 case MINUS:
12391 /* We used to optimize signed comparisons against zero, but that
12392 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12393 arrive here as equality comparisons, or (GEU, LTU) are
12394 optimized away. No need to special-case them. */
12396 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12397 (eq B (minus A C)), whichever simplifies. We can only do
12398 this for equality comparisons due to pathological cases involving
12399 overflows.  */
12400 if (equality_comparison_p
12401 && 0 != (tem = simplify_binary_operation (PLUS, mode,
12402 XEXP (op0, 1), op1)))
12404 op0 = XEXP (op0, 0);
12405 op1 = tem;
12406 continue;
12409 if (equality_comparison_p
12410 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12411 XEXP (op0, 0), op1)))
12413 op0 = XEXP (op0, 1);
12414 op1 = tem;
12415 continue;
12418 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12419 of bits in X minus 1, is one iff X > 0. */
12420 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12421 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12422 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12423 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12425 op0 = XEXP (op0, 1);
12426 code = (code == GE ? LE : GT);
12427 continue;
12429 break;
12431 case XOR:
12432 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12433 if C is zero or B is a constant. */
12434 if (equality_comparison_p
12435 && 0 != (tem = simplify_binary_operation (XOR, mode,
12436 XEXP (op0, 1), op1)))
12438 op0 = XEXP (op0, 0);
12439 op1 = tem;
12440 continue;
12442 break;
12444 case IOR:
12446 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12447 iff X <= 0.  */
12448 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12449 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12450 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12452 op0 = XEXP (op0, 1);
12453 code = (code == GE ? GT : LE);
12454 continue;
12456 break;
12458 case AND:
12459 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12460 will be converted to a ZERO_EXTRACT later. */
12461 if (const_op == 0 && equality_comparison_p
12462 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12463 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12465 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12466 XEXP (XEXP (op0, 0), 1));
12467 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12468 continue;
12471 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12472 zero and X is a comparison and C1 and C2 describe only bits set
12473 in STORE_FLAG_VALUE, we can compare with X. */
12474 if (const_op == 0 && equality_comparison_p
12475 && mode_width <= HOST_BITS_PER_WIDE_INT
12476 && CONST_INT_P (XEXP (op0, 1))
12477 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12478 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12479 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12480 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12482 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12483 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12484 if ((~STORE_FLAG_VALUE & mask) == 0
12485 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12486 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12487 && COMPARISON_P (tem))))
12489 op0 = XEXP (XEXP (op0, 0), 0);
12490 continue;
12494 /* If we are doing an equality comparison of an AND of a bit equal
12495 to the sign bit, replace this with a LT or GE comparison of
12496 the underlying value. */
12497 if (equality_comparison_p
12498 && const_op == 0
12499 && CONST_INT_P (XEXP (op0, 1))
12500 && mode_width <= HOST_BITS_PER_WIDE_INT
12501 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12502 == HOST_WIDE_INT_1U << (mode_width - 1)))
12504 op0 = XEXP (op0, 0);
12505 code = (code == EQ ? GE : LT);
12506 continue;
12509 /* If this AND operation is really a ZERO_EXTEND from a narrower
12510 mode, the constant fits within that mode, and this is either an
12511 equality or unsigned comparison, try to do this comparison in
12512 the narrower mode.
12514 Note that in:
12516 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12517 -> (ne:DI (reg:SI 4) (const_int 0))
12519 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12520 known to hold a value of the required mode the
12521 transformation is invalid. */
12522 if ((equality_comparison_p || unsigned_comparison_p)
12523 && CONST_INT_P (XEXP (op0, 1))
12524 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12525 & GET_MODE_MASK (mode))
12526 + 1)) >= 0
12527 && const_op >> i == 0
12528 && int_mode_for_size (i, 1).exists (&tmode))
12530 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12531 continue;
12534 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12535 fits in both M1 and M2 and the SUBREG is either paradoxical
12536 or represents the low part, permute the SUBREG and the AND
12537 and try again.  */
12538 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12539 && CONST_INT_P (XEXP (op0, 1)))
12541 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12542 /* Require an integral mode, to avoid creating something like
12543    (AND:SF ...).  */
12544 if ((is_a <scalar_int_mode>
12545 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12546 /* It is unsafe to commute the AND into the SUBREG if the
12547 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12548 not defined. As originally written the upper bits
12549 have a defined value due to the AND operation.
12550 However, if we commute the AND inside the SUBREG then
12551 they no longer have defined values and the meaning of
12552 the code has been changed.
12553 Also C1 should not change value in the smaller mode,
12554 see PR67028 (a positive C1 can become negative in the
12555 smaller mode, so that the AND does no longer mask the
12556 upper bits).  */
12557 && ((WORD_REGISTER_OPERATIONS
12558 && mode_width > GET_MODE_PRECISION (tmode)
12559 && mode_width <= BITS_PER_WORD
12560 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12561 || (mode_width <= GET_MODE_PRECISION (tmode)
12562 && subreg_lowpart_p (XEXP (op0, 0))))
12563 && mode_width <= HOST_BITS_PER_WIDE_INT
12564 && HWI_COMPUTABLE_MODE_P (tmode)
12565 && (c1 & ~mask) == 0
12566 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12567 && c1 != mask
12568 && c1 != GET_MODE_MASK (tmode))
12570 op0 = simplify_gen_binary (AND, tmode,
12571 SUBREG_REG (XEXP (op0, 0)),
12572 gen_int_mode (c1, tmode));
12573 op0 = gen_lowpart (mode, op0);
12574 continue;
12578 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12579 if (const_op == 0 && equality_comparison_p
12580 && XEXP (op0, 1) == const1_rtx
12581 && GET_CODE (XEXP (op0, 0)) == NOT)
12583 op0 = simplify_and_const_int (NULL_RTX, mode,
12584 XEXP (XEXP (op0, 0), 0), 1);
12585 code = (code == NE ? EQ : NE);
12586 continue;
12589 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12590 (eq (and (lshiftrt X) 1) 0).
12591 Also handle the case where (not X) is expressed using xor. */
12592 if (const_op == 0 && equality_comparison_p
12593 && XEXP (op0, 1) == const1_rtx
12594 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12596 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12597 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12599 if (GET_CODE (shift_op) == NOT
12600 || (GET_CODE (shift_op) == XOR
12601 && CONST_INT_P (XEXP (shift_op, 1))
12602 && CONST_INT_P (shift_count)
12603 && HWI_COMPUTABLE_MODE_P (mode)
12604 && (UINTVAL (XEXP (shift_op, 1))
12605 == HOST_WIDE_INT_1U
12606 << INTVAL (shift_count))))
12608 op0
12609 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12610 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12611 code = (code == NE ? EQ : NE);
12612 continue;
12615 break;
12617 case ASHIFT:
12618 /* If we have (compare (ashift FOO N) (const_int C)) and
12619 the high order N bits of FOO (N+1 if an inequality comparison)
12620 are known to be zero, we can do this by comparing FOO with C
12621 shifted right N bits so long as the low-order N bits of C are
12622 zero.  */
12623 if (CONST_INT_P (XEXP (op0, 1))
12624 && INTVAL (XEXP (op0, 1)) >= 0
12625 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12626 < HOST_BITS_PER_WIDE_INT)
12627 && (((unsigned HOST_WIDE_INT) const_op
12628 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12629 - 1)) == 0
12630 && mode_width <= HOST_BITS_PER_WIDE_INT
12631 && (nonzero_bits (XEXP (op0, 0), mode)
12632 & ~(mask >> (INTVAL (XEXP (op0, 1))
12633 + ! equality_comparison_p))) == 0)
12635 /* We must perform a logical shift, not an arithmetic one,
12636 as we want the top N bits of C to be zero. */
12637 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12639 temp >>= INTVAL (XEXP (op0, 1));
12640 op1 = gen_int_mode (temp, mode);
12641 op0 = XEXP (op0, 0);
12642 continue;
12645 /* If we are doing a sign bit comparison, it means we are testing
12646 a particular bit. Convert it to the appropriate AND. */
12647 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12648 && mode_width <= HOST_BITS_PER_WIDE_INT)
12650 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12651 (HOST_WIDE_INT_1U
12652 << (mode_width - 1
12653 - INTVAL (XEXP (op0, 1)))));
12654 code = (code == LT ? NE : EQ);
12655 continue;
12658 /* If this is an equality comparison with zero and we are shifting
12659 the low bit to the sign bit, we can convert this to an AND of the
12660 low-order bit.  */
12661 if (const_op == 0 && equality_comparison_p
12662 && CONST_INT_P (XEXP (op0, 1))
12663 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12665 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12666 continue;
12668 break;
12670 case ASHIFTRT:
12671 /* If this is an equality comparison with zero, we can do this
12672 as a logical shift, which might be much simpler. */
12673 if (equality_comparison_p && const_op == 0
12674 && CONST_INT_P (XEXP (op0, 1)))
12676 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12677 XEXP (op0, 0),
12678 INTVAL (XEXP (op0, 1)));
12679 continue;
12682 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12683 do the comparison in a narrower mode. */
12684 if (! unsigned_comparison_p
12685 && CONST_INT_P (XEXP (op0, 1))
12686 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12687 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12688 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12689 .exists (&tmode))
12690 && (((unsigned HOST_WIDE_INT) const_op
12691 + (GET_MODE_MASK (tmode) >> 1) + 1)
12692 <= GET_MODE_MASK (tmode)))
12694 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12695 continue;
12698 /* Likewise if OP0 is a PLUS of a sign extension with a
12699 constant, which is usually represented with the PLUS
12700 between the shifts. */
12701 if (! unsigned_comparison_p
12702 && CONST_INT_P (XEXP (op0, 1))
12703 && GET_CODE (XEXP (op0, 0)) == PLUS
12704 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12705 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12706 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12707 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12708 .exists (&tmode))
12709 && (((unsigned HOST_WIDE_INT) const_op
12710 + (GET_MODE_MASK (tmode) >> 1) + 1)
12711 <= GET_MODE_MASK (tmode)))
12713 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12714 rtx add_const = XEXP (XEXP (op0, 0), 1);
12715 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12716 add_const, XEXP (op0, 1));
12718 op0 = simplify_gen_binary (PLUS, tmode,
12719 gen_lowpart (tmode, inner),
12726 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12727 the low order N bits of FOO are known to be zero, we can do this
12728 by comparing FOO with C shifted left N bits so long as no
12729 overflow occurs. Even if the low order N bits of FOO aren't known
12730 to be zero, if the comparison is >= or < we can use the same
12731 optimization and for > or <= by setting all the low
12732 order N bits in the comparison constant. */
12733 if (CONST_INT_P (XEXP (op0, 1))
12734 && INTVAL (XEXP (op0, 1)) > 0
12735 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12736 && mode_width <= HOST_BITS_PER_WIDE_INT
12737 && (((unsigned HOST_WIDE_INT) const_op
12738 + (GET_CODE (op0) != LSHIFTRT
12739 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12740 + 1)
12741 : 0))
12742 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12744 unsigned HOST_WIDE_INT low_bits
12745 = (nonzero_bits (XEXP (op0, 0), mode)
12746 & ((HOST_WIDE_INT_1U
12747 << INTVAL (XEXP (op0, 1))) - 1));
12748 if (low_bits == 0 || !equality_comparison_p)
12750 /* If the shift was logical, then we must make the condition
12751    unsigned.  */
12752 if (GET_CODE (op0) == LSHIFTRT)
12753 code = unsigned_condition (code);
12755 const_op = (unsigned HOST_WIDE_INT) const_op
12756 << INTVAL (XEXP (op0, 1));
12757 if (low_bits != 0
12758 && (code == GT || code == GTU
12759 || code == LE || code == LEU))
12760 const_op
12761 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12762 op1 = GEN_INT (const_op);
12763 op0 = XEXP (op0, 0);
12764 continue;
12768 /* If we are using this shift to extract just the sign bit, we
12769 can replace this with an LT or GE comparison. */
12770 if (const_op == 0
12771 && (equality_comparison_p || sign_bit_comparison_p)
12772 && CONST_INT_P (XEXP (op0, 1))
12773 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12775 op0 = XEXP (op0, 0);
12776 code = (code == NE || code == GT ? LT : GE);
12777 continue;
12779 break;
12781 default:
12782 break;
12788 /* Now make any compound operations involved in this comparison. Then,
12789 check for an outermost SUBREG on OP0 that is not doing anything or is
12790 paradoxical. The latter transformation must only be performed when
12791 it is known that the "extra" bits will be the same in op0 and op1 or
12792 that they don't matter. There are three cases to consider:
12794 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12795 care bits and we can assume they have any convenient value. So
12796 making the transformation is safe.
12798 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12799 In this case the upper bits of op0 are undefined. We should not make
12800 the simplification in that case as we do not know the contents of
12801 the upper bits.
12803 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12804 In that case we know those bits are zeros or ones. We must also be
12805 sure that they are the same as the upper bits of op1.
12807 We can never remove a SUBREG for a non-equality comparison because
12808 the sign bit is in a different place in the underlying object. */
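/* Case 1 in practice (illustrative): for (subreg:SI (reg:QI 100) 0)
   compared against Y for equality, the upper bits of the paradoxical
   SUBREG of a REG are don't-cares, so the comparison can be rewritten
   as (reg:QI 100) against the QImode lowpart of Y.  */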
12810 rtx_code op0_mco_code = SET;
12811 if (op1 == const0_rtx)
12812 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12814 op0 = make_compound_operation (op0, op0_mco_code);
12815 op1 = make_compound_operation (op1, SET);
12817 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12818 && is_int_mode (GET_MODE (op0), &mode)
12819 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12820 && (code == NE || code == EQ))
12822 if (paradoxical_subreg_p (op0))
12824 /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
12825    implemented.  */
12826 if (REG_P (SUBREG_REG (op0)))
12828 op0 = SUBREG_REG (op0);
12829 op1 = gen_lowpart (inner_mode, op1);
12832 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12833 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12834 & ~GET_MODE_MASK (mode)) == 0)
12836 tem = gen_lowpart (inner_mode, op1);
12838 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12839 op0 = SUBREG_REG (op0), op1 = tem;
12843 /* We now do the opposite procedure: Some machines don't have compare
12844 insns in all modes. If OP0's mode is an integer mode smaller than a
12845 word and we can't do a compare in that mode, see if there is a larger
12846 mode for which we can do the compare. There are a number of cases in
12847 which we can use the wider mode. */
12849 if (is_int_mode (GET_MODE (op0), &mode)
12850 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12851 && ! have_insn_for (COMPARE, mode))
12852 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12854 tmode = tmode_iter.require ();
12855 if (!HWI_COMPUTABLE_MODE_P (tmode))
12856 break;
12857 if (have_insn_for (COMPARE, tmode))
12859 int zero_extended;
12861 /* If this is a test for negative, we can make an explicit
12862 test of the sign bit. Test this first so we can use
12863 a paradoxical subreg to extend OP0. */
12865 if (op1 == const0_rtx && (code == LT || code == GE)
12866 && HWI_COMPUTABLE_MODE_P (mode))
12868 unsigned HOST_WIDE_INT sign
12869 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12870 op0 = simplify_gen_binary (AND, tmode,
12871 gen_lowpart (tmode, op0),
12872 gen_int_mode (sign, tmode));
12873 code = (code == LT) ? NE : EQ;
12874 break;
12877 /* If the only nonzero bits in OP0 and OP1 are those in the
12878 narrower mode and this is an equality or unsigned comparison,
12879 we can use the wider mode. Similarly for sign-extended
12880 values, in which case it is true for all comparisons. */
12881 zero_extended = ((code == EQ || code == NE
12882 || code == GEU || code == GTU
12883 || code == LEU || code == LTU)
12884 && (nonzero_bits (op0, tmode)
12885 & ~GET_MODE_MASK (mode)) == 0
12886 && ((CONST_INT_P (op1)
12887 || (nonzero_bits (op1, tmode)
12888 & ~GET_MODE_MASK (mode)) == 0)));
12890 if (zero_extended
12891 || ((num_sign_bit_copies (op0, tmode)
12892 > (unsigned int) (GET_MODE_PRECISION (tmode)
12893 - GET_MODE_PRECISION (mode)))
12894 && (num_sign_bit_copies (op1, tmode)
12895 > (unsigned int) (GET_MODE_PRECISION (tmode)
12896 - GET_MODE_PRECISION (mode)))))
12898 /* If OP0 is an AND and we don't have an AND in MODE either,
12899 make a new AND in the proper mode. */
12900 if (GET_CODE (op0) == AND
12901 && !have_insn_for (AND, mode))
12902 op0 = simplify_gen_binary (AND, tmode,
12903 gen_lowpart (tmode,
12904 XEXP (op0, 0)),
12905 gen_lowpart (tmode,
12906 XEXP (op0, 1)));
12908 if (zero_extended)
12911 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
12912 op0, mode);
12913 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
12914 op1, mode);
12916 else
12918 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
12919 op0, mode);
12920 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
12921 op1, mode);
12923 break;
12929 /* We may have changed the comparison operands. Re-canonicalize. */
12930 if (swap_commutative_operands_p (op0, op1))
12932 std::swap (op0, op1);
12933 code = swap_condition (code);
12936 /* If this machine only supports a subset of valid comparisons, see if we
12937 can convert an unsupported one into a supported one. */
12938 target_canonicalize_comparison (&code, &op0, &op1, 0);
12940 *pop0 = op0;
12941 *pop1 = op1;
12943 return code;
12946 /* Utility function for record_value_for_reg.  Count number of
12947    rtxs in X.  */
12949 static int
12950 count_rtxs (rtx x)
12951 enum rtx_code code = GET_CODE (x);
12952 const char *fmt;
12953 int i, j, ret = 1;
12955 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12956 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12958 rtx x0 = XEXP (x, 0);
12959 rtx x1 = XEXP (x, 1);
12961 if (x0 == x1)
12962 return 1 + 2 * count_rtxs (x0);
12964 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12965 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12966 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12967 return 2 + 2 * count_rtxs (x0)
12968 + count_rtxs (x0 == XEXP (x1, 0)
12969 ? XEXP (x1, 1) : XEXP (x1, 0));
12971 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12972 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12973 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12974 return 2 + 2 * count_rtxs (x1)
12975 + count_rtxs (x1 == XEXP (x0, 0)
12976 ? XEXP (x0, 1) : XEXP (x0, 0));
12979 fmt = GET_RTX_FORMAT (code);
12980 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12981 if (fmt[i] == 'e')
12982 ret += count_rtxs (XEXP (x, i));
12983 else if (fmt[i] == 'E')
12984 for (j = 0; j < XVECLEN (x, i); j++)
12985 ret += count_rtxs (XVECEXP (x, i, j));
12987 return ret;
12990 /* Utility function for following routine. Called when X is part of a value
12991 being stored into last_set_value. Sets last_set_table_tick
12992 for each register mentioned. Similar to mention_regs in cse.c */
12994 static void
12995 update_table_tick (rtx x)
12997 enum rtx_code code = GET_CODE (x);
12998 const char *fmt = GET_RTX_FORMAT (code);
12999 int i, j;
13001 if (code == REG)
13003 unsigned int regno = REGNO (x);
13004 unsigned int endregno = END_REGNO (x);
13005 unsigned int r;
13007 for (r = regno; r < endregno; r++)
13009 reg_stat_type *rsp = &reg_stat[r];
13010 rsp->last_set_table_tick = label_tick;
13013 return;
13016 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13019 /* Check for identical subexpressions. If x contains
13020 identical subexpressions we only have to traverse one of
13021 them.  */
13022 if (i == 0 && ARITHMETIC_P (x))
13024 /* Note that at this point x1 has already been
13025    processed.  */
13026 rtx x0 = XEXP (x, 0);
13027 rtx x1 = XEXP (x, 1);
13029 /* If x0 and x1 are identical then there is no need to
13030    process x1.  */
13031 if (x0 == x1)
13032 break;
13034 /* If x0 is identical to a subexpression of x1 then while
13035 processing x1, x0 has already been processed. Thus we
13036 are done with x. */
13037 if (ARITHMETIC_P (x1)
13038 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13041 /* If x1 is identical to a subexpression of x0 then we
13042 still have to process the rest of x0. */
13043 if (ARITHMETIC_P (x0)
13044 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13046 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13051 update_table_tick (XEXP (x, i));
13053 else if (fmt[i] == 'E')
13054 for (j = 0; j < XVECLEN (x, i); j++)
13055 update_table_tick (XVECEXP (x, i, j));
13058 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13059 are saying that the register is clobbered and we no longer know its
13060 value. If INSN is zero, don't update reg_stat[].last_set; this is
13061 only permitted with VALUE also zero and is used to invalidate the
13064 static void
13065 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13067 unsigned int regno = REGNO (reg);
13068 unsigned int endregno = END_REGNO (reg);
13069 unsigned int i;
13070 reg_stat_type *rsp;
13072 /* If VALUE contains REG and we have a previous value for REG, substitute
13073 the previous value. */
13074 if (value && insn && reg_overlap_mentioned_p (reg, value))
13078 /* Set things up so get_last_value is allowed to see anything set up to
13079    our insn.  */
13080 subst_low_luid = DF_INSN_LUID (insn);
13081 tem = get_last_value (reg);
13083 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13084 it isn't going to be useful and will take a lot of time to process,
13085 so just use the CLOBBER. */
13087 if (tem)
13089 if (ARITHMETIC_P (tem)
13090 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13091 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13092 tem = XEXP (tem, 0);
13093 else if (count_occurrences (value, reg, 1) >= 2)
13095 /* If there are two or more occurrences of REG in VALUE,
13096 prevent the value from growing too much. */
13097 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13098 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13101 value = replace_rtx (copy_rtx (value), reg, tem);
13105 /* For each register modified, show we don't know its value, that
13106 we don't know about its bitwise content, that its value has been
13107 updated, and that we don't know the location of the death of the
13109 for (i = regno; i < endregno; i++)
13111 rsp = &reg_stat[i];
13113 if (insn)
13114 rsp->last_set = insn;
13116 rsp->last_set_value = 0;
13117 rsp->last_set_mode = VOIDmode;
13118 rsp->last_set_nonzero_bits = 0;
13119 rsp->last_set_sign_bit_copies = 0;
13120 rsp->last_death = 0;
13121 rsp->truncated_to_mode = VOIDmode;
13124 /* Mark registers that are being referenced in this value. */
13126 update_table_tick (value);
13128 /* Now update the status of each register being set.
13129 If someone is using this register in this block, set this register
13130 to invalid since we will get confused between the two lives in this
13131 basic block. This makes using this register always invalid. In cse, we
13132 scan the table to invalidate all entries using this register, but this
13133 is too much work for us. */
13135 for (i = regno; i < endregno; i++)
13137 rsp = &reg_stat[i];
13138 rsp->last_set_label = label_tick;
13139 if (!insn
13140 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13141 rsp->last_set_invalid = 1;
13142 else
13143 rsp->last_set_invalid = 0;
13146 /* The value being assigned might refer to X (like in "x++;"). In that
13147 case, we must replace it with (clobber (const_int 0)) to prevent
13148 infinite loops.  */
13149 rsp = &reg_stat[regno];
13150 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13152 value = copy_rtx (value);
13153 if (!get_last_value_validate (&value, insn, label_tick, 1))
13154 value = 0;
13157 /* For the main register being modified, update the value, the mode, the
13158 nonzero bits, and the number of sign bit copies. */
13160 rsp->last_set_value = value;
13162 if (value)
13164 machine_mode mode = GET_MODE (reg);
13165 subst_low_luid = DF_INSN_LUID (insn);
13166 rsp->last_set_mode = mode;
13167 if (GET_MODE_CLASS (mode) == MODE_INT
13168 && HWI_COMPUTABLE_MODE_P (mode))
13169 mode = nonzero_bits_mode;
13170 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13171 rsp->last_set_sign_bit_copies
13172 = num_sign_bit_copies (value, GET_MODE (reg));
13176 /* Called via note_stores from record_dead_and_set_regs to handle one
13177 SET or CLOBBER in an insn. DATA is the instruction in which the
13178 set is occurring. */
13180 static void
13181 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13183 rtx_insn *record_dead_insn = (rtx_insn *) data;
13185 if (GET_CODE (dest) == SUBREG)
13186 dest = SUBREG_REG (dest);
13188 if (!record_dead_insn)
13190 if (REG_P (dest))
13191 record_value_for_reg (dest, NULL, NULL_RTX);
13192 return;
13197 /* If we are setting the whole register, we know its value. Otherwise
13198 show that we don't know the value.  We can handle SUBREG in
13199 some cases.  */
13200 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13201 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13202 else if (GET_CODE (setter) == SET
13203 && GET_CODE (SET_DEST (setter)) == SUBREG
13204 && SUBREG_REG (SET_DEST (setter)) == dest
13205 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13206 && subreg_lowpart_p (SET_DEST (setter)))
13207 record_value_for_reg (dest, record_dead_insn,
13208 gen_lowpart (GET_MODE (dest),
13209 SET_SRC (setter)));
13210 else
13211 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13213 else if (MEM_P (dest)
13214 /* Ignore pushes, they clobber nothing. */
13215 && ! push_operand (dest, GET_MODE (dest)))
13216 mem_last_set = DF_INSN_LUID (record_dead_insn);
13219 /* Update the records of when each REG was most recently set or killed
13220 for the things done by INSN. This is the last thing done in processing
13221 INSN in the combiner loop.
13223 We update reg_stat[], in particular fields last_set, last_set_value,
13224 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13225 last_death, and also the similar information mem_last_set (which insn
13226 most recently modified memory) and last_call_luid (which insn was the
13227 most recent subroutine call). */
13229 static void
13230 record_dead_and_set_regs (rtx_insn *insn)
13232 rtx link;
13233 unsigned int i;
13235 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13237 if (REG_NOTE_KIND (link) == REG_DEAD
13238 && REG_P (XEXP (link, 0)))
13240 unsigned int regno = REGNO (XEXP (link, 0));
13241 unsigned int endregno = END_REGNO (XEXP (link, 0));
13243 for (i = regno; i < endregno; i++)
13245 reg_stat_type *rsp;
13247 rsp = &reg_stat[i];
13248 rsp->last_death = insn;
13251 else if (REG_NOTE_KIND (link) == REG_INC)
13252 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13255 if (CALL_P (insn))
13257 hard_reg_set_iterator hrsi;
13258 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13260 reg_stat_type *rsp;
13262 rsp = &reg_stat[i];
13263 rsp->last_set_invalid = 1;
13264 rsp->last_set = insn;
13265 rsp->last_set_value = 0;
13266 rsp->last_set_mode = VOIDmode;
13267 rsp->last_set_nonzero_bits = 0;
13268 rsp->last_set_sign_bit_copies = 0;
13269 rsp->last_death = 0;
13270 rsp->truncated_to_mode = VOIDmode;
13273 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13275 /* We can't combine into a call pattern. Remember, though, that
13276 the return value register is set at this LUID. We could
13277 still replace a register with the return value from the
13278 wrong subroutine call! */
13279 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13280 return;
13282 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13285 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13286 register present in the SUBREG, so for each such SUBREG go back and
13287 adjust nonzero and sign bit information of the registers that are
13288 known to have some zero/sign bits set.
13290 This is needed because when combine blows the SUBREGs away, the
13291 information on zero/sign bits is lost and further combines can be
13292 missed because of that. */
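/* E.g. (illustrative): a promoted (subreg:QI (reg:SI 100) 0) whose
   SUBREG_PROMOTED_UNSIGNED_P bit is set guarantees that (reg:SI 100)
   holds a zero-extended QImode value, so its recorded nonzero bits can
   be cut down to the QImode mask 0xff.  */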
13294 static void
13295 record_promoted_value (rtx_insn *insn, rtx subreg)
13297 struct insn_link *links;
13298 rtx set;
13299 unsigned int regno = REGNO (SUBREG_REG (subreg));
13300 machine_mode mode = GET_MODE (subreg);
13302 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
13303 return;
13305 for (links = LOG_LINKS (insn); links;)
13307 reg_stat_type *rsp;
13309 insn = links->insn;
13310 set = single_set (insn);
13312 if (! set || !REG_P (SET_DEST (set))
13313 || REGNO (SET_DEST (set)) != regno
13314 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13316 links = links->next;
13320 rsp = ®_stat[regno];
13321 if (rsp->last_set == insn)
13323 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13324 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13327 if (REG_P (SET_SRC (set)))
13329 regno = REGNO (SET_SRC (set));
13330 links = LOG_LINKS (insn);
13337 /* Check if X, a register, is known to contain a value already
13338 truncated to MODE. In this case we can use a subreg to refer to
13339 the truncated value even though in the generic case we would need
13340 an explicit truncation. */
13342 static bool
13343 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13344 {
13345 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13346 machine_mode truncated = rsp->truncated_to_mode;
13348 if (truncated == 0
13349 || rsp->truncation_label < label_tick_ebb_start)
13350 return false;
13351 if (!partial_subreg_p (mode, truncated))
13352 return true;
13353 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13354 return true;
13356 return false;
13357 }
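/* Editorial usage sketch (assumes the record was created by
   record_truncated_value below): if (reg:DI 100) was noted with
   truncated_to_mode == SImode in this EBB, then

     reg_truncated_to_mode (QImode, regno_reg_rtx[100])

   returns true whenever truncating SImode to QImode is a no-op on the
   target, so (subreg:QI (reg:DI 100) 0) can stand in for an explicit
   (truncate:QI ...).  */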
13358 /* If X is a hard reg or a subreg record the mode that the register is
13359 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13360 able to turn a truncate into a subreg using this information. Return true
13361 if traversing X is complete. */
13363 static bool
13364 record_truncated_value (rtx x)
13365 {
13366 machine_mode truncated_mode;
13367 reg_stat_type *rsp;
13369 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13370 {
13371 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13372 truncated_mode = GET_MODE (x);
13374 if (!partial_subreg_p (truncated_mode, original_mode))
13375 return true;
13377 truncated_mode = GET_MODE (x);
13378 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13379 return true;
13381 x = SUBREG_REG (x);
13382 }
13383 /* ??? For hard-regs we now record everything. We might be able to
13384 optimize this using last_set_mode. */
13385 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13386 truncated_mode = GET_MODE (x);
13387 else
13388 return false;
13390 rsp = &reg_stat[REGNO (x)];
13391 if (rsp->truncated_to_mode == 0
13392 || rsp->truncation_label < label_tick_ebb_start
13393 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13394 {
13395 rsp->truncated_to_mode = truncated_mode;
13396 rsp->truncation_label = label_tick;
13397 }
13399 return true;
13400 }
13402 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13403 the modes they are used in.  This can help turn TRUNCATEs into
13404 SUBREGs.  */
13406 static void
13407 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13408 {
13409 subrtx_var_iterator::array_type array;
13410 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13411 if (record_truncated_value (*iter))
13412 iter.skip_subrtxes ();
13413 }
13415 /* Scan X for promoted SUBREGs. For each one found,
13416 note what it implies to the registers used in it. */
13418 static void
13419 check_promoted_subreg (rtx_insn *insn, rtx x)
13420 {
13421 if (GET_CODE (x) == SUBREG
13422 && SUBREG_PROMOTED_VAR_P (x)
13423 && REG_P (SUBREG_REG (x)))
13424 record_promoted_value (insn, x);
13425 else
13426 {
13427 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13428 int i, j;
13430 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13431 switch (format[i])
13432 {
13433 case 'e':
13434 check_promoted_subreg (insn, XEXP (x, i));
13435 break;
13436 case 'V':
13437 case 'E':
13438 if (XVEC (x, i) != 0)
13439 for (j = 0; j < XVECLEN (x, i); j++)
13440 check_promoted_subreg (insn, XVECEXP (x, i, j));
13441 break;
13442 }
13443 }
13444 }
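/* Editorial walk-through for check_promoted_subreg (hypothetical insn):
   scanning

     (set (reg:SI 102) (plus:SI (subreg/s:SI (reg:DI 100) 0)
                                (const_int 1)))

   recurses through the PLUS via the rtx format strings, finds the
   promoted SUBREG of pseudo 100 and calls record_promoted_value on it;
   every other operand is merely traversed.  */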
13446 /* Verify that all the registers and memory references mentioned in *LOC are
13447 still valid. *LOC was part of a value set in INSN when label_tick was
13448 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13449 the invalid references with (clobber (const_int 0)) and return 1. This
13450 replacement is useful because we often can get useful information about
13451 the form of a value (e.g., if it was produced by a shift that always
13452 produces -1 or 0) even though we don't know exactly what registers it
13453 was produced from. */
13455 static int
13456 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13457 {
13458 rtx x = *loc;
13459 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13460 int len = GET_RTX_LENGTH (GET_CODE (x));
13461 int i, j;
13463 if (REG_P (x))
13464 {
13465 unsigned int regno = REGNO (x);
13466 unsigned int endregno = END_REGNO (x);
13469 for (j = regno; j < endregno; j++)
13470 {
13471 reg_stat_type *rsp = &reg_stat[j];
13472 if (rsp->last_set_invalid
13473 /* If this is a pseudo-register that was only set once and not
13474 live at the beginning of the function, it is always valid. */
13475 || (! (regno >= FIRST_PSEUDO_REGISTER
13476 && regno < reg_n_sets_max
13477 && REG_N_SETS (regno) == 1
13478 && (!REGNO_REG_SET_P
13479 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13480 regno)))
13481 && rsp->last_set_label > tick))
13482 {
13483 if (replace)
13484 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13485 return replace;
13486 }
13487 }
13489 return 1;
13490 }
13491 /* If this is a memory reference, make sure that there were no stores after
13492 it that might have clobbered the value. We don't have alias info, so we
13493 assume any store invalidates it. Moreover, we only have local UIDs, so
13494 we also assume that there were stores in the intervening basic blocks. */
13495 else if (MEM_P (x) && !MEM_READONLY_P (x)
13496 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13497 {
13498 if (replace)
13499 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13500 return replace;
13501 }
13503 for (i = 0; i < len; i++)
13504 {
13505 if (fmt[i] == 'e')
13506 {
13507 /* Check for identical subexpressions.  If x contains
13508 identical subexpressions we only have to traverse one of
13509 them.  */
13510 if (i == 1 && ARITHMETIC_P (x))
13511 {
13512 /* Note that at this point x0 has already been checked
13513 and found valid. */
13514 rtx x0 = XEXP (x, 0);
13515 rtx x1 = XEXP (x, 1);
13517 /* If x0 and x1 are identical then x is also valid.  */
13518 if (x0 == x1)
13519 return 1;
13521 /* If x1 is identical to a subexpression of x0 then
13522 while checking x0, x1 has already been checked.  Thus
13523 it is valid, and so is x.  */
13524 if (ARITHMETIC_P (x0)
13525 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13526 return 1;
13528 /* If x0 is identical to a subexpression of x1 then x is
13529 valid iff the rest of x1 is valid. */
13530 if (ARITHMETIC_P (x1)
13531 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13532 return
13533 get_last_value_validate (&XEXP (x1,
13534 x0 == XEXP (x1, 0) ? 1 : 0),
13535 insn, tick, replace);
13536 }
13538 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13539 replace) == 0)
13540 return 0;
13541 }
13542 else if (fmt[i] == 'E')
13543 for (j = 0; j < XVECLEN (x, i); j++)
13544 if (get_last_value_validate (&XVECEXP (x, i, j),
13545 insn, tick, replace) == 0)
13546 return 0;
13547 }
13549 /* If we haven't found a reason for it to be invalid, it is valid.  */
13550 return 1;
13551 }
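/* Editorial sketch of the validation contract above (values invented).
   Suppose (reg:SI 99) was recorded with value
   (plus:SI (reg:SI 98) (const_int 4)) and register 98 has been set again
   since.  With REPLACE == 0 the function returns 0; with REPLACE == 1 it
   rewrites the stale reference in place, producing

     (plus:SI (clobber (const_int 0)) (const_int 4))

   and returns 1, so callers can still reason about the shape of the
   value even though one operand is no longer known.  */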
13553 /* Get the last value assigned to X, if known. Some registers
13554 in the value may be replaced with (clobber (const_int 0)) if their value
13555 is no longer known reliably.  */
13557 static rtx
13558 get_last_value (const_rtx x)
13559 {
13560 unsigned int regno;
13561 rtx value;
13562 reg_stat_type *rsp;
13564 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13565 then convert it to the desired mode. If this is a paradoxical SUBREG,
13566 we cannot predict what values the "extra" bits might have. */
13567 if (GET_CODE (x) == SUBREG
13568 && subreg_lowpart_p (x)
13569 && !paradoxical_subreg_p (x)
13570 && (value = get_last_value (SUBREG_REG (x))) != 0)
13571 return gen_lowpart (GET_MODE (x), value);
13573 if (!REG_P (x))
13574 return 0;
13576 regno = REGNO (x);
13577 rsp = &reg_stat[regno];
13578 value = rsp->last_set_value;
13580 /* If we don't have a value, or if it isn't for this basic block and
13581 it's either a hard register, set more than once, or live
13582 at the beginning of the function, return 0.
13584 Because if it's not live at the beginning of the function then the reg
13585 is always set before being used (is never used without being set).
13586 And, if it's set only once, and it's always set before use, then all
13587 uses must have the same last value, even if it's not from this basic
13588 block.  */
13590 if (value == 0
13591 || (rsp->last_set_label < label_tick_ebb_start
13592 && (regno < FIRST_PSEUDO_REGISTER
13593 || regno >= reg_n_sets_max
13594 || REG_N_SETS (regno) != 1
13595 || REGNO_REG_SET_P
13596 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13597 return 0;
13599 /* If the value was set in a later insn than the ones we are processing,
13600 we can't use it even if the register was only set once. */
13601 if (rsp->last_set_label == label_tick
13602 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13603 return 0;
13605 /* If fewer bits were set than what we are asked for now, we cannot use
13606 the value.  */
13607 if (GET_MODE_PRECISION (rsp->last_set_mode)
13608 < GET_MODE_PRECISION (GET_MODE (x)))
13609 return 0;
13611 /* If the value has all its registers valid, return it. */
13612 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13613 return value;
13615 /* Otherwise, make a copy and replace any invalid register with
13616 (clobber (const_int 0)). If that fails for some reason, return 0. */
13618 value = copy_rtx (value);
13619 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13620 return value;
13622 return 0;
13623 }
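/* Editorial usage sketch for get_last_value (insn numbers invented):
   after

     insn 20: (set (reg:SI 105) (const_int 7))

   and before any later set of register 105 in this EBB,
   get_last_value (regno_reg_rtx[105]) yields (const_int 7), and asking
   for a lowpart SUBREG of register 105 goes through the gen_lowpart
   conversion at the top.  A paradoxical SUBREG returns 0 instead, since
   the extra bits are unpredictable.  */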
13625 /* Return nonzero if expression X refers to a REG or to memory
13626 that is set in an instruction more recent than FROM_LUID. */
13628 static int
13629 use_crosses_set_p (const_rtx x, int from_luid)
13630 {
13631 const char *fmt;
13632 int i;
13633 enum rtx_code code = GET_CODE (x);
13635 if (code == REG)
13636 {
13637 unsigned int regno = REGNO (x);
13638 unsigned endreg = END_REGNO (x);
13640 #ifdef PUSH_ROUNDING
13641 /* Don't allow uses of the stack pointer to be moved,
13642 because we don't know whether the move crosses a push insn. */
13643 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13644 return 1;
13645 #endif
13646 for (; regno < endreg; regno++)
13647 {
13648 reg_stat_type *rsp = &reg_stat[regno];
13649 if (rsp->last_set
13650 && rsp->last_set_label == label_tick
13651 && DF_INSN_LUID (rsp->last_set) > from_luid)
13652 return 1;
13653 }
13654 }
13657 if (code == MEM && mem_last_set > from_luid)
13658 return 1;
13660 fmt = GET_RTX_FORMAT (code);
13662 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13663 {
13664 if (fmt[i] == 'E')
13665 {
13666 int j;
13667 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13668 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13669 return 1;
13670 }
13671 else if (fmt[i] == 'e'
13672 && use_crosses_set_p (XEXP (x, i), from_luid))
13673 return 1;
13674 }
13675 return 0;
13676 }
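/* Editorial example for use_crosses_set_p (insn numbers invented):

     insn 31: (set (reg:SI 102) (reg:SI 101))   ; FROM_LUID is here
     insn 32: (set (reg:SI 101) (const_int 2))
     insn 33: (set (reg:SI 103) (reg:SI 101))

   moving the use of (reg:SI 101) in insn 33 back to insn 31's position
   would cross the set in insn 32; since register 101's last_set has a
   LUID greater than FROM_LUID, the function returns 1.  */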
13678 /* Define three variables used for communication between the following
13679 routines.  */
13681 static unsigned int reg_dead_regno, reg_dead_endregno;
13682 static int reg_dead_flag;
13684 /* Function called via note_stores from reg_dead_at_p.
13686 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13687 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
13689 static void
13690 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13691 {
13692 unsigned int regno, endregno;
13694 if (!REG_P (dest))
13695 return;
13697 regno = REGNO (dest);
13698 endregno = END_REGNO (dest);
13699 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13700 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13701 }
13703 /* Return nonzero if REG is known to be dead at INSN.
13705 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13706 referencing REG, it is dead. If we hit a SET referencing REG, it is
13707 live. Otherwise, see if it is live or dead at the start of the basic
13708 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13709 must be assumed to be always live. */
13711 static int
13712 reg_dead_at_p (rtx reg, rtx_insn *insn)
13713 {
13714 basic_block block;
13715 unsigned int i;
13717 /* Set variables for reg_dead_at_p_1. */
13718 reg_dead_regno = REGNO (reg);
13719 reg_dead_endregno = END_REGNO (reg);
13721 reg_dead_flag = 0;
13723 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13724 we allow the machine description to decide whether use-and-clobber
13725 patterns are OK. */
13726 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13727 {
13728 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13729 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13730 return 0;
13731 }
13733 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13734 beginning of basic block. */
13735 block = BLOCK_FOR_INSN (insn);
13736 for (;;)
13737 {
13738 if (INSN_P (insn))
13739 {
13740 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13741 return 1;
13743 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13744 if (reg_dead_flag)
13745 return reg_dead_flag == 1 ? 1 : 0;
13747 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13748 return 1;
13749 }
13751 if (insn == BB_HEAD (block))
13752 break;
13754 insn = PREV_INSN (insn);
13755 }
13757 /* Look at live-in sets for the basic block that we were in. */
13758 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13759 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13760 return 0;
13762 return 1;
13763 }
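/* Editorial sketch of the backward scan above (hypothetical insns).
   Asking whether (reg:SI 1) is dead at insn 42 in

     insn 40: (set (reg:SI 1) (const_int 0))
     insn 41: (clobber (reg:SI 1))
     insn 42: ...

   stops at insn 41: note_stores invokes reg_dead_at_p_1, the CLOBBER
   sets reg_dead_flag to 1, and the register is reported dead.  A SET of
   register 1 at insn 41 would set the flag to -1 and report it live.  */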
13765 /* Note hard registers in X that are used. */
13767 static void
13768 mark_used_regs_combine (rtx x)
13769 {
13770 RTX_CODE code = GET_CODE (x);
13771 unsigned int regno;
13772 int i;
13774 switch (code)
13775 {
13776 case LABEL_REF:
13777 case SYMBOL_REF:
13778 case CONST:
13779 CASE_CONST_ANY:
13780 case PC:
13781 case ADDR_VEC:
13782 case ADDR_DIFF_VEC:
13783 case ASM_INPUT:
13784 /* CC0 must die in the insn after it is set, so we don't need to take
13785 special note of it here.  */
13786 case CC0:
13787 return;
13789 case CLOBBER:
13790 /* If we are clobbering a MEM, mark any hard registers inside the
13791 address as used. */
13792 if (MEM_P (XEXP (x, 0)))
13793 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13794 return;
13796 case REG:
13797 regno = REGNO (x);
13798 /* A hard reg in a wide mode may really be multiple registers.
13799 If so, mark all of them just like the first. */
13800 if (regno < FIRST_PSEUDO_REGISTER)
13802 /* None of this applies to the stack, frame or arg pointers. */
13803 if (regno == STACK_POINTER_REGNUM
13804 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13805 && regno == HARD_FRAME_POINTER_REGNUM)
13806 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13807 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13808 || regno == FRAME_POINTER_REGNUM)
13809 return;
13811 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13812 }
13813 return;
13815 case SET:
13816 {
13817 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13818 the address.  */
13819 rtx testreg = SET_DEST (x);
13821 while (GET_CODE (testreg) == SUBREG
13822 || GET_CODE (testreg) == ZERO_EXTRACT
13823 || GET_CODE (testreg) == STRICT_LOW_PART)
13824 testreg = XEXP (testreg, 0);
13826 if (MEM_P (testreg))
13827 mark_used_regs_combine (XEXP (testreg, 0));
13829 mark_used_regs_combine (SET_SRC (x));
13830 }
13831 return;
13833 default:
13834 break;
13835 }
13837 /* Recursively scan the operands of this expression.  */
13839 {
13840 const char *fmt = GET_RTX_FORMAT (code);
13842 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13843 {
13844 if (fmt[i] == 'e')
13845 mark_used_regs_combine (XEXP (x, i));
13846 else if (fmt[i] == 'E')
13847 {
13848 int j;
13850 for (j = 0; j < XVECLEN (x, i); j++)
13851 mark_used_regs_combine (XVECEXP (x, i, j));
13852 }
13853 }
13854 }
13855 }
13857 /* Remove register number REGNO from the dead registers list of INSN.
13859 Return the note used to record the death, if there was one. */
13861 static rtx
13862 remove_death (unsigned int regno, rtx_insn *insn)
13863 {
13864 rtx note = find_regno_note (insn, REG_DEAD, regno);
13866 if (note)
13867 remove_note (insn, note);
13869 return note;
13870 }
13872 /* For each register (hardware or pseudo) used within expression X, if its
13873 death is in an instruction with luid between FROM_LUID (inclusive) and
13874 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13875 list headed by PNOTES.
13877 That said, don't move registers killed by maybe_kill_insn.
13879 This is done when X is being merged by combination into TO_INSN. These
13880 notes will then be distributed as needed. */
13882 static void
13883 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13884 rtx *pnotes)
13885 {
13886 const char *fmt;
13887 int len, i;
13888 enum rtx_code code = GET_CODE (x);
13890 if (code == REG)
13891 {
13892 unsigned int regno = REGNO (x);
13893 rtx_insn *where_dead = reg_stat[regno].last_death;
13895 /* Don't move the register if it gets killed in between from and to. */
13896 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13897 && ! reg_referenced_p (x, maybe_kill_insn))
13898 return;
13900 if (where_dead
13901 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13902 && DF_INSN_LUID (where_dead) >= from_luid
13903 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13904 {
13905 rtx note = remove_death (regno, where_dead);
13907 /* It is possible for the call above to return 0. This can occur
13908 when last_death points to I2 or I1 that we combined with.
13909 In that case make a new note.
13911 We must also check for the case where X is a hard register
13912 and NOTE is a death note for a range of hard registers
13913 including X. In that case, we must put REG_DEAD notes for
13914 the remaining registers in place of NOTE. */
13916 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13917 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13918 {
13919 unsigned int deadregno = REGNO (XEXP (note, 0));
13920 unsigned int deadend = END_REGNO (XEXP (note, 0));
13921 unsigned int ourend = END_REGNO (x);
13924 for (i = deadregno; i < deadend; i++)
13925 if (i < regno || i >= ourend)
13926 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13927 }
13929 /* If we didn't find any note, or if we found a REG_DEAD note that
13930 covers only part of the given reg, and we have a multi-reg hard
13931 register, then to be safe we must check for REG_DEAD notes
13932 for each register other than the first. They could have
13933 their own REG_DEAD notes lying around. */
13934 else if ((note == 0
13935 || (note != 0
13936 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
13937 GET_MODE (x))))
13938 && regno < FIRST_PSEUDO_REGISTER
13939 && REG_NREGS (x) > 1)
13940 {
13941 unsigned int ourend = END_REGNO (x);
13942 unsigned int i, offset;
13943 rtx oldnotes = 0;
13945 if (note)
13946 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
13947 else
13948 offset = 1;
13950 for (i = regno + offset; i < ourend; i++)
13951 move_deaths (regno_reg_rtx[i],
13952 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13953 }
13955 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13956 {
13957 XEXP (note, 1) = *pnotes;
13958 *pnotes = note;
13959 }
13960 else
13961 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13962 }
13964 return;
13965 }
13967 else if (GET_CODE (x) == SET)
13968 {
13969 rtx dest = SET_DEST (x);
13971 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13973 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13974 that accesses one word of a multi-word item, some
13975 piece of every register in the expression is used by
13976 this insn, so remove any old death. */
13977 /* ??? So why do we test for equality of the sizes? */
13979 if (GET_CODE (dest) == ZERO_EXTRACT
13980 || GET_CODE (dest) == STRICT_LOW_PART
13981 || (GET_CODE (dest) == SUBREG
13982 && (((GET_MODE_SIZE (GET_MODE (dest))
13983 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13984 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13985 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13986 {
13987 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13988 return;
13989 }
13991 /* If this is some other SUBREG, we know it replaces the entire
13992 value, so use that as the destination. */
13993 if (GET_CODE (dest) == SUBREG)
13994 dest = SUBREG_REG (dest);
13996 /* If this is a MEM, adjust deaths of anything used in the address.
13997 For a REG (the only other possibility), the entire value is
13998 being replaced so the old value is not used in this insn. */
14000 if (MEM_P (dest))
14001 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14002 to_insn, pnotes);
14003 return;
14004 }
14006 else if (GET_CODE (x) == CLOBBER)
14007 return;
14009 len = GET_RTX_LENGTH (code);
14010 fmt = GET_RTX_FORMAT (code);
14012 for (i = 0; i < len; i++)
14013 {
14014 if (fmt[i] == 'E')
14015 {
14016 int j;
14017 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14018 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14019 to_insn, pnotes);
14020 }
14021 else if (fmt[i] == 'e')
14022 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14023 }
14024 }
14026 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14027 pattern of an insn. X must be a REG. */
14029 static int
14030 reg_bitfield_target_p (rtx x, rtx body)
14031 {
14032 int i;
14034 if (GET_CODE (body) == SET)
14035 {
14036 rtx dest = SET_DEST (body);
14037 rtx target;
14038 unsigned int regno, tregno, endregno, endtregno;
14040 if (GET_CODE (dest) == ZERO_EXTRACT)
14041 target = XEXP (dest, 0);
14042 else if (GET_CODE (dest) == STRICT_LOW_PART)
14043 target = SUBREG_REG (XEXP (dest, 0));
14044 else
14045 return 0;
14047 if (GET_CODE (target) == SUBREG)
14048 target = SUBREG_REG (target);
14050 if (!REG_P (target))
14051 return 0;
14053 tregno = REGNO (target), regno = REGNO (x);
14054 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14055 return target == x;
14057 endtregno = end_hard_regno (GET_MODE (target), tregno);
14058 endregno = end_hard_regno (GET_MODE (x), regno);
14060 return endregno > tregno && regno < endtregno;
14061 }
14063 else if (GET_CODE (body) == PARALLEL)
14064 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14065 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14066 return 1;
14068 return 0;
14069 }
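/* Editorial example for reg_bitfield_target_p (hypothetical pattern): in

     (set (zero_extract:SI (reg:SI 60) (const_int 8) (const_int 0))
          (reg:SI 61))

   register 60 is the target of a bit-field assignment, so a REG_DEAD
   note for it must not be placed on this insn: only part of the register
   is written and the remaining bits stay live.  */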
14071 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14072 as appropriate. I3 and I2 are the insns resulting from the combination
14073 insns including FROM (I2 may be zero).
14075 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14076 not need REG_DEAD notes because they are being substituted for. This
14077 saves searching in the most common cases.
14079 Each note in the list is either ignored or placed on some insns, depending
14080 on the type of note. */
14082 static void
14083 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14084 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14085 {
14086 rtx note, next_note;
14087 rtx tem_note;
14088 rtx_insn *tem_insn;
14090 for (note = notes; note; note = next_note)
14091 {
14092 rtx_insn *place = 0, *place2 = 0;
14094 next_note = XEXP (note, 1);
14095 switch (REG_NOTE_KIND (note))
14096 {
14097 case REG_BR_PROB:
14098 case REG_BR_PRED:
14099 /* Doesn't matter much where we put this, as long as it's somewhere.
14100 It is preferable to keep these notes on branches, which is most
14101 likely to be i3.  */
14102 place = i3;
14103 break;
14105 case REG_NON_LOCAL_GOTO:
14106 if (JUMP_P (i3))
14107 place = i3;
14108 else
14109 {
14110 gcc_assert (i2 && JUMP_P (i2));
14111 place = i2;
14112 }
14113 break;
14115 case REG_EH_REGION:
14116 /* These notes must remain with the call or trapping instruction.  */
14117 if (CALL_P (i3))
14118 place = i3;
14119 else if (i2 && CALL_P (i2))
14120 place = i2;
14121 else
14122 {
14123 gcc_assert (cfun->can_throw_non_call_exceptions);
14124 if (may_trap_p (i3))
14125 place = i3;
14126 else if (i2 && may_trap_p (i2))
14127 place = i2;
14128 /* ??? Otherwise assume we've combined things such that we
14129 can now prove that the instructions can't trap. Drop the
14130 note in this case.  */
14131 }
14132 break;
14134 case REG_ARGS_SIZE:
14135 /* ??? How to distribute between i3-i1. Assume i3 contains the
14136 entire adjustment. Assert i3 contains at least some adjust. */
14137 if (!noop_move_p (i3))
14138 {
14139 int old_size, args_size = INTVAL (XEXP (note, 0));
14140 /* fixup_args_size_notes looks at REG_NORETURN note,
14141 so ensure the note is placed there first.  */
14142 if (CALL_P (i3))
14143 {
14144 rtx *np;
14145 for (np = &next_note; *np; np = &XEXP (*np, 1))
14146 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14147 {
14148 rtx n = *np;
14149 *np = XEXP (n, 1);
14150 XEXP (n, 1) = REG_NOTES (i3);
14151 REG_NOTES (i3) = n;
14152 break;
14153 }
14154 }
14155 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14156 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
14157 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
14158 gcc_assert (old_size != args_size
14159 || (CALL_P (i3)
14160 && !ACCUMULATE_OUTGOING_ARGS
14161 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14162 }
14163 break;
14165 case REG_NORETURN:
14166 case REG_SETJMP:
14167 case REG_TM:
14168 case REG_CALL_DECL:
14169 /* These notes must remain with the call. It should not be
14170 possible for both I2 and I3 to be a call.  */
14171 if (CALL_P (i3))
14172 place = i3;
14173 else
14174 {
14175 gcc_assert (i2 && CALL_P (i2));
14176 place = i2;
14177 }
14178 break;
14180 case REG_UNUSED:
14181 /* Any clobbers for i3 may still exist, and so we must process
14182 REG_UNUSED notes from that insn.
14184 Any clobbers from i2 or i1 can only exist if they were added by
14185 recog_for_combine. In that case, recog_for_combine created the
14186 necessary REG_UNUSED notes. Trying to keep any original
14187 REG_UNUSED notes from these insns can cause incorrect output
14188 if it is for the same register as the original i3 dest.
14189 In that case, we will notice that the register is set in i3,
14190 and then add a REG_UNUSED note for the destination of i3, which
14191 is wrong. However, it is possible to have REG_UNUSED notes from
14192 i2 or i1 for registers which were both used and clobbered, so
14193 we keep notes from i2 or i1 if they will turn into REG_DEAD
14194 notes.  */
14196 /* If this register is set or clobbered in I3, put the note there
14197 unless there is one already. */
14198 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14199 {
14200 if (from_insn != i3)
14201 break;
14203 if (! (REG_P (XEXP (note, 0))
14204 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14205 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14206 place = i3;
14207 }
14208 /* Otherwise, if this register is used by I3, then this register
14209 now dies here, so we must put a REG_DEAD note here unless there
14210 is one already.  */
14211 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14212 && ! (REG_P (XEXP (note, 0))
14213 ? find_regno_note (i3, REG_DEAD,
14214 REGNO (XEXP (note, 0)))
14215 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14216 {
14217 PUT_REG_NOTE_KIND (note, REG_DEAD);
14218 place = i3;
14219 }
14220 break;
14222 case REG_EQUAL:
14223 case REG_EQUIV:
14224 case REG_NOALIAS:
14225 /* These notes say something about results of an insn. We can
14226 only support them if they used to be on I3 in which case they
14227 remain on I3. Otherwise they are ignored.
14229 If the note refers to an expression that is not a constant, we
14230 must also ignore the note since we cannot tell whether the
14231 equivalence is still true. It might be possible to do
14232 slightly better than this (we only have a problem if I2DEST
14233 or I1DEST is present in the expression), but it doesn't
14234 seem worth the trouble. */
14236 if (from_insn == i3
14237 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14238 place = i3;
14239 break;
14241 case REG_INC:
14242 /* These notes say something about how a register is used.  They must
14243 be present on any use of the register in I2 or I3. */
14244 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14245 place = i3;
14247 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14248 {
14249 if (place)
14250 place2 = i2;
14251 else
14252 place = i2;
14253 }
14254 break;
14256 case REG_LABEL_TARGET:
14257 case REG_LABEL_OPERAND:
14258 /* This can show up in several ways -- either directly in the
14259 pattern, or hidden off in the constant pool with (or without?)
14260 a REG_EQUAL note. */
14261 /* ??? Ignore the without-reg_equal-note problem for now. */
14262 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14263 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14264 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14265 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14266 place = i3;
14268 if (i2
14269 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14270 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14271 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14272 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14273 {
14274 if (place)
14275 place2 = i2;
14276 else
14277 place = i2;
14278 }
14280 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14281 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14282 there.  */
14283 if (place && JUMP_P (place)
14284 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14285 && (JUMP_LABEL (place) == NULL
14286 || JUMP_LABEL (place) == XEXP (note, 0)))
14287 {
14288 rtx label = JUMP_LABEL (place);
14290 if (!label)
14291 JUMP_LABEL (place) = XEXP (note, 0);
14292 else if (LABEL_P (label))
14293 LABEL_NUSES (label)--;
14294 }
14296 if (place2 && JUMP_P (place2)
14297 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14298 && (JUMP_LABEL (place2) == NULL
14299 || JUMP_LABEL (place2) == XEXP (note, 0)))
14300 {
14301 rtx label = JUMP_LABEL (place2);
14303 if (!label)
14304 JUMP_LABEL (place2) = XEXP (note, 0);
14305 else if (LABEL_P (label))
14306 LABEL_NUSES (label)--;
14307 }
14308 break;
14311 case REG_NONNEG:
14312 /* This note says something about the value of a register prior
14313 to the execution of an insn. It is too much trouble to see
14314 if the note is still correct in all situations. It is better
14315 to simply delete it.  */
14316 break;
14318 case REG_DEAD:
14319 /* If we replaced the right hand side of FROM_INSN with a
14320 REG_EQUAL note, the original use of the dying register
14321 will not have been combined into I3 and I2. In such cases,
14322 FROM_INSN is guaranteed to be the first of the combined
14323 instructions, so we simply need to search back before
14324 FROM_INSN for the previous use or set of this register,
14325 then alter the notes there appropriately.
14327 If the register is used as an input in I3, it dies there.
14328 Similarly for I2, if it is nonzero and adjacent to I3.
14330 If the register is not used as an input in either I3 or I2
14331 and it is not one of the registers we were supposed to eliminate,
14332 there are two possibilities. We might have a non-adjacent I2
14333 or we might have somehow eliminated an additional register
14334 from a computation. For example, we might have had A & B where
14335 we discover that B will always be zero. In this case we will
14336 eliminate the reference to A.
14338 In both cases, we must search to see if we can find a previous
14339 use of A and put the death note there. */
14341 if (from_insn
14342 && from_insn == i2mod
14343 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14344 tem_insn = from_insn;
14345 else
14346 {
14347 tem_insn = i3;
14348 if (from_insn && CALL_P (from_insn)
14349 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14350 place = i3;
14351 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14352 place = i3;
14353 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14354 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14355 place = i2;
14356 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14357 && !(i2mod
14358 && reg_overlap_mentioned_p (XEXP (note, 0),
14359 i2mod_old_rhs)))
14360 || rtx_equal_p (XEXP (note, 0), elim_i1)
14361 || rtx_equal_p (XEXP (note, 0), elim_i0))
14362 break;
14363 }
14364 /* If the new I2 sets the same register that is marked dead
14365 in the note, we do not know where to put the note.
14366 Give up.  */
14367 if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14368 break;
14370 if (place == 0)
14371 {
14373 basic_block bb = this_basic_block;
14375 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14376 {
14377 if (!NONDEBUG_INSN_P (tem_insn))
14378 {
14379 if (tem_insn == BB_HEAD (bb))
14380 break;
14381 continue;
14382 }
14384 /* If the register is being set at TEM_INSN, see if that is all
14385 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14386 into a REG_UNUSED note instead. Don't delete sets to
14387 global register vars. */
14388 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14389 || !global_regs[REGNO (XEXP (note, 0))])
14390 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14391 {
14392 rtx set = single_set (tem_insn);
14393 rtx inner_dest = 0;
14394 rtx_insn *cc0_setter = NULL;
14396 if (set != 0)
14397 for (inner_dest = SET_DEST (set);
14398 (GET_CODE (inner_dest) == STRICT_LOW_PART
14399 || GET_CODE (inner_dest) == SUBREG
14400 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14401 inner_dest = XEXP (inner_dest, 0))
14402 ;
14404 /* Verify that it was the set, and not a clobber that
14405 modified the register.
14407 CC0 targets must be careful to maintain setter/user
14408 pairs. If we cannot delete the setter due to side
14409 effects, mark the user with an UNUSED note instead
14410 of deleting it.  */
14412 if (set != 0 && ! side_effects_p (SET_SRC (set))
14413 && rtx_equal_p (XEXP (note, 0), inner_dest)
14414 && (!HAVE_cc0
14415 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14416 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14417 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14418 {
14419 /* Move the notes and links of TEM_INSN elsewhere.
14420 This might delete other dead insns recursively.
14421 First set the pattern to something that won't use
14422 any register.  */
14423 rtx old_notes = REG_NOTES (tem_insn);
14425 PATTERN (tem_insn) = pc_rtx;
14426 REG_NOTES (tem_insn) = NULL;
14428 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14429 NULL_RTX, NULL_RTX, NULL_RTX);
14430 distribute_links (LOG_LINKS (tem_insn));
14432 unsigned int regno = REGNO (XEXP (note, 0));
14433 reg_stat_type *rsp = &reg_stat[regno];
14434 if (rsp->last_set == tem_insn)
14435 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14437 SET_INSN_DELETED (tem_insn);
14438 if (tem_insn == i2)
14439 i2 = NULL;
14441 /* Delete the setter too.  */
14442 if (cc0_setter)
14443 {
14444 PATTERN (cc0_setter) = pc_rtx;
14445 old_notes = REG_NOTES (cc0_setter);
14446 REG_NOTES (cc0_setter) = NULL;
14448 distribute_notes (old_notes, cc0_setter,
14449 cc0_setter, NULL,
14450 NULL_RTX, NULL_RTX, NULL_RTX);
14451 distribute_links (LOG_LINKS (cc0_setter));
14453 SET_INSN_DELETED (cc0_setter);
14454 if (cc0_setter == i2)
14455 i2 = NULL;
14456 }
14457 }
14458 else
14459 {
14460 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14462 /* If there isn't already a REG_UNUSED note, put one
14463 here. Do not place a REG_DEAD note, even if
14464 the register is also used here; that would not
14465 match the algorithm used in lifetime analysis
14466 and can cause the consistency check in the
14467 scheduler to fail. */
14468 if (! find_regno_note (tem_insn, REG_UNUSED,
14469 REGNO (XEXP (note, 0))))
14470 place = tem_insn;
14471 break;
14472 }
14473 }
14474 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14475 || (CALL_P (tem_insn)
14476 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14477 {
14478 place = tem_insn;
14480 /* If we are doing a 3->2 combination, and we have a
14481 register which formerly died in i3 and was not used
14482 by i2, which now no longer dies in i3 and is used in
14483 i2 but does not die in i2, and place is between i2
14484 and i3, then we may need to move a link from place to
14485 i2.  */
14486 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14487 && from_insn
14488 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14489 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14490 {
14491 struct insn_link *links = LOG_LINKS (place);
14492 LOG_LINKS (place) = NULL;
14493 distribute_links (links);
14494 }
14495 break;
14496 }
14498 if (tem_insn == BB_HEAD (bb))
14499 break;
14500 }
14501 }
14504 /* If the register is set or already dead at PLACE, we needn't do
14505 anything with this note if it is still a REG_DEAD note.
14506 We check here if it is set at all, not if it is totally replaced,
14507 which is what `dead_or_set_p' checks, so also check for it being
14508 set partially.  */
14510 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14512 unsigned int regno = REGNO (XEXP (note, 0));
14513 reg_stat_type *rsp = &reg_stat[regno];
14515 if (dead_or_set_p (place, XEXP (note, 0))
14516 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14517 {
14518 /* Unless the register previously died in PLACE, clear
14519 last_death.  [I no longer understand why this is
14520 being done.]  */
14521 if (rsp->last_death != place)
14522 rsp->last_death = 0;
14523 place = 0;
14524 }
14525 else
14526 rsp->last_death = place;
14528 /* If this is a death note for a hard reg that is occupying
14529 multiple registers, ensure that we are still using all
14530 parts of the object. If we find a piece of the object
14531 that is unused, we must arrange for an appropriate REG_DEAD
14532 note to be added for it. However, we can't just emit a USE
14533 and tag the note to it, since the register might actually
14534 be dead; so we recurse, and the recursive call then finds
14535 the previous insn that used this register. */
14537 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14538 {
14539 unsigned int endregno = END_REGNO (XEXP (note, 0));
14540 bool all_used = true;
14541 unsigned int i;
14543 for (i = regno; i < endregno; i++)
14544 if ((! refers_to_regno_p (i, PATTERN (place))
14545 && ! find_regno_fusage (place, USE, i))
14546 || dead_or_set_regno_p (place, i))
14547 {
14548 all_used = false;
14549 break;
14550 }
14552 if (! all_used)
14553 {
14554 /* Put only REG_DEAD notes for pieces that are
14555 not already dead or set. */
14557 for (i = regno; i < endregno;
14558 i += hard_regno_nregs (i, reg_raw_mode[i]))
14559 {
14560 rtx piece = regno_reg_rtx[i];
14561 basic_block bb = this_basic_block;
14563 if (! dead_or_set_p (place, piece)
14564 && ! reg_bitfield_target_p (piece,
14565 PATTERN (place)))
14566 {
14567 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14568 NULL_RTX);
14570 distribute_notes (new_note, place, place,
14571 NULL, NULL_RTX, NULL_RTX,
14572 NULL_RTX);
14573 }
14574 else if (! refers_to_regno_p (i, PATTERN (place))
14575 && ! find_regno_fusage (place, USE, i))
14576 for (tem_insn = PREV_INSN (place); ;
14577 tem_insn = PREV_INSN (tem_insn))
14578 {
14579 if (!NONDEBUG_INSN_P (tem_insn))
14580 {
14581 if (tem_insn == BB_HEAD (bb))
14582 break;
14583 continue;
14584 }
14585 if (dead_or_set_p (tem_insn, piece)
14586 || reg_bitfield_target_p (piece,
14587 PATTERN (tem_insn)))
14589 add_reg_note (tem_insn, REG_UNUSED, piece);
14590 break;
14591 }
14592 }
14594 place = 0;
14595 }
14596 }
14597 break;
14600 default:
14602 /* Any other notes should not be present at this point in the
14603 compilation.  */
14604 gcc_unreachable ();
14605 }
14607 if (place)
14608 {
14609 XEXP (note, 1) = REG_NOTES (place);
14610 REG_NOTES (place) = note;
14611 }
14613 if (place2)
14614 add_shallow_copy_of_reg_note (place2, note);
14615 }
14616 }
14618 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14619 I3, I2, and I1 to new locations. This is also called to add a link
14620 pointing at I3 when I3's destination is changed. */
14622 static void
14623 distribute_links (struct insn_link *links)
14624 {
14625 struct insn_link *link, *next_link;
14627 for (link = links; link; link = next_link)
14628 {
14629 rtx_insn *place = 0;
14630 rtx_insn *insn;
14631 rtx set, reg;
14633 next_link = link->next;
14635 /* If the insn that this link points to is a NOTE, ignore it. */
14636 if (NOTE_P (link->insn))
14637 continue;
14639 set = 0;
14640 rtx pat = PATTERN (link->insn);
14641 if (GET_CODE (pat) == SET)
14642 set = pat;
14643 else if (GET_CODE (pat) == PARALLEL)
14644 {
14645 int i;
14646 for (i = 0; i < XVECLEN (pat, 0); i++)
14647 {
14648 set = XVECEXP (pat, 0, i);
14649 if (GET_CODE (set) != SET)
14650 continue;
14652 reg = SET_DEST (set);
14653 while (GET_CODE (reg) == ZERO_EXTRACT
14654 || GET_CODE (reg) == STRICT_LOW_PART
14655 || GET_CODE (reg) == SUBREG)
14656 reg = XEXP (reg, 0);
14658 if (!REG_P (reg))
14659 continue;
14661 if (REGNO (reg) == link->regno)
14662 break;
14663 }
14664 if (i == XVECLEN (pat, 0))
14665 continue;
14666 }
14667 else
14668 continue;
14670 reg = SET_DEST (set);
14672 while (GET_CODE (reg) == ZERO_EXTRACT
14673 || GET_CODE (reg) == STRICT_LOW_PART
14674 || GET_CODE (reg) == SUBREG)
14675 reg = XEXP (reg, 0);
14677 /* A LOG_LINK is defined as being placed on the first insn that uses
14678 a register and points to the insn that sets the register. Start
14679 searching at the next insn after the target of the link and stop
14680 when we reach a set of the register or the end of the basic block.
14682 Note that this correctly handles the link that used to point from
14683 I3 to I2. Also note that not much searching is typically done here
14684 since most links don't point very far away. */
14686 for (insn = NEXT_INSN (link->insn);
14687 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14688 || BB_HEAD (this_basic_block->next_bb) != insn));
14689 insn = NEXT_INSN (insn))
14690 if (DEBUG_INSN_P (insn))
14691 continue;
14692 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14693 {
14694 if (reg_referenced_p (reg, PATTERN (insn)))
14695 place = insn;
14696 break;
14697 }
14698 else if (CALL_P (insn)
14699 && find_reg_fusage (insn, USE, reg))
14700 {
14701 place = insn;
14702 break;
14703 }
14704 else if (INSN_P (insn) && reg_set_p (reg, insn))
14705 break;
14707 /* If we found a place to put the link, place it there unless there
14708 is already a link to the same insn as LINK at that point.  */
14710 if (place)
14711 {
14712 struct insn_link *link2;
14714 FOR_EACH_LOG_LINK (link2, place)
14715 if (link2->insn == link->insn && link2->regno == link->regno)
14716 break;
14718 if (link2 == NULL)
14719 {
14720 link->next = LOG_LINKS (place);
14721 LOG_LINKS (place) = link;
14723 /* Set added_links_insn to the earliest insn we added a
14724 link to.  */
14725 if (added_links_insn == 0
14726 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14727 added_links_insn = place;
14728 }
14729 }
14730 }
14731 }
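/* Editorial sketch of LOG_LINKS redistribution (insn numbers invented):
   if

     insn 50: (set (reg:SI 70) ...)
     insn 51: (set (reg:SI 71) (reg:SI 70))   ; had a link 51 -> 50
     insn 52: (set (reg:SI 72) (reg:SI 70))

   is combined so that insn 51 disappears, the link is re-walked forward
   from insn 50; the first remaining use of register 70 is insn 52, so
   the link is re-attached there unless an equivalent link already
   exists.  */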
14733 /* Check for any register or memory mentioned in EQUIV that is not
14734 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14735 of EXPR where some registers may have been replaced by constants. */
14737 static bool
14738 unmentioned_reg_p (rtx equiv, rtx expr)
14739 {
14740 subrtx_iterator::array_type array;
14741 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14742 {
14743 const_rtx x = *iter;
14744 if ((REG_P (x) || MEM_P (x))
14745 && !reg_mentioned_p (x, expr))
14746 return true;
14747 }
14748 return false;
14749 }
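/* Editorial usage sketch for unmentioned_reg_p (hypothetical rtl): with
   EXPR = (plus:SI (reg:SI 80) (reg:SI 81)) and
   EQUIV = (plus:SI (reg:SI 80) (const_int 3)), every REG or MEM in EQUIV
   also appears in EXPR, so the function returns false and EQUIV is an
   acceptable specialization; an EQUIV mentioning (reg:SI 82) would make
   it return true.  */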
14751 DEBUG_FUNCTION void
14752 dump_combine_stats (FILE *file)
14756 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14757 combine_attempts, combine_merges, combine_extras, combine_successes);
14760 DEBUG_FUNCTION void
14761 dump_combine_total_stats (FILE *file)
14762 {
14763 fprintf
14764 (file,
14765 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14766 total_attempts, total_merges, total_extras, total_successes);
14767 }
14769 /* Try combining insns through substitution. */
14770 static unsigned int
14771 rest_of_handle_combine (void)
14772 {
14773 int rebuild_jump_labels_after_combine;
14775 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14776 df_note_add_problem ();
14777 df_analyze ();
14779 regstat_init_n_sets_and_refs ();
14780 reg_n_sets_max = max_reg_num ();
14782 rebuild_jump_labels_after_combine
14783 = combine_instructions (get_insns (), max_reg_num ());
14785 /* Combining insns may have turned an indirect jump into a
14786 direct jump. Rebuild the JUMP_LABEL fields of jumping
14787 instructions.  */
14788 if (rebuild_jump_labels_after_combine)
14789 {
14790 if (dom_info_available_p (CDI_DOMINATORS))
14791 free_dominance_info (CDI_DOMINATORS);
14792 timevar_push (TV_JUMP);
14793 rebuild_jump_labels (get_insns ());
14794 cleanup_cfg (0);
14795 timevar_pop (TV_JUMP);
14796 }
14798 regstat_free_n_sets_and_refs ();
14800 return 0;
14801 }
14803 namespace {
14804 const pass_data pass_data_combine =
14806 RTL_PASS, /* type */
14807 "combine", /* name */
14808 OPTGROUP_NONE, /* optinfo_flags */
14809 TV_COMBINE, /* tv_id */
14810 PROP_cfglayout, /* properties_required */
14811 0, /* properties_provided */
14812 0, /* properties_destroyed */
14813 0, /* todo_flags_start */
14814 TODO_df_finish, /* todo_flags_finish */
14815 };
14817 class pass_combine : public rtl_opt_pass
14818 {
14819 public:
14820 pass_combine (gcc::context *ctxt)
14821 : rtl_opt_pass (pass_data_combine, ctxt)
14822 {}
14824 /* opt_pass methods: */
14825 virtual bool gate (function *) { return (optimize > 0); }
14826 virtual unsigned int execute (function *)
14827 {
14828 return rest_of_handle_combine ();
14829 }
14831 }; // class pass_combine
14833 } // anon namespace
14835 rtl_opt_pass *
14836 make_pass_combine (gcc::context *ctxt)
14837 {
14838 return new pass_combine (ctxt);
14839 }