1 /* RTL dead store elimination.
2 Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Sandiford <rsandifor@codesourcery.com>
5 and Kenneth Zadeck <zadeck@naturalbridge.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
28 #include "coretypes.h"
34 #include "hard-reg-set.h"
39 #include "tree-pass.h"
40 #include "alloc-pool.h"
42 #include "insn-config.h"
48 /* This file contains three techniques for performing Dead Store
51 * The first technique performs dse locally on any base address. It
52 is based on cselib, which is a local value numbering technique.
53 This technique is local to a basic block but deals with a fairly general set of addresses.
56 * The second technique performs dse globally but is restricted to
57 base addresses that are either constant or are relative to the frame of the function.
60 * The third technique (which is only done after register allocation)
61 processes the spill slots. This differs from the second
62 technique because it takes advantage of the fact that spilling is
63 completely free from the effects of aliasing.
65 Logically, dse is a backwards dataflow problem. A store can be
66 deleted if it cannot be reached in the backward direction by any
67 use of the value being stored. However, the local technique uses a
68 forwards scan of the basic block because cselib requires that the
69 block be processed in that order.
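
   As an illustrative sketch (the insns below are invented, not taken
   from any testsuite):

     (set (mem:SI (reg/f fp)) (reg 100))   <- insn 10
     (set (mem:SI (reg/f fp)) (reg 101))   <- insn 11

   the store in insn 10 is dead because insn 11 overwrites every byte
   of it before any read; the forward scan discovers this when
   recording insn 11 clears all of insn 10's positions_needed bits.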
71 The pass is logically broken into 7 steps:
75 1) The local algorithm, as well as scanning the insns for the two global algorithms.
78 2) Analysis to see if the global algs are necessary. In the case
79 of stores based on a constant address, there must be at least two
80 stores to that address, to make it possible to delete some of the
81 stores. In the case of stores off of the frame or spill related
82 stores, only one store to an address is necessary because those
83 stores die at the end of the function.
85 3) Set up the global dataflow equations based on processing the
86 info parsed in the first step.
88 4) Solve the dataflow equations.
90 5) Delete the insns that the global analysis has indicated are dead.
95 This step uses cselib and canon_rtx to build the largest expression
96 possible for each address. This pass is a forwards pass through
97 each basic block. From the point of view of the global technique,
98 the first pass could examine a block in either direction. The
99 forwards ordering is to accommodate cselib.
101 We make a simplifying assumption: addresses fall into four broad categories:
104 1) base has rtx_varies_p == false, offset is constant.
105 2) base has rtx_varies_p == false, offset is variable.
106 3) base has rtx_varies_p == true, offset is constant.
107 4) base has rtx_varies_p == true, offset is variable.
109 The local passes are able to process all 4 kinds of addresses. The
110 global pass only handles (1).
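
   For example (hypothetical addresses, for illustration only):

     1) (plus (reg/f fp) (const_int 8))    invariant base, constant offset
     2) (plus (reg/f fp) (reg i))          invariant base, variable offset
     3) (plus (reg p) (const_int 8))       varying base, constant offset
     4) (plus (reg p) (reg i))             varying base, variable offset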
112 The global problem is formulated as follows:
114 A store, S1, to address A, where A is not relative to the stack
115 frame, can be eliminated if all paths from S1 to the end of the
116 function contain another store to A before a read to A.
118 If the address A is relative to the stack frame, a store S2 to A
119 can be eliminated if there are no paths from S2 that reach the
120 end of the function that read A before another store to A. In
121 this case S2 can be deleted even if there are paths from S2 to the
122 end of the function that have no reads or writes to A. This
123 second case allows stores to the stack frame to be deleted that
124 would otherwise die when the function returns. This cannot be
125 done if stores_off_frame_dead_at_return is not true. See the doc
126 for that variable for the cases in which it is false.
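
   As a sketch of the difference (invented example): if S1 stores to a
   non-frame address A and some path from S1 reaches the end of the
   function without another store to A, S1 must be kept, because A may
   still be read after the function returns. If A is instead a frame
   slot and stores_off_frame_dead_at_return is true, such a path only
   blocks the deletion if it actually reads A.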
128 The global problem is formulated as a backwards set union
129 dataflow problem where the stores are the gens and reads are the
130 kills. Set union problems are rare and require some special
131 handling given our representation of bitmaps. A straightforward
132 implementation requires a lot of bitmaps filled with 1s.
133 These are expensive and cumbersome in our bitmap formulation so
134 care has been taken to avoid large vectors filled with 1s. See
135 the comments in bb_info and in the dataflow confluence functions for details.
138 There are two places for further enhancements to this algorithm:
140 1) The original dse which was embedded in a pass called flow also
141 did local address forwarding. For example in

     A <- r100
     ... <- A

146 flow would replace the right hand side of the second insn with a
147 reference to r100. Most of the information needed to add this
148 to this pass is available. It has not been done because it is a lot of work
149 in the case where r100 is assigned to between the first and
150 second insn and/or the second insn is a load of part of the value
151 stored by the first insn.
153 insn 5 in gcc.c-torture/compile/990203-1.c simple case.
154 insn 15 in gcc.c-torture/execute/20001017-2.c simple case.
155 insn 25 in gcc.c-torture/execute/20001026-1.c simple case.
156 insn 44 in gcc.c-torture/execute/20010910-1.c simple case.
158 2) The cleaning up of spill code is quite profitable. It currently
159 depends on reading tea leaves and chicken entrails left by reload.
160 This pass depends on reload creating a singleton alias set for each
161 spill slot and telling the next dse pass which of these alias sets
162 are the singletons. Rather than analyze the addresses of the
163 spills, dse's spill processing just does analysis of the loads and
164 stores that use those alias sets. There are three cases where this falls short:
167 a) Reload sometimes creates the slot for one mode of access, and
168 then inserts loads and/or stores for a smaller mode. In this
169 case, the current code just punts on the slot. The proper thing
170 to do is to back out and use one bit vector position for each
171 byte of the entity associated with the slot. This depends on
172 KNOWING that reload always generates the accesses for each of the
173 bytes in some canonical (read: easy to understand several
174 passes after reload happens) way.
176 b) Reload sometimes decides that the spill slot it allocated was not
177 large enough for the mode and goes back and allocates more slots
178 with the same mode and alias set. The backout in this case is a
179 little more graceful than (a). In this case the slot is unmarked
180 as being a spill slot and if the final address comes out to be based
181 off the frame pointer, the global algorithm handles this slot.
183 c) For any pass that may prespill, there is currently no
184 mechanism to tell the dse pass that the slot being used has the
185 special properties that reload uses. It may be that all that is
186 required is to have those passes make the same calls that reload
187 does, assuming that the alias sets can be manipulated in the same way. */
190 /* There are limits to the size of constant offsets we model for the
191 global problem. There are certainly test cases that exceed this
192 limit; however, it is unlikely that there are important programs
193 that really have constant offsets this large. */
194 #define MAX_OFFSET (64 * 1024)
197 static bitmap scratch = NULL;
200 /* This structure holds information about a candidate store. */
204 /* False means this is a clobber. */
207 /* The id of the mem group of the base address. If rtx_varies_p is
208 true, this is -1. Otherwise, it is the index into the group table. */
212 /* This is the cselib value. */
213 cselib_val *cse_base;
215 /* This is the canonized mem. */
218 /* The result of get_addr on mem. */
221 /* If this is non-zero, it is the alias set of a spill location. */
222 HOST_WIDE_INT alias_set;
224 /* The offsets of the first byte and of the byte after the last byte
225 associated with the operation. */
228 /* A bitmask as wide as the number of bytes in the word that
229 contains a 1 if the byte may be needed. The store is unused if
230 all of the bits are 0. */
231 long positions_needed;
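  /* For example (illustrative): a 4 byte store starts with
     positions_needed == 0xf; a later 2 byte store covering its first
     two bytes clears bits 0 and 1, leaving 0xc; once every bit has
     been cleared the store is dead and can be deleted.  */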
233 /* The next store info for this insn. */
234 struct store_info *next;
236 /* The right hand side of the store. This is used if there is a
237 subsequent reload of the mem's address somewhere later in the basic block. */
242 typedef struct store_info *store_info_t;
243 static alloc_pool cse_store_info_pool;
244 static alloc_pool rtx_store_info_pool;
246 /* This structure holds information about a load. These are only
247 built for rtx bases. */
250 /* The id of the mem group of the base address. */
253 /* If this is non-zero, it is the alias set of a spill location. */
254 HOST_WIDE_INT alias_set;
256 /* The offsets of the first byte and of the byte after the last byte
257 with the operation. If begin == end == 0, the read did not have
258 a constant offset. */
261 /* The mem being read. */
264 /* The next read_info for this insn. */
265 struct read_info *next;
267 typedef struct read_info *read_info_t;
268 static alloc_pool read_info_pool;
271 /* One of these records is created for each insn. */
275 /* Set true if the insn contains a store but the insn itself cannot
276 be deleted. This is set if the insn is a parallel and there is
277 more than one non-dead output or if the insn is in some way volatile. */
281 /* This field is only used by the global algorithm. It is set true
282 if the insn contains any read of mem except for a kind (1) read. This is
283 also set if the insn is a call or has a clobber mem. If the insn
284 contains a wild read, the use_rec will be null. */
287 /* This field is set for const function calls. Const functions
288 cannot read memory, but they can read the stack because that is
289 where they may get their parms. So having this set is less
290 severe than a wild read: it just means that only the stores to
291 the stack are killed, rather than all stores.
294 /* This is true if any of the sets within the store contains a
295 cselib base. Such stores can only be deleted by the local algorithm. */
297 bool contains_cselib_groups;
302 /* The list of mem sets or mem clobbers that are contained in this
303 insn. If the insn is deletable, it contains only one mem set.
304 But it could also contain clobbers. Insns that contain more than
305 one mem set are not deletable, but each of those mems is here in
306 order to provide info to delete other insns. */
307 store_info_t store_rec;
309 /* The linked list of mem uses in this insn. Only the reads from
310 rtx bases are listed here. The reads to cselib bases are
311 completely processed during the first scan and so are never created. */
313 read_info_t read_rec;
315 /* The prev insn in the basic block. */
316 struct insn_info * prev_insn;
318 /* The linked list of insns that are in consideration for removal in
319 the forwards pass through the basic block. This pointer may be
320 trash as it is not cleared when a wild read occurs. The only
321 time it is guaranteed to be correct is when the traversal starts
322 at active_local_stores. */
323 struct insn_info * next_local_store;
326 typedef struct insn_info *insn_info_t;
327 static alloc_pool insn_info_pool;
329 /* The linked list of stores that are under consideration in this
331 static insn_info_t active_local_stores;
336 /* Pointer to the insn info for the last insn in the block. These
337 are linked so this is how all of the insns are reached. During
338 scanning this is the current insn being scanned. */
339 insn_info_t last_insn;
341 /* The info for the global dataflow problem. */
344 /* This is set if the transfer function should AND in the wild_read
345 bitmap before applying the kill and gen sets. That vector knocks
346 out most of the bits in the bitmap and thus speeds up the analysis. */
348 bool apply_wild_read;
350 /* The set of store positions that exist in this block before a wild read. */
353 /* The set of load positions that exist in this block above the
354 same position of a store. */
357 /* The set of stores that reach the top of the block without being killed by a read.
360 Do not represent the in set if it is all ones. Note that this is
361 what the bitvector should logically be initialized to for a set
362 intersection problem. However, like the kill set, this is too
363 expensive. So initially, the in set will only be created for the
364 exit block and any block that contains a wild read. */
367 /* The set of stores that reach the bottom of the block from its successors.
370 Do not represent the out set if it is all ones. Note that this is
371 what the bitvector should logically be initialized to for a set
372 intersection problem. However, like the kill and in set, this is
373 too expensive. So what is done is that the confluence operator
374 just initializes the vector from one of the out sets of the
375 successors of the block. */
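  /* As a sketch of the intended dataflow (set notation, not code from
     this file): the transfer function is roughly

       in = gen | (out & ~kill)

     with the wild_read vector ANDed into out first when
     apply_wild_read is set, and the confluence computes out as the
     intersection of the successors' in sets, seeded by copying from
     one successor rather than from an all-ones vector.  */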
379 typedef struct bb_info *bb_info_t;
380 static alloc_pool bb_info_pool;
382 /* Table to hold all bb_infos. */
383 static bb_info_t *bb_table;
385 /* There is a group_info for each rtx base that is used to reference
386 memory. There are also not many of the rtx bases because they are
387 very limited in scope. */
391 /* The actual base of the address. */
394 /* The sequential id of the base. This allows us to have a
395 canonical ordering of these that is not based on addresses. */
398 /* A mem wrapped around the base pointer for the group in order to
399 do read dependency. */
402 /* Canonized version of base_mem, most likely the same thing. */
405 /* These two sets of two bitmaps are used to keep track of how many
406 stores are actually referencing that position from this base. We
407 only do this for rtx bases as this will be used to assign
408 positions in the bitmaps for the global problem. Bit N is set in
409 store1 on the first store for offset N. Bit N is set in store2
410 for the second store to offset N. This is all we need since we
411 only care about offsets that have two or more stores for them.
413 The "_n" suffix is for offsets less than 0 and the "_p" suffix is
414 for 0 and greater offsets.
416 There is one special case here, for stores into the stack frame,
417 we will OR store1 into store2 before deciding which stores to look
418 at globally. This is because stores to the stack frame that have
419 no other reads before the end of the function can also be deleted. */
421 bitmap store1_n, store1_p, store2_n, store2_p;
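  /* For example (illustrative): the first store to offset 8 from this
     base sets bit 8 in store1_p; a second store to offset 8 also sets
     bit 8 in store2_p.  Only offsets present in store2 get positions
     in the global bitmaps, except that for frame related bases store1
     is ORed into store2 first, as described above.  */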
423 /* The positions in this bitmap have the same assignments as the in,
424 out, gen and kill bitmaps. This bitmap is all zeros except for
425 the positions that are occupied by stores for this group. */
428 /* True if there are any positions that are to be processed globally. */
430 bool process_globally;
432 /* True if the base of this group is either the frame_pointer or
433 hard_frame_pointer. */
436 /* The offset_map is used to map the offsets from this base into
437 positions in the global bitmaps. It is only created after all of
438 the stores have been scanned and we know which ones we care about. */
440 int *offset_map_n, *offset_map_p;
441 int offset_map_size_n, offset_map_size_p;
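  /* For example (illustrative): if offsets 4 and 8 are the only ones
     in this group chosen for global processing, step 2 might assign
     offset_map_p[4] = 1 and offset_map_p[8] = 2; get_bitmap_index
     then returns those positions, with 0 meaning "not tracked".  */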
443 typedef struct group_info *group_info_t;
444 typedef const struct group_info *const_group_info_t;
445 static alloc_pool rtx_group_info_pool;
447 /* Tables of group_info structures, hashed by base value. */
448 static htab_t rtx_group_table;
450 /* Index into the rtx_group_vec. */
451 static int rtx_group_next_id;
453 DEF_VEC_P(group_info_t);
454 DEF_VEC_ALLOC_P(group_info_t,heap);
456 static VEC(group_info_t,heap) *rtx_group_vec;
459 /* This structure holds the set of changes that are being deferred
460 when removing a read operation. See replace_read.
461 struct deferred_change
464 /* The mem that is being replaced. */
467 /* The reg it is being replaced with. */
470 struct deferred_change *next;
473 typedef struct deferred_change *deferred_change_t;
474 static alloc_pool deferred_change_pool;
476 static deferred_change_t deferred_change_list = NULL;
478 /* These are used to hold the alias sets of spill variables. Since
479 these are never aliased and there may be a lot of them, it makes
480 sense to treat them specially. This bitvector is only allocated in
481 calls from dse_record_singleton_alias_set which currently is only
482 made during reload1. So when dse is called before reload this
483 mechanism does nothing. */
485 static bitmap clear_alias_sets = NULL;
487 /* The set of clear_alias_sets that have been disqualified because
488 there are loads or stores using a different mode than the alias set
489 was registered with. */
490 static bitmap disqualified_clear_alias_sets = NULL;
492 /* The group that holds all of the clear_alias_sets. */
493 static group_info_t clear_alias_group;
495 /* The modes of the clear_alias_sets. */
496 static htab_t clear_alias_mode_table;
498 /* Hash table element to look up the mode for an alias set. */
499 struct clear_alias_mode_holder
501 HOST_WIDE_INT alias_set;
502 enum machine_mode mode;
505 static alloc_pool clear_alias_mode_pool;
507 /* This is true except for two cases:
508 (1) current_function_stdarg -- i.e. we cannot do this
509 for vararg functions because they play games with the frame.
510 (2) In Ada, it is sometimes not safe to assume that any stores
511 based off the stack frame go dead at the exit to a function. */
512 static bool stores_off_frame_dead_at_return;
514 /* Counter for stats. */
515 static int globally_deleted;
516 static int locally_deleted;
517 static int spill_deleted;
519 static bitmap all_blocks;
521 /* The number of bits used in the global bitmaps. */
522 static unsigned int current_position;
525 static bool gate_dse (void);
528 /*----------------------------------------------------------------------------
532 ----------------------------------------------------------------------------*/
534 /* Hashtable callbacks for the clear_alias_mode_table, which maps
535 alias sets to the mode with which they were registered. */
538 clear_alias_mode_eq (const void *p1, const void *p2)
540 const struct clear_alias_mode_holder * h1
541 = (const struct clear_alias_mode_holder *) p1;
542 const struct clear_alias_mode_holder * h2
543 = (const struct clear_alias_mode_holder *) p2;
544 return h1->alias_set == h2->alias_set;
549 clear_alias_mode_hash (const void *p)
551 const struct clear_alias_mode_holder *holder
552 = (const struct clear_alias_mode_holder *) p;
553 return holder->alias_set;
557 /* Find the entry associated with ALIAS_SET. */
559 static struct clear_alias_mode_holder *
560 clear_alias_set_lookup (HOST_WIDE_INT alias_set)
562 struct clear_alias_mode_holder tmp_holder;
565 tmp_holder.alias_set = alias_set;
566 slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, NO_INSERT);
573 /* Hashtable callbacks for maintaining the "bases" field of
574 store_group_info, given that the addresses are function invariants. */
577 invariant_group_base_eq (const void *p1, const void *p2)
579 const_group_info_t gi1 = (const_group_info_t) p1;
580 const_group_info_t gi2 = (const_group_info_t) p2;
581 return rtx_equal_p (gi1->rtx_base, gi2->rtx_base);
586 invariant_group_base_hash (const void *p)
588 const_group_info_t gi = (const_group_info_t) p;
590 return hash_rtx (gi->rtx_base, Pmode, &do_not_record, NULL, false);
594 /* Get the GROUP for BASE. Add a new group if it is not there. */
597 get_group_info (rtx base)
599 struct group_info tmp_gi;
605 /* Find the store_base_info structure for BASE, creating a new one if necessary. */
607 tmp_gi.rtx_base = base;
608 slot = htab_find_slot (rtx_group_table, &tmp_gi, INSERT);
609 gi = (group_info_t) *slot;
613 if (!clear_alias_group)
615 clear_alias_group = gi = pool_alloc (rtx_group_info_pool);
616 memset (gi, 0, sizeof (struct group_info));
617 gi->id = rtx_group_next_id++;
618 gi->store1_n = BITMAP_ALLOC (NULL);
619 gi->store1_p = BITMAP_ALLOC (NULL);
620 gi->store2_n = BITMAP_ALLOC (NULL);
621 gi->store2_p = BITMAP_ALLOC (NULL);
622 gi->group_kill = BITMAP_ALLOC (NULL);
623 gi->process_globally = false;
624 gi->offset_map_size_n = 0;
625 gi->offset_map_size_p = 0;
626 gi->offset_map_n = NULL;
627 gi->offset_map_p = NULL;
628 VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
630 return clear_alias_group;
635 *slot = gi = pool_alloc (rtx_group_info_pool);
637 gi->id = rtx_group_next_id++;
638 gi->base_mem = gen_rtx_MEM (QImode, base);
639 gi->canon_base_mem = canon_rtx (gi->base_mem);
640 gi->store1_n = BITMAP_ALLOC (NULL);
641 gi->store1_p = BITMAP_ALLOC (NULL);
642 gi->store2_n = BITMAP_ALLOC (NULL);
643 gi->store2_p = BITMAP_ALLOC (NULL);
644 gi->group_kill = BITMAP_ALLOC (NULL);
645 gi->process_globally = false;
647 (base == frame_pointer_rtx) || (base == hard_frame_pointer_rtx);
648 gi->offset_map_size_n = 0;
649 gi->offset_map_size_p = 0;
650 gi->offset_map_n = NULL;
651 gi->offset_map_p = NULL;
652 VEC_safe_push (group_info_t, heap, rtx_group_vec, gi);
659 /* Initialization of data structures. */
665 globally_deleted = 0;
668 scratch = BITMAP_ALLOC (NULL);
671 = create_alloc_pool ("rtx_store_info_pool",
672 sizeof (struct store_info), 100);
674 = create_alloc_pool ("read_info_pool",
675 sizeof (struct read_info), 100);
677 = create_alloc_pool ("insn_info_pool",
678 sizeof (struct insn_info), 100);
680 = create_alloc_pool ("bb_info_pool",
681 sizeof (struct bb_info), 100);
683 = create_alloc_pool ("rtx_group_info_pool",
684 sizeof (struct group_info), 100);
686 = create_alloc_pool ("deferred_change_pool",
687 sizeof (struct deferred_change), 10);
689 rtx_group_table = htab_create (11, invariant_group_base_hash,
690 invariant_group_base_eq, NULL);
692 bb_table = XCNEWVEC (bb_info_t, last_basic_block);
693 rtx_group_next_id = 0;
695 stores_off_frame_dead_at_return =
696 (!(TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE
697 && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))))
698 && (!current_function_stdarg);
700 init_alias_analysis ();
702 if (clear_alias_sets)
703 clear_alias_group = get_group_info (NULL);
705 clear_alias_group = NULL;
710 /*----------------------------------------------------------------------------
713 Scan all of the insns. Any random ordering of the blocks is fine.
714 Each block is scanned in forward order to accommodate cselib which
715 is used to remove stores with non-constant bases.
716 ----------------------------------------------------------------------------*/
718 /* Delete all of the store_info recs from INSN_INFO. */
721 free_store_info (insn_info_t insn_info)
723 store_info_t store_info = insn_info->store_rec;
726 store_info_t next = store_info->next;
727 if (store_info->cse_base)
728 pool_free (cse_store_info_pool, store_info);
730 pool_free (rtx_store_info_pool, store_info);
734 insn_info->cannot_delete = true;
735 insn_info->contains_cselib_groups = false;
736 insn_info->store_rec = NULL;
746 /* Add an insn to do the add inside X if it is a
747 PRE/POST-INC/DEC/MODIFY. D is a structure containing the insn and
748 the size of the mode of the MEM that this is inside of. */
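/* For example (a sketch, not drawn from any testsuite): if a dead
   store uses the address (mem:SI (post_inc (reg r1))), deleting the
   insn outright would lose the side effect, so an explicit

     (set (reg r1) (plus (reg r1) (const_int 4)))

   is emitted before the doomed insn to preserve the increment.  */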
751 replace_inc_dec (rtx *r, void *d)
754 struct insn_size *data = (struct insn_size *)d;
755 switch (GET_CODE (x))
760 rtx r1 = XEXP (x, 0);
761 rtx c = gen_int_mode (data->size, Pmode);
762 add_insn_before (data->insn,
763 gen_rtx_SET (Pmode, r1,
764 gen_rtx_PLUS (Pmode, r1, c)),
772 rtx r1 = XEXP (x, 0);
773 rtx c = gen_int_mode (-data->size, Pmode);
774 add_insn_before (data->insn,
775 gen_rtx_SET (Pmode, r1,
776 gen_rtx_PLUS (Pmode, r1, c)),
784 /* We can reuse the add because we are about to delete the
785 insn that contained it. */
786 rtx add = XEXP (x, 0);
787 rtx r1 = XEXP (add, 0);
788 add_insn_before (data->insn,
789 gen_rtx_SET (Pmode, r1, add), NULL);
799 /* If X is a MEM, check the address to see if it is PRE/POST-INC/DEC/MODIFY
800 and generate an add to replace that. */
803 replace_inc_dec_mem (rtx *r, void *d)
806 if (GET_CODE (x) == MEM)
808 struct insn_size data;
810 data.size = GET_MODE_SIZE (GET_MODE (x));
813 for_each_rtx (&XEXP (x, 0), replace_inc_dec, &data);
820 /* Before we delete INSN, make sure that the auto inc/dec, if it is
821 there, is split into a separate insn. */
824 check_for_inc_dec (rtx insn)
826 rtx note = find_reg_note (insn, REG_INC, NULL_RTX);
828 for_each_rtx (&insn, replace_inc_dec_mem, insn);
832 /* Delete the insn and free all of the fields inside INSN_INFO. */
835 delete_dead_store_insn (insn_info_t insn_info)
837 read_info_t read_info;
842 check_for_inc_dec (insn_info->insn);
845 fprintf (dump_file, "Locally deleting insn %d ",
846 INSN_UID (insn_info->insn));
847 if (insn_info->store_rec->alias_set)
848 fprintf (dump_file, "alias set %d\n",
849 (int)insn_info->store_rec->alias_set);
851 fprintf (dump_file, "\n");
854 free_store_info (insn_info);
855 read_info = insn_info->read_rec;
859 read_info_t next = read_info->next;
860 pool_free (read_info_pool, read_info);
863 insn_info->read_rec = NULL;
865 delete_insn (insn_info->insn);
867 insn_info->insn = NULL;
869 insn_info->wild_read = false;
873 /* Set the store* bitmaps and the offset_map_size* fields in GROUP based on OFFSET and WIDTH. */
877 set_usage_bits (group_info_t group, HOST_WIDE_INT offset, HOST_WIDE_INT width)
881 if ((offset > -MAX_OFFSET) && (offset < MAX_OFFSET))
882 for (i = offset; i < offset + width; i++)
889 store1 = group->store1_n;
890 store2 = group->store2_n;
895 store1 = group->store1_p;
896 store2 = group->store2_p;
900 if (bitmap_bit_p (store1, ai))
901 bitmap_set_bit (store2, ai);
904 bitmap_set_bit (store1, ai);
907 if (group->offset_map_size_n < ai)
908 group->offset_map_size_n = ai;
912 if (group->offset_map_size_p < ai)
913 group->offset_map_size_p = ai;
920 /* Set the BB_INFO so that the last insn is marked as a wild read. */
923 add_wild_read (bb_info_t bb_info)
925 insn_info_t insn_info = bb_info->last_insn;
926 read_info_t *ptr = &insn_info->read_rec;
930 read_info_t next = (*ptr)->next;
931 if ((*ptr)->alias_set == 0)
933 pool_free (read_info_pool, *ptr);
939 insn_info->wild_read = true;
940 active_local_stores = NULL;
944 /* Return true if X is a constant or one of the registers that behaves
945 as a constant over the life of a function. */
948 const_or_frame_p (rtx x)
950 switch (GET_CODE (x))
953 return MEM_READONLY_P (x);
964 /* Note that we have to test for the actual rtx used for the frame
965 and arg pointers and not just the register number in case we have
966 eliminated the frame and/or arg pointer and are using it for pseudos. */
968 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
969 /* The arg pointer varies if it is not a fixed register. */
970 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
971 || x == pic_offset_table_rtx)
980 /* Take all reasonable action to put the address of MEM into the form
981 that we can do analysis on.
983 The gold standard is to get the address into the form: address +
984 OFFSET where address is something that rtx_varies_p considers a
985 constant. When we can get the address in this form, we can do
986 global analysis on it. Note that for constant bases, address is
987 not actually returned, only the group_id. The address can be obtained from that.
990 If that fails, we try cselib to get a value we can at least use
991 locally. If that fails we return false.
993 The GROUP_ID is set to -1 for cselib bases and the index of the
994 group for non_varying bases.
996 FOR_READ is true if this is a mem read and false if not. */
999 canon_address (rtx mem,
1000 HOST_WIDE_INT *alias_set_out,
1002 HOST_WIDE_INT *offset,
1005 rtx mem_address = XEXP (mem, 0);
1006 rtx expanded_address, address;
1007 /* Make sure that cselib has initialized all of the operands of
1008 the address before asking it to do the subst. */
1010 if (clear_alias_sets)
1012 /* If this is a spill, do not do any further processing. */
1013 HOST_WIDE_INT alias_set = MEM_ALIAS_SET (mem);
1015 fprintf (dump_file, "found alias set %d\n", (int)alias_set);
1016 if (bitmap_bit_p (clear_alias_sets, alias_set))
1018 struct clear_alias_mode_holder *entry
1019 = clear_alias_set_lookup (alias_set);
1021 /* If the modes do not match, we cannot process this set. */
1022 if (entry->mode != GET_MODE (mem))
1026 "disqualifying alias set %d, (%s) != (%s)\n",
1027 (int)alias_set, GET_MODE_NAME (entry->mode),
1028 GET_MODE_NAME (GET_MODE (mem)));
1030 bitmap_set_bit (disqualified_clear_alias_sets, alias_set);
1034 *alias_set_out = alias_set;
1035 *group_id = clear_alias_group->id;
1042 cselib_lookup (mem_address, Pmode, 1);
1046 fprintf (dump_file, " mem: ");
1047 print_inline_rtx (dump_file, mem_address, 0);
1048 fprintf (dump_file, "\n");
1051 /* Use cselib to replace all of the reg references with the full
1052 expression. This will take care of the case where we have
1054 r_x = base + offset;
val = *r_x;

by making it into

1059 val = *(base + offset);
1062 expanded_address = cselib_expand_value_rtx (mem_address, scratch, 5);
1064 /* If this fails, just go with the mem_address. */
1065 if (!expanded_address)
1066 expanded_address = mem_address;
1068 /* Split the address into canonical BASE + OFFSET terms. */
1069 address = canon_rtx (expanded_address);
1075 fprintf (dump_file, "\n after cselib_expand address: ");
1076 print_inline_rtx (dump_file, expanded_address, 0);
1077 fprintf (dump_file, "\n");
1079 fprintf (dump_file, "\n after canon_rtx address: ");
1080 print_inline_rtx (dump_file, address, 0);
1081 fprintf (dump_file, "\n");
1084 if (GET_CODE (address) == CONST)
1085 address = XEXP (address, 0);
1087 if (GET_CODE (address) == PLUS && GET_CODE (XEXP (address, 1)) == CONST_INT)
1089 *offset = INTVAL (XEXP (address, 1));
1090 address = XEXP (address, 0);
1093 if (const_or_frame_p (address))
1095 group_info_t group = get_group_info (address);
1098 fprintf (dump_file, " gid=%d offset=%d \n", group->id, (int)*offset);
1100 *group_id = group->id;
1104 *base = cselib_lookup (address, Pmode, true);
1110 fprintf (dump_file, " no cselib val - should be a wild read.\n");
1114 fprintf (dump_file, " varying cselib base=%d offset = %d\n",
1115 (*base)->value, (int)*offset);
1121 /* Clear the rhs field from the active_local_stores array. */
1124 clear_rhs_from_active_local_stores (void)
1126 insn_info_t ptr = active_local_stores;
1130 store_info_t store_info = ptr->store_rec;
1131 /* Skip the clobbers. */
1132 while (!store_info->is_set)
1133 store_info = store_info->next;
1135 store_info->rhs = NULL;
1137 ptr = ptr->next_local_store;
1142 /* BODY is an instruction pattern that belongs to INSN. Return 1 if
1143 there is a candidate store, after adding it to the appropriate
1144 local store group if so. */
1147 record_store (rtx body, bb_info_t bb_info)
1150 HOST_WIDE_INT offset = 0;
1151 HOST_WIDE_INT width = 0;
1152 HOST_WIDE_INT spill_alias_set;
1153 insn_info_t insn_info = bb_info->last_insn;
1154 store_info_t store_info = NULL;
1156 cselib_val *base = NULL;
1157 insn_info_t ptr, last;
1158 bool store_is_unused;
1160 if (GET_CODE (body) != SET && GET_CODE (body) != CLOBBER)
1163 /* If this is not used, then this cannot be used to keep the insn
1164 from being deleted. On the other hand, it does provide something
1165 that can be used to prove that another store is dead. */
1167 = (find_reg_note (insn_info->insn, REG_UNUSED, body) != NULL);
1169 /* Check whether that value is a suitable memory location. */
1170 mem = SET_DEST (body);
1173 /* If the set or clobber is unused, then it does not affect our
1174 ability to get rid of the entire insn. */
1175 if (!store_is_unused)
1176 insn_info->cannot_delete = true;
1180 /* At this point we know mem is a mem. */
1181 if (GET_MODE (mem) == BLKmode)
1183 if (GET_CODE (XEXP (mem, 0)) == SCRATCH)
1186 fprintf (dump_file, " adding wild read for (clobber (mem:BLK (scratch)))\n");
1187 add_wild_read (bb_info);
1188 insn_info->cannot_delete = true;
1190 else if (!store_is_unused)
1192 /* If the set or clobber is unused, then it does not affect our
1193 ability to get rid of the entire insn. */
1194 insn_info->cannot_delete = true;
1195 clear_rhs_from_active_local_stores ();
1200 /* We can still process a volatile mem, we just cannot delete it. */
1201 if (MEM_VOLATILE_P (mem))
1202 insn_info->cannot_delete = true;
1204 if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
1206 clear_rhs_from_active_local_stores ();
1210 width = GET_MODE_SIZE (GET_MODE (mem));
1212 if (spill_alias_set)
1214 bitmap store1 = clear_alias_group->store1_p;
1215 bitmap store2 = clear_alias_group->store2_p;
1217 if (bitmap_bit_p (store1, spill_alias_set))
1218 bitmap_set_bit (store2, spill_alias_set);
1220 bitmap_set_bit (store1, spill_alias_set);
1222 if (clear_alias_group->offset_map_size_p < spill_alias_set)
1223 clear_alias_group->offset_map_size_p = spill_alias_set;
1225 store_info = pool_alloc (rtx_store_info_pool);
1228 fprintf (dump_file, " processing spill store %d(%s)\n",
1229 (int)spill_alias_set, GET_MODE_NAME (GET_MODE (mem)));
1231 else if (group_id >= 0)
1233 /* In the restrictive case where the base is a constant or the
1234 frame pointer we can do global analysis. */
1237 = VEC_index (group_info_t, rtx_group_vec, group_id);
1239 store_info = pool_alloc (rtx_store_info_pool);
1240 set_usage_bits (group, offset, width);
1243 fprintf (dump_file, " processing const base store gid=%d[%d..%d)\n",
1244 group_id, (int)offset, (int)(offset+width));
1248 store_info = pool_alloc (cse_store_info_pool);
1249 insn_info->contains_cselib_groups = true;
1253 fprintf (dump_file, " processing cselib store [%d..%d)\n",
1254 (int)offset, (int)(offset+width));
1257 /* Check to see if this store causes some other stores to be dead. */
1259 ptr = active_local_stores;
1264 insn_info_t next = ptr->next_local_store;
1265 store_info_t s_info = ptr->store_rec;
1268 /* Skip the clobbers. We delete the active insn if this insn
1269 shadows the set. To have been put on the active list, it
1270 has exactly one set. */
1271 while (!s_info->is_set)
1272 s_info = s_info->next;
1274 if (s_info->alias_set != spill_alias_set)
1276 else if (s_info->alias_set)
1278 struct clear_alias_mode_holder *entry
1279 = clear_alias_set_lookup (s_info->alias_set);
1280 /* Generally, spills cannot be processed if any of the
1281 references to the slot have a different mode. But if
1282 we are in the same block and mode is exactly the same
1283 between this store and one before in the same block,
1284 we can still delete it. */
1285 if ((GET_MODE (mem) == GET_MODE (s_info->mem))
1286 && (GET_MODE (mem) == entry->mode))
1289 s_info->positions_needed = 0;
1292 fprintf (dump_file, " trying spill store in insn=%d alias_set=%d\n",
1293 INSN_UID (ptr->insn), (int)s_info->alias_set);
1295 else if ((s_info->group_id == group_id)
1296 && (s_info->cse_base == base))
1300 fprintf (dump_file, " trying store in insn=%d gid=%d[%d..%d)\n",
1301 INSN_UID (ptr->insn), s_info->group_id,
1302 (int)s_info->begin, (int)s_info->end);
1303 for (i = offset; i < offset+width; i++)
1304 if (i >= s_info->begin && i < s_info->end)
1305 s_info->positions_needed &= ~(1L << (i - s_info->begin));
1307 else if (s_info->rhs)
1308 /* Need to see if it is possible for this store to overwrite
1309 the value of store_info. If it is, set the rhs to NULL to
1310 keep it from being used to remove a load. */
1312 if (canon_true_dependence (s_info->mem,
1313 GET_MODE (s_info->mem),
1319 /* An insn can be deleted if every position of every one of
1320 its s_infos is zero. */
1321 if (s_info->positions_needed != 0)
1326 insn_info_t insn_to_delete = ptr;
1329 last->next_local_store = ptr->next_local_store;
1331 active_local_stores = ptr->next_local_store;
1333 delete_dead_store_insn (insn_to_delete);
1341 gcc_assert ((unsigned) width < sizeof (store_info->positions_needed) * CHAR_BIT);
1343 /* Finish filling in the store_info. */
1344 store_info->next = insn_info->store_rec;
1345 insn_info->store_rec = store_info;
1346 store_info->mem = canon_rtx (mem);
1347 store_info->alias_set = spill_alias_set;
1348 store_info->mem_addr = get_addr (XEXP (mem, 0));
1349 store_info->cse_base = base;
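  /* One bit per byte: e.g. a 4 byte store starts life with
     positions_needed == 0xf and becomes dead once later stores have
     cleared every bit.  */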
1350 store_info->positions_needed = (1L << width) - 1;
1351 store_info->group_id = group_id;
1352 store_info->begin = offset;
1353 store_info->end = offset + width;
1354 store_info->is_set = GET_CODE (body) == SET;
1356 if (store_info->is_set
1357 /* No place to keep the value after ra. */
1358 && !reload_completed
1359 /* The careful reviewer may wish to comment on my checking that the
1360 rhs of a store is always a reg. */
1361 && REG_P (SET_SRC (body))
1362 /* Sometimes the store and reload are used for truncation and rounding. */
1364 && !(FLOAT_MODE_P (GET_MODE (mem)) && (flag_float_store)))
1365 store_info->rhs = SET_SRC (body);
1367 store_info->rhs = NULL;
1369 /* If this is a clobber, we return 0. We will only be able to
1370 delete this insn if there is only one USED store, but we
1371 can use the clobber to delete other stores earlier. */
1372 return store_info->is_set ? 1 : 0;
1377 dump_insn_info (const char * start, insn_info_t insn_info)
1379 fprintf (dump_file, "%s insn=%d %s\n", start,
1380 INSN_UID (insn_info->insn),
1381 insn_info->store_rec ? "has store" : "naked");
1385 /* Take a sequence of:

     A <- r1
     ...
     ... <- A

   and change it into:

     r2 <- r1
     A <- r1
     ...
     ... <- r2

1396 The STORE_INFO and STORE_INSN are for the store and the READ_INFO
1397 and READ_INSN are for the read. Return true if the replacement went ok. */
1401 replace_read (store_info_t store_info, insn_info_t store_insn,
1402 read_info_t read_info, insn_info_t read_insn, rtx *loc)
1408 fprintf (dump_file, "generating move to replace load at %d from store at %d\n",
1409 INSN_UID (read_insn->insn), INSN_UID (store_insn->insn));
1410 if (GET_MODE (store_info->mem) == GET_MODE (read_info->mem))
1412 rtx new_reg = gen_reg_rtx (GET_MODE (store_info->mem));
1413 if (validate_change (read_insn->insn, loc, new_reg, 0))
1416 deferred_change_t deferred_change = pool_alloc (deferred_change_pool);
1419 emit_move_insn (new_reg, store_info->rhs);
1420 insns = get_insns ();
1422 emit_insn_before (insns, store_insn->insn);
1425 fprintf (dump_file, " -- adding move insn %d: r%d = r%d\n",
1426 INSN_UID (insns), REGNO (new_reg), REGNO (store_info->rhs));
1428 /* And now for the kludge part: cselib croaks if you just
1429 return at this point. There are two reasons for this:
1431 1) Cselib has an idea of how many pseudos there are and
1432 that does not include the new one we just added.
1434 2) Cselib does not know about the move insn we added
1435 above the store_info, and there is no way to tell it
1436 about it, because it has "moved on".
1438 So we are just going to have to lie. The move insn is
1439 not really an issue, cselib did not see it. But the use
1440 of the new pseudo read_insn is a real problem. The way
1441 that we solve this problem is that we are just going to
1442 put the mem back and keep a table of mems to get rid of. At
1443 the end of the basic block we can get rid of them. */
1445 *loc = read_info->mem;
1446 deferred_change->next = deferred_change_list;
1447 deferred_change_list = deferred_change;
1448 deferred_change->loc = loc;
1449 deferred_change->reg = new_reg;
1451 /* Get rid of the read_info, from the point of view of the
1452 rest of dse, play like this read never happened. */
1453 read_insn->read_rec = read_info->next;
1454 pool_free (read_info_pool, read_info);
1460 fprintf (dump_file, " -- validation failure\n");
1466 /* Someone with excellent rtl skills needs to fill this in. You
1467 are guaranteed that the read is of the same size or smaller
1468 than the store, and that the read does not hang off one of
1469 the ends of the store. But the offsets of each must be
1470 checked because the read does not have to line up on either
1471 end of the store so the begin fields need to be examined in
1472 both the store_info and read_info. */
1474 fprintf (dump_file, " -- complex load, currently unsupported.\n");
1480 /* A for_each_rtx callback in which DATA is the bb_info. Check to see
1481 if LOC is a mem and, if it is, look at the address and kill any
1482 appropriate stores that may be active. */
1485 check_mem_read_rtx (rtx *loc, void *data)
1489 insn_info_t insn_info;
1490 HOST_WIDE_INT offset = 0;
1491 HOST_WIDE_INT width = 0;
1492 HOST_WIDE_INT spill_alias_set = 0;
1493 cselib_val *base = NULL;
1495 read_info_t read_info;
1497 if (!mem || !MEM_P (mem))
1500 bb_info = (bb_info_t) data;
1501 insn_info = bb_info->last_insn;
1503 if ((MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
1504 || (MEM_VOLATILE_P (mem)))
1507 fprintf (dump_file, " adding wild read, volatile or barrier.\n");
1508 add_wild_read (bb_info);
1509 insn_info->cannot_delete = true;
1513 /* If it is reading readonly mem, then there can be no conflict with any stores. */
1515 if (MEM_READONLY_P (mem))
1518 if (!canon_address (mem, &spill_alias_set, &group_id, &offset, &base))
1521 fprintf (dump_file, " adding wild read, canon_address failure.\n");
1522 add_wild_read (bb_info);
1526 if (GET_MODE (mem) == BLKmode)
1529 width = GET_MODE_SIZE (GET_MODE (mem));
1531 read_info = pool_alloc (read_info_pool);
1532 read_info->group_id = group_id;
1533 read_info->mem = mem;
1534 read_info->alias_set = spill_alias_set;
1535 read_info->begin = offset;
1536 read_info->end = offset + width;
1537 read_info->next = insn_info->read_rec;
1538 insn_info->read_rec = read_info;
1540 /* We ignore the clobbers in store_info. This is mildly aggressive,
1541 but there really should not be a clobber followed by a read. */
1543 if (spill_alias_set)
1545 insn_info_t i_ptr = active_local_stores;
1546 insn_info_t last = NULL;
1549 fprintf (dump_file, " processing spill load %d\n",
1550 (int)spill_alias_set);
1554 store_info_t store_info = i_ptr->store_rec;
1556 /* Skip the clobbers. */
1557 while (!store_info->is_set)
1558 store_info = store_info->next;
1560 if (store_info->alias_set == spill_alias_set)
1563 dump_insn_info ("removing from active", i_ptr);
1566 last->next_local_store = i_ptr->next_local_store;
1568 active_local_stores = i_ptr->next_local_store;
1572 i_ptr = i_ptr->next_local_store;
1575 else if (group_id >= 0)
1577 /* This is the restricted case where the base is a constant or
1578 the frame pointer and offset is a constant. */
1579 insn_info_t i_ptr = active_local_stores;
1580 insn_info_t last = NULL;
1585 fprintf (dump_file, " processing const load gid=%d[BLK]\n",
1588 fprintf (dump_file, " processing const load gid=%d[%d..%d)\n",
1589 group_id, (int)offset, (int)(offset+width));
1594 bool remove = false;
1595 store_info_t store_info = i_ptr->store_rec;
1597 /* Skip the clobbers. */
1598 while (!store_info->is_set)
1599 store_info = store_info->next;
1601 /* There are three cases here. */
1602 if (store_info->group_id < 0)
1603 /* We have a cselib store followed by a read from a const base. */
1606 = canon_true_dependence (store_info->mem,
1607 GET_MODE (store_info->mem),
1608 store_info->mem_addr,
1611 else if (group_id == store_info->group_id)
1613 /* This is a block mode load. We may get lucky and
1614 canon_true_dependence may save the day. */
1617 = canon_true_dependence (store_info->mem,
1618 GET_MODE (store_info->mem),
1619 store_info->mem_addr,
1622 /* If this read is just reading back something that we just
1623 stored, rewrite the read. */
1627 && (offset >= store_info->begin)
1628 && (offset + width <= store_info->end))
1630 int mask = ((1L << width) - 1) << (offset - store_info->begin);
1632 if ((store_info->positions_needed & mask) == mask
1633 && replace_read (store_info, i_ptr,
1634 read_info, insn_info, loc))
1637 /* The bases are the same, just see if the offsets overlap. */
1639 if ((offset < store_info->end)
1640 && (offset + width > store_info->begin))
1646 The else case that is missing here is that the
1647 bases are constant but different. There is nothing
1648 to do here because there is no overlap. */
1653 dump_insn_info ("removing from active", i_ptr);
1656 last->next_local_store = i_ptr->next_local_store;
1658 active_local_stores = i_ptr->next_local_store;
1662 i_ptr = i_ptr->next_local_store;
1667 insn_info_t i_ptr = active_local_stores;
1668 insn_info_t last = NULL;
1671 fprintf (dump_file, " processing cselib load mem:");
1672 print_inline_rtx (dump_file, mem, 0);
1673 fprintf (dump_file, "\n");
1678 bool remove = false;
1679 store_info_t store_info = i_ptr->store_rec;
1682 fprintf (dump_file, " processing cselib load against insn %d\n",
1683 INSN_UID (i_ptr->insn));
1685 /* Skip the clobbers. */
1686 while (!store_info->is_set)
1687 store_info = store_info->next;
1689 /* If this read is just reading back something that we just
1690 stored, rewrite the read. */
1692 && store_info->group_id == -1
1693 && store_info->cse_base == base
1694 && (offset >= store_info->begin)
1695 && (offset + width <= store_info->end))
1697 int mask = ((1L << width) - 1) << (offset - store_info->begin);
1699 if ((store_info->positions_needed & mask) == mask
1700 && replace_read (store_info, i_ptr,
1701 read_info, insn_info, loc))
1705 if (!store_info->alias_set)
1706 remove = canon_true_dependence (store_info->mem,
1707 GET_MODE (store_info->mem),
1708 store_info->mem_addr,
1714 dump_insn_info ("removing from active", i_ptr);
1717 last->next_local_store = i_ptr->next_local_store;
1719 active_local_stores = i_ptr->next_local_store;
1723 i_ptr = i_ptr->next_local_store;
1729 /* A for_each_rtx callback in which DATA points to the INSN_INFO,
1730 as in check_mem_read_rtx. Nullify the pointer if i_m_r_m_r returns
1731 true for any part of *LOC. */
1734 check_mem_read_use (rtx *loc, void *data)
1736 for_each_rtx (loc, check_mem_read_rtx, data);
1739 /* Apply record_store to all candidate stores in INSN. Mark INSN
1740 if some part of it is not a candidate store and assigns to a
1741 non-register target. */
1744 scan_insn (bb_info_t bb_info, rtx insn)
1747 insn_info_t insn_info = pool_alloc (insn_info_pool);
1749 memset (insn_info, 0, sizeof (struct insn_info));
1752 fprintf (dump_file, "\n**scanning insn=%d\n",
1755 insn_info->prev_insn = bb_info->last_insn;
1756 insn_info->insn = insn;
1757 bb_info->last_insn = insn_info;
1760 /* Cselib clears the table for this case, so we have to essentially treat this as a wild read. */
1762 if (NONJUMP_INSN_P (insn)
1763 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
1764 && MEM_VOLATILE_P (PATTERN (insn)))
1766 add_wild_read (bb_info);
1767 insn_info->cannot_delete = true;
1771 /* Look at all of the uses in the insn. */
1772 note_uses (&PATTERN (insn), check_mem_read_use, bb_info);
1776 insn_info->cannot_delete = true;
1777 /* Const functions cannot do anything bad i.e. read memory,
1778 however, they can read their parameters which may have been
1779 pushed onto the stack. */
1780 if (CONST_OR_PURE_CALL_P (insn) && !pure_call_p (insn))
1782 insn_info_t i_ptr = active_local_stores;
1783 insn_info_t last = NULL;
1786 fprintf (dump_file, "const call %d\n", INSN_UID (insn));
1790 store_info_t store_info = i_ptr->store_rec;
1792 /* Skip the clobbers. */
1793 while (!store_info->is_set)
1794 store_info = store_info->next;
1796 /* Remove the frame related stores. */
1797 if (store_info->group_id >= 0
1798 && VEC_index (group_info_t, rtx_group_vec, store_info->group_id)->frame_related)
1801 dump_insn_info ("removing from active", i_ptr);
1804 last->next_local_store = i_ptr->next_local_store;
1806 active_local_stores = i_ptr->next_local_store;
1810 i_ptr = i_ptr->next_local_store;
1813 insn_info->stack_read = true;
1818 /* Every other call, including pure functions, may read memory.
1819 add_wild_read (bb_info);
1823 /* Assuming that there are sets in these insns, we cannot delete the insn. */
1825 if ((GET_CODE (PATTERN (insn)) == CLOBBER)
1826 || volatile_insn_p (PATTERN (insn))
1827 || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))
1828 || (RTX_FRAME_RELATED_P (insn))
1829 || find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX))
1830 insn_info->cannot_delete = true;
1832 body = PATTERN (insn);
1833 if (GET_CODE (body) == PARALLEL)
1836 for (i = 0; i < XVECLEN (body, 0); i++)
1837 mems_found += record_store (XVECEXP (body, 0, i), bb_info);
1840 mems_found += record_store (body, bb_info);
1843 fprintf (dump_file, "mems_found = %d, cannot_delete = %s\n",
1844 mems_found, insn_info->cannot_delete ? "true" : "false");
1846 /* If we found some sets of mems, and the insn has not been marked
1847 cannot delete, add it into the active_local_stores so that it can
1848 be locally deleted if found dead. Otherwise mark it as cannot
1849 delete. This simplifies the processing later. */
1850 if (mems_found == 1 && !insn_info->cannot_delete)
1852 insn_info->next_local_store = active_local_stores;
1853 active_local_stores = insn_info;
1856 insn_info->cannot_delete = true;
1860 /* Remove BASE from the set of active_local_stores. This is a
1861 callback from cselib that is used to get rid of the stores in
1862 active_local_stores. */
1865 remove_useless_values (cselib_val *base)
1867 insn_info_t insn_info = active_local_stores;
1868 insn_info_t last = NULL;
1872 store_info_t store_info = insn_info->store_rec;
1873 bool delete = false;
1875 /* If ANY of the store_infos match the cselib group that is
1876 being deleted, then the insn cannot be deleted. */
1879 if ((store_info->group_id == -1)
1880 && (store_info->cse_base == base))
1885 store_info = store_info->next;
1891 last->next_local_store = insn_info->next_local_store;
1893 active_local_stores = insn_info->next_local_store;
1894 free_store_info (insn_info);
1899 insn_info = insn_info->next_local_store;
1904 /* Do all of step 1. */
1911 cselib_init (false);
1912 all_blocks = BITMAP_ALLOC (NULL);
1913 bitmap_set_bit (all_blocks, ENTRY_BLOCK);
1914 bitmap_set_bit (all_blocks, EXIT_BLOCK);
1919 bb_info_t bb_info = pool_alloc (bb_info_pool);
1921 memset (bb_info, 0, sizeof (struct bb_info));
1922 bitmap_set_bit (all_blocks, bb->index);
1924 bb_table[bb->index] = bb_info;
1925 cselib_discard_hook = remove_useless_values;
1927 if (bb->index >= NUM_FIXED_BLOCKS)
1932 = create_alloc_pool ("cse_store_info_pool",
1933 sizeof (struct store_info), 100);
1934 active_local_stores = NULL;
1935 cselib_clear_table ();
1937 /* Scan the insns. */
1938 FOR_BB_INSNS (bb, insn)
1941 scan_insn (bb_info, insn);
1942 cselib_process_insn (insn);
1945 /* This is something of a hack, because the global algorithm
1946 is supposed to take care of the case where stores go dead
1947 at the end of the function. However, the global
1948 algorithm must take a more conservative view of block
1949 mode reads than the local alg does. So to get the case
1950 where you have a store to the frame followed by a non
1951 overlapping block mode read, we look at the active local
1952 stores at the end of the function and delete all of the
1953 frame and spill based ones. */
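/* For example (illustrative): a store through the frame pointer in
   the final block, with no later read of that slot, is deleted right
   here even when a non-overlapping block mode read would have made
   the global algorithm keep it.  */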
1954 if (stores_off_frame_dead_at_return
1955 && (EDGE_COUNT (bb->succs) == 0
1956 || (single_succ_p (bb)
1957 && single_succ (bb) == EXIT_BLOCK_PTR
1958 && ! current_function_calls_eh_return)))
1960 insn_info_t i_ptr = active_local_stores;
1963 store_info_t store_info = i_ptr->store_rec;
1965 /* Skip the clobbers. */
1966 while (!store_info->is_set)
1967 store_info = store_info->next;
1968 if (store_info->alias_set)
1969 delete_dead_store_insn (i_ptr);
1971 if (store_info->group_id >= 0)
1974 = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
1975 if (group->frame_related)
1976 delete_dead_store_insn (i_ptr);
1979 i_ptr = i_ptr->next_local_store;
1983 /* Get rid of the loads that were discovered in
1984 replace_read. Cselib is finished with this block. */
1985 while (deferred_change_list)
1987 deferred_change_t next = deferred_change_list->next;
1989 /* There is no reason to validate this change. That was done earlier. */
1991 *deferred_change_list->loc = deferred_change_list->reg;
1992 pool_free (deferred_change_pool, deferred_change_list);
1993 deferred_change_list = next;
1996 /* Get rid of all of the cselib based store_infos in this
1997 block and mark the containing insns as not being deletable. */
1999 ptr = bb_info->last_insn;
2002 if (ptr->contains_cselib_groups)
2003 free_store_info (ptr);
2004 ptr = ptr->prev_insn;
2007 free_alloc_pool (cse_store_info_pool);
2012 htab_empty (rtx_group_table);
2016 /*----------------------------------------------------------------------------
2019 Assign each byte position in the stores that we are going to
2020 analyze globally to a position in the bitmaps. Returns true if
2021 there are any bit positions assigned.
2022 ----------------------------------------------------------------------------*/
2025 dse_step2_init (void)
2030 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2032 /* For all non stack related bases, we only consider a store to
2033 be deletable if there are two or more stores for that
2034 position. This is because it takes one store to make the
2035 other store redundant. However, for the stores that are
2036 stack related, we consider them if there is only one store
2037 for the position. We do this because the stack related
2038 stores can be deleted if there is no read between them and
2039 the end of the function.
2041 To make this work in the current framework, we take the stack
2042 related bases and add all of the bits from store1 into store2.
2043 This has the effect of making them eligible even if there is only one store. */
2046 if (stores_off_frame_dead_at_return && group->frame_related)
2048 bitmap_ior_into (group->store2_n, group->store1_n);
2049 bitmap_ior_into (group->store2_p, group->store1_p);
2051 fprintf (dump_file, "group %d is frame related ", i);
2054 group->offset_map_size_n++;
2055 group->offset_map_n = XNEWVEC (int, group->offset_map_size_n);
2056 group->offset_map_size_p++;
2057 group->offset_map_p = XNEWVEC (int, group->offset_map_size_p);
2058 group->process_globally = false;
2061 fprintf (dump_file, "group %d(%d+%d): ", i,
2062 (int)bitmap_count_bits (group->store2_n),
2063 (int)bitmap_count_bits (group->store2_p));
2064 bitmap_print (dump_file, group->store2_n, "n ", " ");
2065 bitmap_print (dump_file, group->store2_p, "p ", "\n");
2071 /* Init the offset tables for the normal case. */
2074 dse_step2_nospill (void)
2078 /* Position 0 is unused because 0 is used in the maps to mean unused. */
2080 current_position = 1;
2082 for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
2087 if (group == clear_alias_group)
2090 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2091 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2092 bitmap_clear (group->group_kill);
2094 EXECUTE_IF_SET_IN_BITMAP (group->store2_n, 0, j, bi)
2096 bitmap_set_bit (group->group_kill, current_position);
2097 group->offset_map_n[j] = current_position++;
2098 group->process_globally = true;
2100 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2102 bitmap_set_bit (group->group_kill, current_position);
2103 group->offset_map_p[j] = current_position++;
2104 group->process_globally = true;
2107 return current_position != 1;
2111 /* Init the offset tables for the spill case. */
2114 dse_step2_spill (void)
2117 group_info_t group = clear_alias_group;
2120 /* Position 0 is unused because 0 is used in the maps to mean unused. */
2122 current_position = 1;
2126 bitmap_print (dump_file, clear_alias_sets,
2127 "clear alias sets ", "\n");
2128 bitmap_print (dump_file, disqualified_clear_alias_sets,
2129 "disqualified clear alias sets ", "\n");
2132 memset (group->offset_map_n, 0, sizeof(int) * group->offset_map_size_n);
2133 memset (group->offset_map_p, 0, sizeof(int) * group->offset_map_size_p);
2134 bitmap_clear (group->group_kill);
2136 /* Remove the disqualified positions from the store2_p set. */
2137 bitmap_and_compl_into (group->store2_p, disqualified_clear_alias_sets);
2139 /* We do not need to process the store2_n set because
2140 alias_sets are always positive. */
2141 EXECUTE_IF_SET_IN_BITMAP (group->store2_p, 0, j, bi)
2143 bitmap_set_bit (group->group_kill, current_position);
2144 group->offset_map_p[j] = current_position++;
2145 group->process_globally = true;
2148 return current_position != 1;
2153 /*----------------------------------------------------------------------------
2156 Build the bit vectors for the transfer functions.
2157 ----------------------------------------------------------------------------*/
2160 /* Note that this is NOT a general purpose function. Any mem that has
2161 an alias set registered here is expected to be COMPLETELY unaliased:
2162 i.e. its addresses are not and need not be examined.
2164 It is known that all references to this address will have this
2165 alias set and there are NO other references to this address in the function.
2168 Currently the only place that is known to be clean enough to use
2169 this interface is the code that assigns the spill locations.
2171 All of the mems that have alias_sets registered are subjected to a
2172 very powerful form of dse where function calls, volatile reads and
2173 writes, and reads from random locations are not taken into account.
2175 It is also assumed that these locations go dead when the function
2176 returns. This assumption could be relaxed if there were found to
2177 be places that this assumption was not correct.
2179 The MODE is passed in and saved. The mode of each load or store to
2180 a mem with ALIAS_SET is checked against MEM. If the size of that
2181 load or store is different from MODE, processing is halted on this
2182 alias set. For the vast majority of aliases sets, all of the loads
2183 and stores will use the same mode. But vectors are treated
2184 differently: the alias set is established for the entire vector,
2185 but reload will insert loads and stores for individual elements and
2186 we do not necessarily have the information to track those separate
2187 elements. So when we see a mode mismatch, we just bail. */

void
dse_record_singleton_alias_set (HOST_WIDE_INT alias_set,
				enum machine_mode mode)
{
  struct clear_alias_mode_holder tmp_holder;
  struct clear_alias_mode_holder *entry;
  void **slot;

  /* If we are not going to run dse, we need to return now or there
     will be problems with allocating the bitmaps.  */
  if ((!gate_dse ()) || !alias_set)
    return;

  if (!clear_alias_sets)
    {
      clear_alias_sets = BITMAP_ALLOC (NULL);
      disqualified_clear_alias_sets = BITMAP_ALLOC (NULL);
      clear_alias_mode_table = htab_create (11, clear_alias_mode_hash,
					    clear_alias_mode_eq, NULL);
      clear_alias_mode_pool = create_alloc_pool ("clear_alias_mode_pool",
						 sizeof (struct clear_alias_mode_holder), 100);
    }

  bitmap_set_bit (clear_alias_sets, alias_set);

  tmp_holder.alias_set = alias_set;

  slot = htab_find_slot (clear_alias_mode_table, &tmp_holder, INSERT);
  gcc_assert (*slot == NULL);

  *slot = entry = pool_alloc (clear_alias_mode_pool);
  entry->alias_set = alias_set;
  entry->mode = mode;
}
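
/* A sketch of the expected use of this interface (hypothetical code,
   not taken from this file): the spill-slot assignment code gives
   each slot a fresh singleton alias set and registers it here,
   roughly

       HOST_WIDE_INT set = new_alias_set ();
       set_mem_alias_set (spill_slot_mem, set);
       dse_record_singleton_alias_set (set, GET_MODE (spill_slot_mem));

   where spill_slot_mem stands for the MEM rtx of the newly assigned
   stack slot.  Any later sharing or resizing of the slot must call
   dse_invalidate_singleton_alias_set below to withdraw the
   guarantee.  */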

/* Remove ALIAS_SET from the sets of stack slots being considered.  */

void
dse_invalidate_singleton_alias_set (HOST_WIDE_INT alias_set)
{
  if ((!gate_dse ()) || !alias_set)
    return;

  bitmap_clear_bit (clear_alias_sets, alias_set);
}

/* Look up the bitmap index for OFFSET in GROUP_INFO.  If it is not
   there, return 0.  */

static int
get_bitmap_index (group_info_t group_info, HOST_WIDE_INT offset)
{
  if (offset < 0)
    {
      HOST_WIDE_INT offset_p = -offset;
      if (offset_p >= group_info->offset_map_size_n)
	return 0;
      return group_info->offset_map_n[offset_p];
    }
  else
    {
      if (offset >= group_info->offset_map_size_p)
	return 0;
      return group_info->offset_map_p[offset];
    }
}
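
/* For example: with the tables built in step 2, a reference to offset
   -2 of GROUP_INFO is looked up as offset_map_n[2] and a reference to
   offset 5 as offset_map_p[5]; a result of 0 means that position was
   never given a global index and cannot participate in the global
   problem.  */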

/* Process the STORE_INFOs into the bitmaps GEN and KILL.  KILL may be
   NULL.  */

static void
scan_stores_nospill (store_info_t store_info, bitmap gen, bitmap kill)
{
  while (store_info)
    {
      HOST_WIDE_INT i;
      group_info_t group_info
	= VEC_index (group_info_t, rtx_group_vec, store_info->group_id);
      if (group_info->process_globally)
	for (i = store_info->begin; i < store_info->end; i++)
	  {
	    int index = get_bitmap_index (group_info, i);
	    if (index != 0)
	      {
		bitmap_set_bit (gen, index);
		if (kill)
		  bitmap_clear_bit (kill, index);
	      }
	  }
      store_info = store_info->next;
    }
}
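
/* Illustrative example: a 4 byte store covering offsets [8, 12) of a
   globally processed group sets the gen bits for the indices of
   offsets 8..11 and, when building the block transfer function,
   clears those same bits in kill.  In the backwards problem a set bit
   means "below this point, this byte is overwritten before any
   use".  */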

/* Process the STORE_INFOs into the bitmaps GEN and KILL.  KILL may be
   NULL.  */

static void
scan_stores_spill (store_info_t store_info, bitmap gen, bitmap kill)
{
  while (store_info)
    {
      if (store_info->alias_set)
	{
	  int index = get_bitmap_index (clear_alias_group,
					store_info->alias_set);
	  if (index != 0)
	    {
	      bitmap_set_bit (gen, index);
	      if (kill)
		bitmap_clear_bit (kill, index);
	    }
	}
      store_info = store_info->next;
    }
}

/* Process the READ_INFOs into the bitmaps GEN and KILL.  KILL may be
   NULL.  */

static void
scan_reads_nospill (insn_info_t insn_info, bitmap gen, bitmap kill)
{
  read_info_t read_info = insn_info->read_rec;
  int i;
  group_info_t group;

  /* For const function calls kill the stack related stores.  */
  if (insn_info->stack_read)
    {
      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
	if (group->process_globally && group->frame_related)
	  {
	    if (kill)
	      bitmap_ior_into (kill, group->group_kill);
	    bitmap_and_compl_into (gen, group->group_kill);
	  }
    }

  while (read_info)
    {
      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
	{
	  if (group->process_globally)
	    {
	      if (i == read_info->group_id)
		{
		  if (read_info->begin > read_info->end)
		    {
		      /* Begin > end for block mode reads.  */
		      if (kill)
			bitmap_ior_into (kill, group->group_kill);
		      bitmap_and_compl_into (gen, group->group_kill);
		    }
		  else
		    {
		      /* The groups are the same, just process the
			 offsets.  */
		      HOST_WIDE_INT j;
		      for (j = read_info->begin; j < read_info->end; j++)
			{
			  int index = get_bitmap_index (group, j);
			  if (index != 0)
			    {
			      if (kill)
				bitmap_set_bit (kill, index);
			      bitmap_clear_bit (gen, index);
			    }
			}
		    }
		}
	      else
		{
		  /* The groups are different; if the alias sets
		     conflict, clear the entire group.  We only need
		     to apply this test if the read_info is a cselib
		     read.  Anything with a constant base cannot alias
		     something else with a different constant
		     base.  */
		  if ((read_info->group_id < 0)
		      && canon_true_dependence (group->base_mem,
						QImode,
						group->canon_base_mem,
						read_info->mem, rtx_varies_p))
		    {
		      if (kill)
			bitmap_ior_into (kill, group->group_kill);
		      bitmap_and_compl_into (gen, group->group_kill);
		    }
		}
	    }
	}

      read_info = read_info->next;
    }
}
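
/* Examples of the three read cases above (illustrative): a read of
   bytes [8, 12) from the same group kills exactly those four
   positions; a block mode read of the group (recorded with begin >
   end) kills the group's whole group_kill set; and a cselib read
   (group_id < 0) that canon_true_dependence says may alias the group
   also kills the whole group, since any of its bytes may have been
   read.  */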

/* Process the READ_INFOs into the bitmaps GEN and KILL.  KILL may be
   NULL.  */

static void
scan_reads_spill (read_info_t read_info, bitmap gen, bitmap kill)
{
  while (read_info)
    {
      if (read_info->alias_set)
	{
	  int index = get_bitmap_index (clear_alias_group,
					read_info->alias_set);
	  if (index != 0)
	    {
	      if (kill)
		bitmap_set_bit (kill, index);
	      bitmap_clear_bit (gen, index);
	    }
	}
      read_info = read_info->next;
    }
}

/* Return the insn in BB_INFO before the first wild read or, if there
   are no wild reads in the block, return the last insn.  */

static insn_info_t
find_insn_before_first_wild_read (bb_info_t bb_info)
{
  insn_info_t insn_info = bb_info->last_insn;
  insn_info_t last_wild_read = NULL;

  while (insn_info)
    {
      if (insn_info->wild_read)
	{
	  last_wild_read = insn_info->prev_insn;
	  /* Block starts with wild read.  */
	  if (!last_wild_read)
	    return NULL;
	}

      insn_info = insn_info->prev_insn;
    }

  if (last_wild_read)
    return last_wild_read;
  else
    return bb_info->last_insn;
}
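
/* For instance (illustrative): in a block whose insns are i1, i2, i3,
   i4, where i2 contains a wild read, this returns i1.  The gen/kill
   scan then starts at i1; i2, i3 and i4 need not be examined because
   the wild read hides anything the rest of the block could prove.  If
   the wild read were in i1, NULL is returned and the caller drops the
   block's kill set entirely.  */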

/* Scan the insns in BB_INFO from the chosen starting insn to the top
   of the block in order to build the gen and kill sets for the block.
   The starting insn is either the last insn in the block or the first
   insn with a wild read.  In the latter case we are able to skip the
   rest of the block because it just does not matter: anything that
   happens is hidden by the wild read.  */

static void
dse_step3_scan (bool for_spills, basic_block bb)
{
  bb_info_t bb_info = bb_table[bb->index];
  insn_info_t insn_info;

  if (for_spills)
    /* There are no wild reads in the spill case.  */
    insn_info = bb_info->last_insn;
  else
    insn_info = find_insn_before_first_wild_read (bb_info);

  /* In the spill case, or in the no_spill case if there is no wild
     read in the block, we will need a kill set.  */
  if (insn_info == bb_info->last_insn)
    {
      if (bb_info->kill)
	bitmap_clear (bb_info->kill);
      else
	bb_info->kill = BITMAP_ALLOC (NULL);
    }
  else
    if (bb_info->kill)
      BITMAP_FREE (bb_info->kill);

  while (insn_info)
    {
      /* There may have been code deleted by the dce pass run before
	 this phase.  */
      if (insn_info->insn && INSN_P (insn_info->insn))
	{
	  /* Process the read(s) last.  */
	  if (for_spills)
	    {
	      scan_stores_spill (insn_info->store_rec, bb_info->gen, bb_info->kill);
	      scan_reads_spill (insn_info->read_rec, bb_info->gen, bb_info->kill);
	    }
	  else
	    {
	      scan_stores_nospill (insn_info->store_rec, bb_info->gen, bb_info->kill);
	      scan_reads_nospill (insn_info, bb_info->gen, bb_info->kill);
	    }
	}

      insn_info = insn_info->prev_insn;
    }
}

/* Set the gen set of the exit block, and also any block with no
   successors that does not have a wild read.  */

static void
dse_step3_exit_block_scan (bb_info_t bb_info)
{
  /* The gen set is all 0's for the exit block except for the
     frame_pointer_group.  */

  if (stores_off_frame_dead_at_return)
    {
      unsigned int i;
      group_info_t group;

      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
	{
	  if (group->process_globally && group->frame_related)
	    bitmap_ior_into (bb_info->gen, group->group_kill);
	}
    }
}

/* Find all of the blocks that are not backwards reachable from the
   exit block or any block with no successors (BB).  These are the
   infinite loops or infinite self loops.  These blocks will still
   have their bits set in UNREACHABLE_BLOCKS.  */

static void
mark_reachable_blocks (sbitmap unreachable_blocks, basic_block bb)
{
  edge e;
  edge_iterator ei;

  if (TEST_BIT (unreachable_blocks, bb->index))
    {
      RESET_BIT (unreachable_blocks, bb->index);
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  mark_reachable_blocks (unreachable_blocks, e->src);
	}
    }
}
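
/* Example (illustrative): for a CFG b2 -> b1 -> EXIT together with a
   self loop b3 -> b3 that never reaches EXIT, the walk from the
   no-successor blocks clears the bits for EXIT, b1 and b2, leaving
   only b3 set in UNREACHABLE_BLOCKS; dse_step3 below then gives b3 an
   all-ones out set so it never blocks the dataflow solution.  */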

/* Build the transfer functions for the function.  */

static void
dse_step3 (bool for_spills)
{
  basic_block bb;
  sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block);
  sbitmap_iterator sbi;
  bitmap all_ones = NULL;
  unsigned int i;

  sbitmap_ones (unreachable_blocks);

  FOR_ALL_BB (bb)
    {
      bb_info_t bb_info = bb_table[bb->index];
      if (bb_info->gen)
	bitmap_clear (bb_info->gen);
      else
	bb_info->gen = BITMAP_ALLOC (NULL);

      if (bb->index == ENTRY_BLOCK)
	;
      else if (bb->index == EXIT_BLOCK)
	dse_step3_exit_block_scan (bb_info);
      else
	dse_step3_scan (for_spills, bb);
      if (EDGE_COUNT (bb->succs) == 0)
	mark_reachable_blocks (unreachable_blocks, bb);

      /* If this is the second time dataflow is run, delete the old
	 sets.  */
      if (bb_info->in)
	BITMAP_FREE (bb_info->in);
      if (bb_info->out)
	BITMAP_FREE (bb_info->out);
    }

  /* For any block in an infinite loop, we must initialize the out set
     to all ones.  This could be expensive, but almost never occurs in
     practice.  However, it is common in regression tests.  */
  EXECUTE_IF_SET_IN_SBITMAP (unreachable_blocks, 0, i, sbi)
    {
      if (bitmap_bit_p (all_blocks, i))
	{
	  bb_info_t bb_info = bb_table[i];
	  if (!all_ones)
	    {
	      unsigned int j;
	      group_info_t group;

	      all_ones = BITMAP_ALLOC (NULL);
	      for (j = 0; VEC_iterate (group_info_t, rtx_group_vec, j, group); j++)
		bitmap_ior_into (all_ones, group->group_kill);
	    }
	  if (!bb_info->out)
	    {
	      bb_info->out = BITMAP_ALLOC (NULL);
	      bitmap_copy (bb_info->out, all_ones);
	    }
	}
    }

  if (all_ones)
    BITMAP_FREE (all_ones);
  sbitmap_free (unreachable_blocks);
}

/*----------------------------------------------------------------------------
   Fourth step.

   Solve the bitvector equations.
----------------------------------------------------------------------------*/

/* Confluence function for blocks with no successors.  Create an out
   set from the gen set of the exit block.  This block logically has
   the exit block as a successor.  */

static void
dse_confluence_0 (basic_block bb)
{
  bb_info_t bb_info = bb_table[bb->index];

  if (bb->index == EXIT_BLOCK)
    return;

  if (!bb_info->out)
    {
      bb_info->out = BITMAP_ALLOC (NULL);
      bitmap_copy (bb_info->out, bb_table[EXIT_BLOCK]->gen);
    }
}

/* Propagate the information from the in set of the dest of E to the
   out set of the src of E.  If the various in or out sets are not
   there, that means they are all ones.  */

static void
dse_confluence_n (edge e)
{
  bb_info_t src_info = bb_table[e->src->index];
  bb_info_t dest_info = bb_table[e->dest->index];

  if (dest_info->in)
    {
      if (src_info->out)
	bitmap_and_into (src_info->out, dest_info->in);
      else
	{
	  src_info->out = BITMAP_ALLOC (NULL);
	  bitmap_copy (src_info->out, dest_info->in);
	}
    }
}
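
/* In equation form (a sketch of what the two confluence functions
   compute): out(bb) is the intersection of in(s) over all successors
   s of bb, where a block with no successors uses gen(EXIT) instead,
   and a missing in set is treated as all ones so it never constrains
   the intersection.  */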

/* Propagate the info from the out to the in set of BB_INDEX's basic
   block.  There are three cases:

   1) The block has no kill set.  In this case the kill set is all
   ones.  It does not matter what the out set of the block is; none of
   the info can reach the top.  The only thing that reaches the top is
   the gen set and we just copy that set.

   2) There is a kill set but no out set and bb has successors.  In
   this case we just return false.  Eventually an out set will be
   created and it is better to wait than to create a set of ones.

   3) There is both a kill and out set.  We apply the obvious transfer
   function.
*/

static bool
dse_transfer_function (int bb_index)
{
  bb_info_t bb_info = bb_table[bb_index];

  if (bb_info->kill)
    {
      if (bb_info->out)
	{
	  /* Case 3 above.  */
	  if (bb_info->in)
	    return bitmap_ior_and_compl (bb_info->in, bb_info->gen,
					 bb_info->out, bb_info->kill);
	  else
	    {
	      bb_info->in = BITMAP_ALLOC (NULL);
	      bitmap_ior_and_compl (bb_info->in, bb_info->gen,
				    bb_info->out, bb_info->kill);
	      return true;
	    }
	}
      else
	/* Case 2 above.  */
	return false;
    }
  else
    {
      /* Case 1 above.  If there is already an in set, nothing
	 happens.  */
      if (bb_info->in)
	return false;
      else
	{
	  bb_info->in = BITMAP_ALLOC (NULL);
	  bitmap_copy (bb_info->in, bb_info->gen);
	  return true;
	}
    }
}
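
/* Case 3 is the standard backwards transfer function, restated as an
   equation:

       in(bb) = gen(bb) | (out(bb) & ~kill(bb))

   which is exactly what bitmap_ior_and_compl computes; its boolean
   result (did in(bb) change?) tells the iterative solver in step 4
   whether bb's predecessors need to be revisited.  */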

/* Solve the dataflow equations.  */

static void
dse_step4 (void)
{
  df_simple_dataflow (DF_BACKWARD, NULL, dse_confluence_0,
		      dse_confluence_n, dse_transfer_function,
		      all_blocks, df_get_postorder (DF_BACKWARD),
		      df_get_n_blocks (DF_BACKWARD));
  if (dump_file)
    {
      basic_block bb;

      fprintf (dump_file, "\n\n*** Global dataflow info after analysis.\n");
      FOR_ALL_BB (bb)
	{
	  bb_info_t bb_info = bb_table[bb->index];

	  df_print_bb_index (bb, dump_file);
	  if (bb_info->in)
	    bitmap_print (dump_file, bb_info->in, "  in:   ", "\n");
	  else
	    fprintf (dump_file, "  in:   *MISSING*\n");
	  if (bb_info->gen)
	    bitmap_print (dump_file, bb_info->gen, "  gen:  ", "\n");
	  else
	    fprintf (dump_file, "  gen:  *MISSING*\n");
	  if (bb_info->kill)
	    bitmap_print (dump_file, bb_info->kill, "  kill: ", "\n");
	  else
	    fprintf (dump_file, "  kill: *MISSING*\n");
	  if (bb_info->out)
	    bitmap_print (dump_file, bb_info->out, "  out:  ", "\n");
	  else
	    fprintf (dump_file, "  out:  *MISSING*\n\n");
	}
    }
}

/*----------------------------------------------------------------------------
   Fifth step.

   Delete the stores that can only be deleted using the global information.
----------------------------------------------------------------------------*/

static void
dse_step5_nospill (void)
{
  basic_block bb;
  FOR_EACH_BB (bb)
    {
      bb_info_t bb_info = bb_table[bb->index];
      insn_info_t insn_info = bb_info->last_insn;
      bitmap v = bb_info->out;

      while (insn_info)
	{
	  bool deleted = false;
	  if (dump_file && insn_info->insn)
	    {
	      fprintf (dump_file, "starting to process insn %d\n",
		       INSN_UID (insn_info->insn));
	      bitmap_print (dump_file, v, "  v:  ", "\n");
	    }

	  /* There may have been code deleted by the dce pass run before
	     this phase.  */
	  if (insn_info->insn
	      && INSN_P (insn_info->insn)
	      && (!insn_info->cannot_delete)
	      && (!bitmap_empty_p (v)))
	    {
	      store_info_t store_info = insn_info->store_rec;

	      /* Try to delete the current insn.  */
	      deleted = true;

	      /* Skip the clobbers.  */
	      while (!store_info->is_set)
		store_info = store_info->next;

	      if (store_info->alias_set)
		deleted = false;
	      else
		{
		  HOST_WIDE_INT i;
		  group_info_t group_info
		    = VEC_index (group_info_t, rtx_group_vec, store_info->group_id);

		  for (i = store_info->begin; i < store_info->end; i++)
		    {
		      int index = get_bitmap_index (group_info, i);

		      if (dump_file)
			fprintf (dump_file, "i = %d, index = %d\n", (int)i, index);
		      if (index == 0 || !bitmap_bit_p (v, index))
			{
			  if (dump_file)
			    fprintf (dump_file, "failing at i = %d\n", (int)i);
			  deleted = false;
			  break;
			}
		    }
		}
	      if (deleted)
		{
		  if (dbg_cnt (dse))
		    {
		      check_for_inc_dec (insn_info->insn);
		      delete_insn (insn_info->insn);
		      insn_info->insn = NULL;
		      globally_deleted++;
		    }
		}
	    }
	  /* We do not want to process the local info if the insn was
	     deleted.  For instance, if the insn did a wild read, we
	     no longer need to trash the info.  */
	  if (insn_info->insn
	      && INSN_P (insn_info->insn)
	      && (!deleted))
	    {
	      scan_stores_nospill (insn_info->store_rec, v, NULL);
	      if (insn_info->wild_read)
		{
		  if (dump_file)
		    fprintf (dump_file, "wild read\n");
		  bitmap_clear (v);
		}
	      else if (insn_info->read_rec)
		{
		  if (dump_file)
		    fprintf (dump_file, "regular read\n");
		  scan_reads_nospill (insn_info, v, NULL);
		}
	    }

	  insn_info = insn_info->prev_insn;
	}
    }
}
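
/* The deletion test above, as a concrete example (illustrative): for
   a store to bytes [8, 12) of a global group, the insn is deleted
   only if get_bitmap_index is nonzero for each of offsets 8..11 and
   every one of those bits is currently set in v, i.e. every byte the
   store writes is rewritten before any read on all paths to the
   exit.  */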

static void
dse_step5_spill (void)
{
  basic_block bb;
  FOR_EACH_BB (bb)
    {
      bb_info_t bb_info = bb_table[bb->index];
      insn_info_t insn_info = bb_info->last_insn;
      bitmap v = bb_info->out;

      while (insn_info)
	{
	  bool deleted = false;
	  /* There may have been code deleted by the dce pass run before
	     this phase.  */
	  if (insn_info->insn
	      && INSN_P (insn_info->insn)
	      && (!insn_info->cannot_delete)
	      && (!bitmap_empty_p (v)))
	    {
	      /* Try to delete the current insn.  */
	      store_info_t store_info = insn_info->store_rec;
	      deleted = true;

	      while (store_info)
		{
		  if (store_info->alias_set)
		    {
		      int index = get_bitmap_index (clear_alias_group,
						    store_info->alias_set);
		      if (index == 0 || !bitmap_bit_p (v, index))
			{
			  deleted = false;
			  break;
			}
		    }
		  else
		    deleted = false;
		  store_info = store_info->next;
		}
	      if (deleted && dbg_cnt (dse))
		{
		  if (dump_file)
		    fprintf (dump_file, "Spill deleting insn %d\n",
			     INSN_UID (insn_info->insn));
		  check_for_inc_dec (insn_info->insn);
		  delete_insn (insn_info->insn);
		  spill_deleted++;
		  insn_info->insn = NULL;
		}
	    }

	  if (insn_info->insn
	      && INSN_P (insn_info->insn)
	      && (!deleted))
	    {
	      scan_stores_spill (insn_info->store_rec, v, NULL);
	      scan_reads_spill (insn_info->read_rec, v, NULL);
	    }

	  insn_info = insn_info->prev_insn;
	}
    }
}

/*----------------------------------------------------------------------------
   Sixth step.

   Destroy everything left standing.
----------------------------------------------------------------------------*/

static void
dse_step6 (bool global_done)
{
  unsigned int i;
  group_info_t group;
  basic_block bb;

  if (global_done)
    {
      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
	{
	  free (group->offset_map_n);
	  free (group->offset_map_p);
	  BITMAP_FREE (group->store1_n);
	  BITMAP_FREE (group->store1_p);
	  BITMAP_FREE (group->store2_n);
	  BITMAP_FREE (group->store2_p);
	  BITMAP_FREE (group->group_kill);
	}

      FOR_ALL_BB (bb)
	{
	  bb_info_t bb_info = bb_table[bb->index];
	  BITMAP_FREE (bb_info->gen);
	  if (bb_info->kill)
	    BITMAP_FREE (bb_info->kill);
	  if (bb_info->in)
	    BITMAP_FREE (bb_info->in);
	  if (bb_info->out)
	    BITMAP_FREE (bb_info->out);
	}
    }
  else
    {
      for (i = 0; VEC_iterate (group_info_t, rtx_group_vec, i, group); i++)
	{
	  BITMAP_FREE (group->store1_n);
	  BITMAP_FREE (group->store1_p);
	  BITMAP_FREE (group->store2_n);
	  BITMAP_FREE (group->store2_p);
	  BITMAP_FREE (group->group_kill);
	}
    }

  if (clear_alias_sets)
    {
      BITMAP_FREE (clear_alias_sets);
      BITMAP_FREE (disqualified_clear_alias_sets);
      free_alloc_pool (clear_alias_mode_pool);
      htab_delete (clear_alias_mode_table);
    }

  end_alias_analysis ();
  free (bb_table);
  htab_delete (rtx_group_table);
  VEC_free (group_info_t, heap, rtx_group_vec);
  BITMAP_FREE (all_blocks);
  BITMAP_FREE (scratch);

  free_alloc_pool (rtx_store_info_pool);
  free_alloc_pool (read_info_pool);
  free_alloc_pool (insn_info_pool);
  free_alloc_pool (bb_info_pool);
  free_alloc_pool (rtx_group_info_pool);
  free_alloc_pool (deferred_change_pool);
}

/* -------------------------------------------------------------------------
   DSE
   ------------------------------------------------------------------------- */

/* Callback for running pass_rtl_dse.  */

static unsigned int
rest_of_handle_dse (void)
{
  bool did_global = false;

  df_set_flags (DF_DEFER_INSN_RESCAN);

  dse_step0 ();
  dse_step1 ();
  dse_step2_init ();
  if (dse_step2_nospill ())
    {
      df_set_flags (DF_LR_RUN_DCE);
      df_analyze ();
      did_global = true;
      if (dump_file)
	fprintf (dump_file, "doing global processing\n");
      dse_step3 (false);
      dse_step4 ();
      dse_step5_nospill ();
    }

  /* For the instance of dse that runs after reload, we make a special
     pass to process the spills.  These are special in that they are
     totally transparent, i.e., there are no aliasing issues that need
     to be considered.  This means that the wild reads that kill
     everything else do not apply here.  */
  if (clear_alias_sets && dse_step2_spill ())
    {
      if (!did_global)
	{
	  df_set_flags (DF_LR_RUN_DCE);
	  df_analyze ();
	}
      did_global = true;
      if (dump_file)
	fprintf (dump_file, "doing global spill processing\n");
      dse_step3 (true);
      dse_step4 ();
      dse_step5_spill ();
    }

  dse_step6 (did_global);

  if (dump_file)
    fprintf (dump_file, "dse: local deletions = %d, global deletions = %d, spill deletions = %d\n",
	     locally_deleted, globally_deleted, spill_deleted);
  return 0;
}

static bool
gate_dse (void)
{
  return optimize > 0 && flag_dse;
}

struct tree_opt_pass pass_rtl_dse1 =
{
  "dse1",                               /* name */
  gate_dse,                             /* gate */
  rest_of_handle_dse,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_DSE1,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func |
  TODO_df_finish |
  TODO_ggc_collect,                     /* todo_flags_finish */
  'w'                                   /* letter */
};

struct tree_opt_pass pass_rtl_dse2 =
{
  "dse2",                               /* name */
  gate_dse,                             /* gate */
  rest_of_handle_dse,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_DSE2,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func |
  TODO_df_finish |
  TODO_ggc_collect,                     /* todo_flags_finish */
  'w'                                   /* letter */
};