/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;
static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
static regset_head reg_pending_uses_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static bool reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches in typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap cache for true dependencies is
   allocated, the other two caches are allocated as well.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
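/* Illustrative sketch (a hypothetical helper, not part of the pass):
   the caches are indexed by the LUIDs of the two insns, so a duplicate
   dependence can be rejected without walking LOG_LINKS.  */
#if 0
static int
example_cached_dependence_p (insn, elem)
     rtx insn, elem;
{
  int iluid = INSN_LUID (insn), eluid = INSN_LUID (elem);
  return TEST_BIT (true_dependency_cache[iluid], eluid)
	 || TEST_BIT (anti_dependency_cache[iluid], eluid)
	 || TEST_BIT (output_dependency_cache[iluid], eluid);
}
#endif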
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
static void add_insn_mem_dependence PARAMS ((struct deps *, rtx *, rtx *,
					     rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}
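/* Example (illustrative): for a MEM whose address is pseudo 70, if reg 70
   is known by alias analysis to hold (symbol_ref "x"), the trap test is
   applied to the symbol_ref, which normally cannot trap; an address held
   in an arbitrary pseudo is conservatively considered trapping.  */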
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (pat == 0)
    return 0;
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  /* A conditional jump is (set (pc) (if_then_else COND ...)).  */
  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_SRC (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
      && XEXP (pat, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
	   && XEXP (pat, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
			   GET_MODE (cond),
			   XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  /* COND1 and COND2 are mutually exclusive if they are reversed
     comparisons of the same operands.  */
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
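/* Example (illustrative RTL): (eq (reg 65) (const_int 0)) and
   (ne (reg 65) (const_int 0)) are mutually exclusive, since
   reverse_condition (NE) == EQ and both operand pairs match.  */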
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;
  int present_p;
  rtx cond1, cond2;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be of interest.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, elem)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn))
	return;
    }

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && INSN_P (next) && SCHED_GROUP_P (next))
    {
      rtx nnext;

      /* Notes will never intervene here though, so don't bother checking
	 for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
	 that has LABEL_PRESERVE_P set, which is represented by the same
	 bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
	 SCHED_GROUP_P.  */
      while ((nnext = next_nonnote_insn (next)) != NULL
	     && INSN_P (nnext)
	     && SCHED_GROUP_P (nnext))
	next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
	return;

      /* Make the dependence point to NEXT, the last insn of the group.  */
      add_dependence (insn, next, REG_DEP_ANTI);
      return;
    }

  present_p = 1;
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
	abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
	/* Do nothing (present_dep_type is already 0).  */
	;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_OUTPUT;
      else
	present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
	return;
    }
#endif
  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
	{
#ifdef INSN_SCHEDULING
	  /* Clear corresponding cache entry because type of the link
	     may be changed.  */
	  if (true_dependency_cache != NULL)
	    {
	      if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
		       && output_dependency_cache)
		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else
		abort ();
	    }
#endif

	  /* If this is a more restrictive type of dependence than the existing
	     one, then change the existing dependence to this type.  */
	  if ((int) dep_type < (int) REG_NOTE_KIND (link))
	    PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
	  /* If we are adding a dependency to INSN's LOG_LINKs, then
	     note that in the bitmap caches of dependency information.  */
	  if (true_dependency_cache != NULL)
	    {
	      if ((int) REG_NOTE_KIND (link) == 0)
		SET_BIT (true_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
		SET_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	    }
#endif
	  return;
	}
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
	SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
	SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
	SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
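/* Illustration (insn names hypothetical): if insn B reads a register
   that insn A sets, the analysis records add_dependence (B, A, 0), a
   true dependence; B writing what A read gives
   add_dependence (B, A, REG_DEP_ANTI), and B writing what A wrote gives
   add_dependence (B, A, REG_DEP_OUTPUT).  */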
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (insn, list, dep_type)
     rtx insn, list;
     enum reg_note dep_type;
{
  for (; list; list = XEXP (list, 1))
    add_dependence (insn, XEXP (list, 0), dep_type);
}
/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (insn, listp, dep_type)
     rtx insn;
     rtx *listp;
     enum reg_note dep_type;
{
  rtx list, next;
  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx link, prev;

  SCHED_GROUP_P (insn) = 1;

  /* Everything that precedes INSN in the group must also come before
     each of INSN's dependencies; give the first insn of the group an
     anti-dependence on each insn that INSN depends on.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    {
      prev = insn;
      do
	{
	  prev = prev_nonnote_insn (prev);
	  if (XEXP (link, 0) == prev)
	    break;
	}
      while (SCHED_GROUP_P (prev));
      if (XEXP (link, 0) != prev)
	add_dependence (prev, XEXP (link, 0), REG_DEP_ANTI);
    }

  /* And chain INSN to the insn that immediately precedes it.  */
  prev = prev_nonnote_insn (insn);
  add_dependence (insn, prev, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
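/* For example (pseudo RTL, illustrative only):

     mem[a] = r1;  r2 = mem[a];   -- (1) true dependence
     r2 = mem[a];  mem[a] = r1;   -- (2) anti dependence
     mem[a] = r1;  mem[a] = r2;   -- (3) output dependence

   Read-after-read (0) imposes an ordering only for volatile references;
   see read_dependence in alias.c.  */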
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (deps, insn, for_read, for_write)
     struct deps *deps;
     rtx insn;
     int for_read, for_write;
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns,
				    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
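/* Usage note: sched_analyze below flushes everything at a call via
   flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn)),
   while a TRAP_IF in sched_analyze_2 only forces out the pending
   stores: flush_pending_lists (deps, insn, true, false).  */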
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x, insn;
{
  int regno;
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (GET_CODE (x) == SET)
	sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }
  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  if (code == SET)
	    {
	      while (--i >= 0)
		SET_REGNO_REG_SET (reg_pending_sets, regno + i);
	    }
	  else
	    {
	      while (--i >= 0)
		SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
	    }
	}
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	 it does not reload.  Ignore these as they have served their
	 purpose already.  */
      else if (regno >= deps->max_reg)
	{
	  if (GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER)
	    abort ();
	}
      else
	{
	  if (code == SET)
	    SET_REGNO_REG_SET (reg_pending_sets, regno);
	  else
	    SET_REGNO_REG_SET (reg_pending_clobbers, regno);

	  /* Pseudos that are REG_EQUIV to something may be replaced
	     by that during reloading.  We need only add dependencies for
	     the address in the REG_EQUIV note.  */
	  if (!reload_completed
	      && reg_known_equiv_p[regno]
	      && GET_CODE (reg_known_value[regno]) == MEM)
	    sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */
	  if (REG_N_CALLS_CROSSED (regno) == 0)
	    add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
	{
	  t = shallow_copy_rtx (dest);
	  cselib_lookup (XEXP (t, 0), Pmode, 1);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	}

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush,
			       REG_DEP_ANTI);

	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
				   &deps->pending_write_mems, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }
  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x, insn;
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
	 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
	 this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif
    case REG:
      {
	int regno = REGNO (x);
	if (regno < FIRST_PSEUDO_REGISTER)
	  {
	    int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
	    while (--i >= 0)
	      SET_REGNO_REG_SET (reg_pending_uses, regno + i);
	  }
	/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	   it does not reload.  Ignore these as they have served their
	   purpose already.  */
	else if (regno >= deps->max_reg)
	  {
	    if (GET_CODE (PATTERN (insn)) != USE
		&& GET_CODE (PATTERN (insn)) != CLOBBER)
	      abort ();
	  }
	else
	  {
	    SET_REGNO_REG_SET (reg_pending_uses, regno);

	    /* Pseudos that are REG_EQUIV to something may be replaced
	       by that during reloading.  We need only add dependencies for
	       the address in the REG_EQUIV note.  */
	    if (!reload_completed
		&& reg_known_equiv_p[regno]
		&& GET_CODE (reg_known_value[regno]) == MEM)
	      sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	    /* If the register does not already cross any calls, then add this
	       insn to the sched_before_next_call list so that it will still
	       not cross calls after scheduling.  */
	    if (REG_N_CALLS_CROSSED (regno) == 0)
	      deps->sched_before_next_call
		= alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  }
	return;
      }
    case MEM:
      {
	/* Reading memory.  */
	rtx u;
	rtx pending, pending_mem;
	rtx t = x;

	if (current_sched_info->use_cselib)
	  {
	    t = shallow_copy_rtx (t);
	    cselib_lookup (XEXP (t, 0), Pmode, 1);
	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	  }
	pending = deps->pending_read_insns;
	pending_mem = deps->pending_read_mems;
	while (pending)
	  {
	    if (read_dependence (XEXP (pending_mem, 0), t))
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	pending = deps->pending_write_insns;
	pending_mem = deps->pending_write_mems;
	while (pending)
	  {
	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				 t, rtx_varies_p))
	      add_dependence (insn, XEXP (pending, 0), 0);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	  if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
	      || deps_may_trap_p (x))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	add_insn_mem_dependence (deps, &deps->pending_read_insns,
				 &deps->pending_read_mems, insn, x);

	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (deps, XEXP (x, 0), insn);
	return;
      }
    case TRAP_IF:
      /* Force pending stores to memory in case a trap handler needs them.  */
      flush_pending_lists (deps, insn, true, false);
      break;
    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.  */
	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	  reg_pending_barrier = true;

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We can not just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
	   traditional asms unlike their normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
	    return;
	  }
	break;
      }
    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
	 to get proper dependencies for following instructions.  We must handle
	 them as reads to get proper dependencies from this to previous
	 instructions.  Thus we need to pass them to both sched_analyze_1
	 and sched_analyze_2.  We must call sched_analyze_2 first in order
	 to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;
    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
	 false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
	 and others know that a value is dead.  Depend on the last call
	 instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
	add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
    }
  else if (code == PARALLEL)
    {
      int i;

      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  rtx sub = XVECEXP (x, 0, i);
	  code = GET_CODE (sub);

	  if (code == COND_EXEC)
	    {
	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
	      sub = COND_EXEC_CODE (sub);
	      code = GET_CODE (sub);
	    }
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (deps, sub, insn);
	  else
	    sched_analyze_2 (deps, sub, insn);
	}
    }
  else
    sched_analyze_2 (deps, x, insn);
  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	{
	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
	    sched_analyze_1 (deps, XEXP (link, 0), insn);
	  else
	    sched_analyze_2 (deps, XEXP (link, 0), insn);
	}
      if (find_reg_note (insn, REG_SETJMP, NULL))
	reg_pending_barrier = true;
    }
  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
	reg_pending_barrier = true;
      else
	{
	  rtx pending, pending_mem;
	  regset_head tmp;
	  INIT_REG_SET (&tmp);

	  (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
	  /* Make latency of jump equal to 0 by using anti-dependence.  */
	  EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
	      reg_last->uses_length++;
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	    });
	  CLEAR_REG_SET (&tmp);

	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump iff
	     the result is needed by the above register used mask.  */

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush,
			       REG_DEP_ANTI);
	}
    }
  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      /* Update loop_notes with any notes from this insn.  Also determine
	 if any of the notes on the list correspond to instruction scheduling
	 barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
	{
	  if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
	    reg_pending_barrier = true;

	  link = XEXP (link, 1);
	}
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }
  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    reg_pending_barrier = true;
  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the added dependencies are not
	 real, so we use anti-dependence here.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
	    });
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (insn, &reg_last->uses,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free (insn, &reg_last->sets,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free (insn, &reg_last->clobbers,
					    REG_DEP_ANTI);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	    });
	}

      for (i = 0; i < deps->max_reg; i++)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	}

      flush_pending_lists (deps, insn, true, true);
      reg_pending_barrier = false;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
	 of the lists.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, 0);
	      add_dependence_list (insn, reg_last->clobbers, 0);
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	      reg_last->uses_length++;
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	      reg_last->clobbers_length++;
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	    });
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, 0);
	      add_dependence_list (insn, reg_last->clobbers, 0);
	      reg_last->uses_length++;
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
		{
		  add_dependence_list_and_free (insn, &reg_last->sets,
						REG_DEP_OUTPUT);
		  add_dependence_list_and_free (insn, &reg_last->uses,
						REG_DEP_ANTI);
		  add_dependence_list_and_free (insn, &reg_last->clobbers,
						REG_DEP_OUTPUT);
		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		  reg_last->clobbers_length = 0;
		  reg_last->uses_length = 0;
		}
	      else
		{
		  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
		  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
		}
	      reg_last->clobbers_length++;
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (insn, &reg_last->sets,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->clobbers,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->uses,
					    REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	    });
	}

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);
  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it can not
     be moved into a different basic block.  */

  if (deps->libcall_block_tail_insn)
    {
      set_sched_group_p (insn);
      CANT_MOVE (insn) = 1;
    }
  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  set_sched_group_p (insn);
	  CANT_MOVE (insn) = 1;
	}
      else
	{
	end_call_group:
	  deps->in_post_call_group_p = false;
	}
    }
}
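/* Example (register numbers invented): after a call, the move
   (set (reg 70) (reg:SI 0)) of a hard return-value register into a
   pseudo keeps the post-call group open and is marked SCHED_GROUP_P;
   a later pseudo-to-pseudo copy (set (reg 71) (reg 70)) has both
   regnos >= FIRST_PSEUDO_REGISTER and so ends the group.  */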
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  rtx insn;
  rtx loop_notes = 0;

  if (current_sched_info->use_cselib)
    cselib_init ();

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  /* Make each JUMP_INSN a scheduling barrier for memory
	     references.  */
	  if (GET_CODE (insn) == JUMP_INSN)
	    {
	      /* Keep the list a reasonable size.  */
	      if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
		flush_pending_lists (deps, insn, true, true);
	      else
		deps->last_pending_memory_flush
		  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	    }
	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  int i;

	  CANT_MOVE (insn) = 1;

	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  if (find_reg_note (insn, REG_SETJMP, NULL))
	    {
	      /* This is setjmp.  Assume that all registers, not just
		 hard registers, may be clobbered by this call.  */
	      reg_pending_barrier = true;
	    }
	  else
	    {
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		/* A call may read and modify global register variables.  */
		if (global_regs[i])
		  {
		    SET_REGNO_REG_SET (reg_pending_sets, i);
		    SET_REGNO_REG_SET (reg_pending_uses, i);
		  }
		/* Other call-clobbered hard regs may be clobbered.
		   Since we only have a choice between 'might be clobbered'
		   and 'definitely not clobbered', we must include all
		   partly call-clobbered registers here.  */
		else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
			 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
		  SET_REGNO_REG_SET (reg_pending_clobbers, i);
		/* We don't know what set of fixed registers might be used
		   by the function, but it is certain that the stack pointer
		   is among them, so be conservative.  */
		else if (fixed_regs[i])
		  SET_REGNO_REG_SET (reg_pending_uses, i);
		/* The frame pointer is normally not used by the function
		   itself, but by the debugger.  */
		/* ??? MIPS o32 is an exception.  It uses the frame pointer
		   in the macro expansion of jal but does not represent this
		   fact in the call_insn rtl.  */
		else if (i == FRAME_POINTER_REGNUM
			 || (i == HARD_FRAME_POINTER_REGNUM
			     && (! reload_completed || frame_pointer_needed)))
		  SET_REGNO_REG_SET (reg_pending_uses, i);
	    }

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  add_dependence_list_and_free (insn, &deps->sched_before_next_call,
					REG_DEP_ANTI);

	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;

	  /* In the absence of interprocedural alias analysis, we must flush
	     all pending reads and writes, and start new dependencies starting
	     from here.  But only flush writes for constant calls (which may
	     be passed a pointer to something we haven't written yet).  */
	  flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = true;
	}
      /* See comments on reemit_notes as to why we do this.
	 ??? Actually, the reemit_notes just say what is done, not why.  */

      if (GET_CODE (insn) == NOTE
	  && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
	{
	  rtx rtx_region;

	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
	    rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
	  else
	    rtx_region = GEN_INT (0);

	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					rtx_region,
					loop_notes);
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					GEN_INT (NOTE_LINE_NUMBER (insn)),
					loop_notes);
	  CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
	}
      if (current_sched_info->use_cselib)
	cselib_process_insn (insn);
      /* Now that we have completed handling INSN, check and see if it is
	 a CLOBBER beginning a libcall block.  If it is, record the
	 end of the libcall sequence.

	 We want to schedule libcall blocks as a unit before reload.  While
	 this restricts scheduling, it preserves the meaning of a libcall
	 block.

	 As a side effect, we may get better code due to decreased register
	 pressure as well as less chance of a foreign insn appearing in
	 a libcall block.  */
      if (!reload_completed
	  /* Note we may have nested libcall sequences.  We only care about
	     the outermost libcall sequence.  */
	  && deps->libcall_block_tail_insn == 0
	  /* The sequence must start with a clobber of a register.  */
	  && GET_CODE (insn) == INSN
	  && GET_CODE (PATTERN (insn)) == CLOBBER
	  && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
	  /* The CLOBBER must also have a REG_LIBCALL note attached.  */
	  && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
	  && (end_seq = XEXP (link, 0)) != 0
	  /* The insn referenced by the REG_LIBCALL note must be a
	     simple nop copy with the same destination as the register
	     mentioned in the clobber.  */
	  && (set = single_set (end_seq)) != 0
	  && SET_DEST (set) == r0 && SET_SRC (set) == r0
	  /* And finally the insn referenced by the REG_LIBCALL must
	     also contain a REG_EQUAL note and a REG_RETVAL note.  */
	  && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
	  && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
	deps->libcall_block_tail_insn = XEXP (link, 0);
      /* If we have reached the end of a libcall block, then close the
	 block.  */
      if (deps->libcall_block_tail_insn == insn)
	deps->libcall_block_tail_insn = 0;

      if (insn == tail)
	{
	  if (current_sched_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  abort ();
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
	continue;

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	{
	  rtx x = XEXP (link, 0);
	  rtx new_link;

#ifdef ENABLE_CHECKING
	  /* If add_dependence is working properly there should never
	     be notes, deleted insns or duplicates in the backward
	     links.  Thus we need not check for them here.

	     However, if we have enabled checking we might as well go
	     ahead and verify that add_dependence worked properly.  */
	  if (GET_CODE (x) == NOTE
	      || INSN_DELETED_P (x)
	      || (forward_dependency_cache != NULL
		  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
			       INSN_LUID (insn)))
	      || (forward_dependency_cache == NULL
		  && find_insn_list (insn, INSN_DEPEND (x))))
	    abort ();
	  if (forward_dependency_cache != NULL)
	    SET_BIT (forward_dependency_cache[INSN_LUID (x)],
		     INSN_LUID (insn));
#endif

	  new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

	  dep_type = REG_NOTE_KIND (link);
	  PUT_REG_NOTE_KIND (new_link, dep_type);

	  INSN_DEPEND (x) = new_link;
	  INSN_DEP_COUNT (insn) += 1;
	}
    }
}
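/* Illustration: if LOG_LINKS (B) contains insn A (a backward link, "B
   depends on A"), the loop above adds B to INSN_DEPEND (A) and bumps
   INSN_DEP_COUNT (B); the scheduler proper then counts resolved forward
   links against INSN_DEP_COUNT to decide when B becomes ready.  */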
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = false;
  deps->libcall_block_tail_insn = 0;
}
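/* Hypothetical driver sketch showing the usual lifecycle of a deps
   context (the real callers live in sched-rgn.c and sched-ebb.c):  */
#if 0
static void
example_analyze_block (head, tail)
     rtx head, tail;
{
  struct deps tmp_deps;

  init_deps (&tmp_deps);			/* fresh, empty context */
  sched_analyze (&tmp_deps, head, tail);	/* build backward LOG_LINKS */
  compute_forward_dependences (head, tail);	/* mirror into INSN_DEPEND */
  free_deps (&tmp_deps);			/* release the lists */
}
#endif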
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
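/* Worked example (numbers illustrative): 10000 luids spread over 15
   basic blocks give 10000 / 15 = 666 > 500, so the caches are allocated
   and each of the 10000 bitmaps gets 10000 bits; with 1000 basic blocks
   the ratio is only 10 and the caches are skipped.  */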
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
  reg_pending_barrier = false;
}
/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}