/* Dataflow support routines.
- Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
mhayes@redhat.com)
df_init simply creates a poor man's object (df) that needs to be
passed to all the dataflow routines. df_finish destroys this
-object and frees up any allocated memory.
+object and frees up any allocated memory. DF_ALL says to analyse
+everything.
df_analyse performs the following:
If the insns are in SSA form then the reg-def and use-def lists
should only contain the single defining ref.
+
TODO:
1) Incremental dataflow analysis.
or deleted refs. Currently the global dataflow information is
recomputed from scratch but this could be propagated more efficiently.
-2) Improved global data flow computation using depth first search.
-
-3) Reduced memory requirements.
+2) Reduced memory requirements.
We could operate a pool of ref structures. When a ref is deleted it
gets returned to the pool (say by linking on to a chain of free refs).
periodically squeeze the def and use tables and associated bitmaps and
renumber the def and use ids.
-4) Ordering of reg-def and reg-use lists.
+3) Ordering of reg-def and reg-use lists.
Should the first entry in the def list be the first def (within a BB)?
Similarly, should the first entry in the use list be the last use
(within a BB)?
-5) Working with a sub-CFG.
+4) Working with a sub-CFG.
Often the whole CFG does not need to be analyzed, for example,
when optimising a loop, only certain registers are of interest.
Perhaps there should be a bitmap argument to df_analyse to specify
- which registers should be analyzed? */
+which registers should be analyzed?
+
+
+NOTES:
+
+Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
+both a use and a def. These are both marked read/write to show that they
+are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
+will generate a use of reg 42 followed by a def of reg 42 (both marked
+read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
+generates a use of reg 41 then a def of reg 41 (both marked read/write),
+even though reg 41 is decremented before it is used for the memory
+address in this second example.
+
+A set to a REG inside a ZERO_EXTRACT, SIGN_EXTRACT, or SUBREG invokes
+a read-modify-write operation. We generate both a use and a def
+and again mark them read/write.
+*/
#include "config.h"
#include "system.h"
static struct df *ddf;
static void df_reg_table_realloc PARAMS((struct df *, int));
-#if 0
-static void df_def_table_realloc PARAMS((struct df *, int));
-#endif
static void df_insn_table_realloc PARAMS((struct df *, unsigned int));
static void df_bitmaps_alloc PARAMS((struct df *, int));
static void df_bitmaps_free PARAMS((struct df *, int));
static void df_chain_dump PARAMS((struct df_link *, FILE *file));
static void df_chain_dump_regno PARAMS((struct df_link *, FILE *file));
static void df_regno_debug PARAMS ((struct df *, unsigned int, FILE *));
+static void df_regno_rtl_debug PARAMS ((struct df *, unsigned int, FILE *));
static void df_ref_debug PARAMS ((struct df *, struct ref *, FILE *));
static void df_rd_transfer_function PARAMS ((int, int *, bitmap, bitmap,
bitmap, bitmap, void *));
if (size <= df->insn_size)
return;
- /* Make the table a little larger than requested, so we don't need
+ /* Make the table a little larger than requested, so we do not need
to enlarge it so often. */
size += df->insn_size / 4;
}
-#if 0
-/* Not currently used. */
-static void
-df_def_table_realloc (df, size)
- struct df *df;
- int size;
-{
- int i;
- struct ref *refs;
-
- /* Make table 25 percent larger by default. */
- if (! size)
- size = df->def_size / 4;
-
- df->def_size += size;
- df->defs = xrealloc (df->defs,
- df->def_size * sizeof (*df->defs));
-
- /* Allocate a new block of memory and link into list of blocks
- that will need to be freed later. */
-
- refs = xmalloc (size * sizeof (*refs));
-
- /* Link all the new refs together, overloading the chain field. */
- for (i = 0; i < size - 1; i++)
- refs[i].chain = (struct df_link *) (refs + i + 1);
- refs[size - 1].chain = 0;
-}
-#endif
-
-
-
/* Allocate bitmaps for each basic block. */
static void
df_bitmaps_alloc (df, flags)
if (! (df->flags & DF_HARD_REGS))
return;
- /* GET_MODE (reg) is correct here. We don't want to go into a SUBREG
+ /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG
for the mode, because we only want to add references to regs, which
- are really referenced. E.g. a (subreg:SI (reg:DI 0) 0) does _not_
+ are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_
reference the whole reg 0 in DI mode (which would also include
reg 1, at least, if 0 and 1 are SImode registers). */
endregno = HARD_REGNO_NREGS (regno, GET_MODE (reg));
}
}
-/* Writes to paradoxical subregs, or subregs which are too narrow
- are read-modify-write. */
+/* Return non-zero if writes to paradoxical SUBREGs, or SUBREGs which
+ are too narrow, are read-modify-write. */
static inline bool
read_modify_subreg_p (x)
rtx x;
return true;
}
+
/* Process all the registers defined in the rtx, X. */
static void
df_def_record_1 (df, x, bb, insn)
flags |= DF_REF_MODE_CHANGE;
#endif
- /* May be, we should flag the use of strict_low_part somehow. Might be
- handy for the reg allocator. */
+ /* Maybe, we should flag the use of STRICT_LOW_PART somehow. It might
+ be handy for the reg allocator. */
while (GET_CODE (dst) == STRICT_LOW_PART
|| GET_CODE (dst) == ZERO_EXTRACT
|| GET_CODE (dst) == SIGN_EXTRACT
|| read_modify_subreg_p (dst))
{
- /* Strict low part always contains SUBREG, but we don't want to make
+ /* Strict low part always contains SUBREG, but we do not want to make
it appear outside, as whole register is always considered. */
if (GET_CODE (dst) == STRICT_LOW_PART)
{
case SUBREG:
/* While we're here, optimize this case. */
- /* In case the SUBREG is not of a register, don't optimize. */
+ /* In case the SUBREG is not of a REG, do not optimize. */
if (GET_CODE (SUBREG_REG (x)) != REG)
{
loc = &SUBREG_REG (x);
/* ... Fall through ... */
case REG:
- /* See a register (or subreg) other than being set. */
+ /* See a REG (or SUBREG) other than being set. */
df_ref_record (df, x, loc, insn, ref_type, flags);
return;
bb, insn, 0);
break;
case STRICT_LOW_PART:
- /* A strict_low_part uses the whole reg not only the subreg. */
+ /* A strict_low_part uses the whole REG and not just the SUBREG. */
dst = XEXP (dst, 0);
if (GET_CODE (dst) != SUBREG)
abort ();
df_uses_record (df, &PATTERN (insn),
DF_REF_REG_USE, bb, insn, 0);
-
if (GET_CODE (insn) == CALL_INSN)
{
rtx note;
{
struct ref *def = link->ref;
unsigned int dregno = DF_REF_REGNO (def);
- /* Don't add ref's to the chain two times. I.e. only add
- new refs. XXX the same could be done by testing if the current
- insn is a modified (or a new) one. This would be faster. */
+
+ /* Do not add ref's to the chain twice, i.e., only add new
+ refs. XXX the same could be done by testing if the
+ current insn is a modified (or a new) one. This would be
+ faster. */
if (DF_REF_ID (def) < df->def_id_save)
continue;
{
rtx insn;
- /* Scan in forward order so that the last uses appear at the
- start of the chain. */
+ /* Scan in forward order so that the last uses appear at the start
+ of the chain. */
for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
insn = NEXT_INSN (insn))
{
struct ref *use = link->ref;
unsigned int uregno = DF_REF_REGNO (use);
- /* Don't add ref's to the chain two times. I.e. only add
- new refs. XXX the same could be done by testing if the current
- insn is a modified (or a new) one. This would be faster. */
+
+ /* Do not add ref's to the chain twice, i.e., only add new
+ refs. XXX the same could be done by testing if the
+ current insn is a modified (or a new) one. This would be
+ faster. */
if (DF_REF_ID (use) < df->use_id_save)
continue;
}
- /* For each def in insn...record the last def of each reg. */
+ /* For each def in insn... record the last def of each reg. */
for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
{
struct ref *def = def_link->ref;
{
*changed = bitmap_union_of_diff (out, gen, in, kill);
}
+
+
static void
df_ru_transfer_function (bb, changed, in, out, gen, kill, data)
int bb ATTRIBUTE_UNUSED;
*changed = bitmap_union_of_diff (in, gen, out, kill);
}
+
static void
df_lr_transfer_function (bb, changed, in, out, use, def, data)
int bb ATTRIBUTE_UNUSED;
return total;
}
+
/* Perform dataflow analysis using existing DF structure for blocks
within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */
static void
kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
}
iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
- FORWARD, UNION, df_rd_transfer_function,
+ DF_FORWARD, DF_UNION, df_rd_transfer_function,
df->inverse_rc_map, NULL);
free (in);
free (out);
kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
}
iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
- BACKWARD, UNION, df_ru_transfer_function,
+ DF_BACKWARD, DF_UNION, df_ru_transfer_function,
df->inverse_rts_map, NULL);
free (in);
free (out);
def[bb->index] = DF_BB_INFO (df, bb)->lr_def;
}
iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
- BACKWARD, UNION, df_lr_transfer_function,
+ DF_BACKWARD, DF_UNION, df_lr_transfer_function,
df->inverse_rts_map, NULL);
free (in);
free (out);
rtx insn;
int count = 0;
- /* While we have to scan the chain of insns for this BB, we don't
+ /* While we have to scan the chain of insns for this BB, we do not
need to allocate and queue a long chain of BB/INSN pairs. Using
a bitmap for insns_modified saves memory and avoids queuing
duplicates. */
}
-typedef struct replace_args {
+typedef struct replace_args
+{
rtx match;
rtx replacement;
rtx insn;
fprintf (file, "}");
}
+
+/* Dump a chain of refs with the associated regno. */
static void
df_chain_dump_regno (link, file)
struct df_link *link;
fprintf (file, "}");
}
+
/* Dump dataflow info. */
void
df_dump (df, flags, file)
fprintf (file, "\n");
}
+
void
df_insn_debug_regno (df, insn, file)
struct df *df;
fprintf (file, "\n");
}
+
static void
df_regno_debug (df, regno, file)
struct df *df;
df_chain_dump (DF_REF_CHAIN (ref), file);
fprintf (file, "\n");
}
-
+\f
+/* Functions for debugging from GDB. */
void
debug_df_insn (insn)
df_chain_dump (link, stderr);
fputc ('\n', stderr);
}
+\f
/* Hybrid search algorithm from "Implementation Techniques for
Efficient Data-Flow Analysis of Large Programs". */
int i = block->index;
edge e;
basic_block bb = block;
+
SET_BIT (visited, block->index);
if (TEST_BIT (pending, block->index))
{
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
- /* Calculate <conf_op> of predecessor_outs */
+ /* Calculate <conf_op> of predecessor_outs. */
bitmap_zero (in[i]);
for (e = bb->pred; e != 0; e = e->pred_next)
{
continue;
switch (conf_op)
{
- case UNION:
+ case DF_UNION:
bitmap_a_or_b (in[i], in[i], out[e->src->index]);
break;
- case INTERSECTION:
+ case DF_INTERSECTION:
bitmap_a_and_b (in[i], in[i], out[e->src->index]);
break;
}
}
else
{
- /* Calculate <conf_op> of successor ins */
+ /* Calculate <conf_op> of successor ins. */
bitmap_zero (out[i]);
for (e = bb->succ; e != 0; e = e->succ_next)
{
continue;
switch (conf_op)
{
- case UNION:
+ case DF_UNION:
bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
break;
- case INTERSECTION:
+ case DF_INTERSECTION:
bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
break;
}
RESET_BIT (pending, i);
if (changed)
{
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
}
}
}
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
int i = block->index;
edge e;
basic_block bb = block;
+
SET_BIT (visited, block->index);
if (TEST_BIT (pending, block->index))
{
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
- /* Calculate <conf_op> of predecessor_outs */
+ /* Calculate <conf_op> of predecessor_outs. */
sbitmap_zero (in[i]);
for (e = bb->pred; e != 0; e = e->pred_next)
{
continue;
switch (conf_op)
{
- case UNION:
+ case DF_UNION:
sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
break;
- case INTERSECTION:
+ case DF_INTERSECTION:
sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
break;
}
}
else
{
- /* Calculate <conf_op> of successor ins */
+ /* Calculate <conf_op> of successor ins. */
sbitmap_zero (out[i]);
for (e = bb->succ; e != 0; e = e->succ_next)
{
continue;
switch (conf_op)
{
- case UNION:
+ case DF_UNION:
sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
break;
- case INTERSECTION:
+ case DF_INTERSECTION:
sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
break;
}
}
}
- /* Common part */
+ /* Common part. */
(*transfun)(i, &changed, in[i], out[i], gen[i], kill[i], data);
RESET_BIT (pending, i);
if (changed)
{
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
}
}
}
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
}
-
-
/* gen = GEN set.
kill = KILL set.
in, out = Filled in by function.
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
+
pending = sbitmap_alloc (last_basic_block);
visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
+
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
{
fibheap_insert (worklist, order[i], (void *) (size_t) i);
SET_BIT (pending, i);
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
sbitmap_copy (out[i], gen[i]);
else
sbitmap_copy (in[i], gen[i]);
});
+
while (sbitmap_first_set_bit (pending) != -1)
{
while (!fibheap_empty (worklist))
hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
conf_op, transfun, visited, pending, data);
}
+
if (sbitmap_first_set_bit (pending) != -1)
{
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
break;
}
}
+
sbitmap_free (pending);
sbitmap_free (visited);
fibheap_delete (worklist);
}
+
/* Exactly the same as iterative_dataflow_sbitmap, except it works on
- bitmaps instead */
+ bitmaps instead. */
void
iterative_dataflow_bitmap (in, out, gen, kill, blocks,
dir, conf_op, transfun, order, data)
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
+
pending = sbitmap_alloc (last_basic_block);
visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
+
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
{
fibheap_insert (worklist, order[i], (void *) (size_t) i);
SET_BIT (pending, i);
- if (dir == FORWARD)
+ if (dir == DF_FORWARD)
bitmap_copy (out[i], gen[i]);
else
bitmap_copy (in[i], gen[i]);
});
+
while (sbitmap_first_set_bit (pending) != -1)
{
while (!fibheap_empty (worklist))
hybrid_search_bitmap (bb, in, out, gen, kill, dir,
conf_op, transfun, visited, pending, data);
}
+
if (sbitmap_first_set_bit (pending) != -1)
{
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
/* Form lists of pseudo register references for autoinc optimization
for GNU compiler. This is part of flow optimization.
- Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)
This file is part of GCC.
#define DF_RD_CHAIN 64 /* Reg-def chain. */
#define DF_RU_CHAIN 128 /* Reg-use chain. */
#define DF_ALL 255
-#define DF_HARD_REGS 1024
+#define DF_HARD_REGS 1024 /* Mark hard registers. */
#define DF_EQUIV_NOTES 2048 /* Mark uses present in EQUIV/EQUAL notes. */
enum df_ref_type {DF_REF_REG_DEF, DF_REF_REG_USE, DF_REF_REG_MEM_LOAD,
#define DF_REF_TYPE_NAMES {"def", "use", "mem load", "mem store"}
-/* ???> Perhaps all these data structures should be made private
- to enforce the interface. */
-
-
/* Link on a def-use or use-def chain. */
struct df_link
{
enum df_ref_flags
{
+ /* Read-modify-write refs generate both a use and a def and
+ these are marked with this flag to show that they are not
+ independent. */
DF_REF_READ_WRITE = 1,
-
- /* This flag is set on register references itself representing a or
- being inside a subreg on machines which have CLASS_CANNOT_CHANGE_MODE
- and where the mode change of that subreg expression is invalid for
- this class. Note, that this flag can also be set on df_refs
- representing the REG itself (i.e. one might not see the subreg
- anymore). Also note, that this flag is set also for hardreg refs.
- I.e. you must check yourself if it's a pseudo. */
+
+ /* This flag is set on register references inside a subreg on
+ machines which have CLASS_CANNOT_CHANGE_MODE and where the mode
+ change of that subreg expression is invalid for this class.
+ Note, that this flag can also be set on df_refs representing
+ the REG itself (i.e., one might not see the subreg anymore).
+ Also note, that this flag is set also for hardreg refs, i.e.,
+ you must check yourself if it's a pseudo. */
DF_REF_MODE_CHANGE = 2
};
-/* Define a register reference structure. */
+
+/* Define a register reference structure. One of these is allocated
+ for every register reference (use or def). Note some register
+ references (e.g., post_inc, subreg) generate both a def and a use. */
struct ref
{
rtx reg; /* The register referenced. */
rtx insn; /* Insn containing ref. */
- rtx *loc; /* Loc is the location of the reg. */
+ rtx *loc; /* The location of the reg. */
struct df_link *chain; /* Head of def-use or use-def chain. */
- enum df_ref_type type; /* Type of ref. */
unsigned int id; /* Ref index. */
+ enum df_ref_type type; /* Type of ref. */
enum df_ref_flags flags; /* Various flags. */
};
{
struct df_link *defs; /* Head of insn-def chain. */
struct df_link *uses; /* Head of insn-use chain. */
- /* ???? The following luid field should be considered private so that
+ /* ???? The following luid field should be considered private so that
we can change it on the fly to accommodate new insns? */
int luid; /* Logical UID. */
-#if 0
- rtx insn; /* Backpointer to the insn. */
-#endif
};
/* The sbitmap vector of dominators or NULL if not computed.
Ideally, this should be a pointer to a CFG object. */
sbitmap *dom;
- int * dfs_order; /* DFS order -> block number */
- int * rc_order; /* reverse completion order -> block number */
- int * rts_order; /* reverse top sort order -> block number */
- int * inverse_rc_map; /* block number -> reverse completion order */
- int * inverse_dfs_map; /* block number -> DFS order */
- int * inverse_rts_map; /* block number -> reverse top-sort order */
+ int *dfs_order; /* DFS order -> block number. */
+ int *rc_order; /* Reverse completion order -> block number. */
+ int *rts_order; /* Reverse top sort order -> block number. */
+ int *inverse_rc_map; /* Block number -> reverse completion order. */
+ int *inverse_dfs_map; /* Block number -> DFS order. */
+ int *inverse_rts_map; /* Block number -> reverse top-sort order. */
};
/* Macros to access the elements within the ref structure. */
+
#define DF_REF_REAL_REG(REF) (GET_CODE ((REF)->reg) == SUBREG \
? SUBREG_REG ((REF)->reg) : ((REF)->reg))
#define DF_REF_REGNO(REF) REGNO (DF_REF_REAL_REG (REF))
#define DF_REF_REAL_LOC(REF) (GET_CODE ((REF)->reg) == SUBREG \
? &SUBREG_REG ((REF)->reg) : ((REF)->loc))
-#ifdef OLD_DF_INTERFACE
-#define DF_REF_REG(REF) DF_REF_REAL_REG(REF)
-#define DF_REF_LOC(REF) DF_REF_REAL_LOC(REF)
-#else
#define DF_REF_REG(REF) ((REF)->reg)
#define DF_REF_LOC(REF) ((REF)->loc)
-#endif
#define DF_REF_BB(REF) (BLOCK_FOR_INSN ((REF)->insn))
#define DF_REF_BBNO(REF) (BLOCK_FOR_INSN ((REF)->insn)->index)
#define DF_REF_INSN(REF) ((REF)->insn)
#define DF_REF_REG_MEM_STORE_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_MEM_STORE)
#define DF_REF_REG_MEM_LOAD_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_MEM_LOAD)
#define DF_REF_REG_MEM_P(REF) (DF_REF_REG_MEM_STORE_P (REF) \
- || DF_REF_REG_MEM_LOAD_P (REF))
+ || DF_REF_REG_MEM_LOAD_P (REF))
/* Macros to access the elements within the reg_info structure table. */
extern void df_dump PARAMS ((struct df *, int, FILE *));
+
/* Functions to modify insns. */
extern void df_insn_modify PARAMS ((struct df *, basic_block, rtx));
basic_block, rtx));
extern rtx df_pattern_emit_after PARAMS ((struct df *, rtx,
- basic_block, rtx));
+ basic_block, rtx));
extern rtx df_insn_move_before PARAMS ((struct df *, basic_block, rtx,
basic_block, rtx));
extern void debug_df_ref PARAMS ((struct ref *));
extern void debug_df_chain PARAMS ((struct df_link *));
+
extern void df_insn_debug PARAMS ((struct df *, rtx, FILE *));
+
extern void df_insn_debug_regno PARAMS ((struct df *, rtx, FILE *));
-/* Meet over any path (UNION) or meet over all paths (INTERSECTION) */
+
+
+/* Meet over any path (UNION) or meet over all paths (INTERSECTION). */
enum df_confluence_op
{
- UNION,
- INTERSECTION
+ DF_UNION,
+ DF_INTERSECTION
};
-/* Dataflow direction */
+
+
+/* Dataflow direction. */
enum df_flow_dir
{
- FORWARD,
- BACKWARD
+ DF_FORWARD,
+ DF_BACKWARD
};
+
typedef void (*transfer_function_sbitmap) PARAMS ((int, int *, sbitmap, sbitmap,
- sbitmap, sbitmap, void *));
+ sbitmap, sbitmap, void *));
+
typedef void (*transfer_function_bitmap) PARAMS ((int, int *, bitmap, bitmap,
- bitmap, bitmap, void *));
+ bitmap, bitmap, void *));
extern void iterative_dataflow_sbitmap PARAMS ((sbitmap *, sbitmap *,
sbitmap *, sbitmap *,
enum df_confluence_op,
transfer_function_sbitmap,
int *, void *));
+
extern void iterative_dataflow_bitmap PARAMS ((bitmap *, bitmap *, bitmap *,
bitmap *, bitmap,
enum df_flow_dir,