+2006-01-31 Marcin Dalecki <martin@dalecki.de>
+
+ * tree-vrp.c: Use XNEW/XCNEW allocation wrappers.
+ * regrename.c: Ditto.
+ * tree-ssa-loop-im.c: Ditto.
+ * tree-dump.c: Ditto.
+ * tree-complex.c: Ditto.
+ * genrecog.c: Ditto.
+ * tree-ssa-threadupdate.c: Ditto.
+ * tracer.c: Ditto.
+ * java/class.c: Ditto.
+ * java/jcf-parse.c: Ditto.
+ * java/resource.c: Ditto.
+ * java/except.c: Ditto.
+ * java/jvspec.c: Ditto.
+ * java/jcf-write.c: Ditto.
+ * java/jcf-path.c: Ditto.
+ * java/gjavah.c: Ditto.
+ * java/zextract.c: Ditto.
+ * java/jcf-io.c: Ditto.
+ * java/jcf.h: Ditto.
+ * java/buffer.c: Ditto.
+ * java/lang.c: Ditto.
+ * java/parse-scan.y: Ditto.
+ * java/lex.c: Ditto.
+ * java/lex.h: Ditto.
+ * cfgloopmanip.c: Ditto.
+ * postreload-gcse.c: Ditto.
+ * tree-ssa-loop-manip.c: Ditto.
+ * postreload.c: Ditto.
+ * tree-ssa-loop-ch.c: Ditto.
+ * loop.c: Ditto.
+ * ipa-cp.c: Ditto.
+ * cppspec.c: Ditto.
+ * diagnostic.c: Ditto.
+ * final.c: Ditto.
+ * genoutput.c: Ditto.
+ * gcc.c: Ditto.
+ * cfghooks.c: Ditto.
+ * cfgloopanal.c: Ditto.
+ * objc/objc-act.c: Ditto.
+ * gcov.c: Ditto.
+ * genextract.c: Ditto.
+ * genautomata.c: Ditto.
+ * pretty-print.c: Ditto.
+ * genemit.c: Ditto.
+ * cgraphunit.c: Ditto.
+ * flow.c: Ditto.
+ * df-scan.c: Ditto.
+ * haifa-sched.c: Ditto.
+ * dominance.c: Ditto.
+ * dbxout.c: Ditto.
+ * tree-ssa-loop-ivopts.c: Ditto.
+ * df-core.c: Ditto.
+ * mode-switching.c: Ditto.
+ * modulo-sched.c: Ditto.
+ * graph.c: Ditto.
+ * ipa-pure-const.c: Ditto.
+ * cse.c: Ditto.
+ * fix-header.c: Ditto.
+ * web.c: Ditto.
+ * tree-stdarg.c: Ditto.
+ * ipa-utils.c: Ditto.
+ * loop-init.c: Ditto.
+ * ipa-inline.c: Ditto.
+ * cfganal.c: Ditto.
+ * global.c: Ditto.
+ * alloc-pool.c: Ditto.
+ * dwarf2out.c: Ditto.
+ * opts.c: Ditto.
+ * genattrtab.c: Ditto.
+ * tree-ssa-loop-ivcanon.c: Ditto.
+ * predict.c: Ditto.
+ * timevar.c: Ditto.
+ * lcm.c: Ditto.
+ * fortran/gfortranspec.c: Ditto.
+ * regmove.c: Ditto.
+ * local-alloc.c: Ditto.
+ * langhooks.c: Ditto.
+ * function.c: Ditto.
+ * tree-vectorizer.c: Ditto.
+ * gcse.c: Ditto.
+ * ipa-type-escape.c: Ditto.
+ * alias.c: Ditto.
+ * tree-if-conv.c: Ditto.
+ * profile.c: Ditto.
+ * ipa.c: Ditto.
+ * tree-data-ref.c: Ditto.
+ * loop-unroll.c: Ditto.
+ * treelang/treetree.c: Ditto.
+ * calls.c: Ditto.
+ * bt-load.c: Ditto.
+ * ggc-common.c: Ditto.
+ * except.c: Ditto.
+ * coverage.c: Ditto.
+ * cselib.c: Ditto.
+ * tree-cfgcleanup.c: Ditto.
+ * tree-ssa-pre.c: Ditto.
+ * cfgcleanup.c: Ditto.
+ * loop-invariant.c: Ditto.
+ * loop-iv.c: Ditto.
+ * ipa-prop.c: Ditto.
+ * print-tree.c: Ditto.
+ * conflict.c: Ditto.
+ * ggc-page.c: Ditto.
+ * sched-deps.c: Ditto.
+ * regclass.c: Ditto.
+ * tree-object-size.c: Ditto.
+ * combine.c: Ditto.
+ * bb-reorder.c: Ditto.
+ * resource.c: Ditto.
+ * var-tracking.c: Ditto.
+ * cfgloop.c: Ditto.
+ * df-problems.c: Ditto.
+ * reg-stack.c: Ditto.
+ * tlink.c: Ditto.
+ * gccspec.c: Ditto.
+ * sched-rgn.c: Ditto.
+ * tree-ssa-structalias.c: Ditto.
+ * tree-ssa-reassoc.c: Ditto.
+ * config/darwin-c.c: Ditto.
+ * config/darwin.c: Ditto.
+ * config/arm/arm.c: Ditto.
+ * cfgrtl.c: Ditto.
+ * collect2.c: Ditto.
+ * reload1.c: Ditto.
+
2006-01-31 Nicolas Pitre <nico@cam.org>
* arm.md (smaxsi3): Make aware of smax_m1.
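
For reference, the XNEW family used throughout this patch are the type-safe
allocation wrappers from include/libiberty.h, layered over xmalloc/xcalloc.
A minimal sketch of their definitions (the header's exact spelling may
differ in casts and parenthesization):

/* One object of type T: XNEW leaves it uninitialized, XCNEW zeroes it.  */
#define XNEW(T)         ((T *) xmalloc (sizeof (T)))
#define XCNEW(T)        ((T *) xcalloc (1, sizeof (T)))

/* N objects of type T: XNEWVEC uninitialized, XCNEWVEC zeroed.  */
#define XNEWVEC(T, N)   ((T *) xmalloc (sizeof (T) * (N)))
#define XCNEWVEC(T, N)  ((T *) xcalloc ((N), sizeof (T)))

Spelling the element type at each call site makes size mismatches visible:
the old cselib.c line further down, for instance, passed sizeof (reg_values)
-- the size of the pointer rather than of the element -- which the
replacement XCNEWVEC (struct elt_list *, reg_values_size) states explicitly.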
VARRAY_RTX_INIT (reg_base_value, maxreg, "reg_base_value");
}
- new_reg_base_value = xmalloc (maxreg * sizeof (rtx));
- reg_seen = xmalloc (maxreg);
+ new_reg_base_value = XNEWVEC (rtx, maxreg);
+ reg_seen = XNEWVEC (char, maxreg);
/* The basic idea is that each pass through this loop will use the
"constant" information from the previous pass to propagate alias
alloc_pool_list block_header;
/* Make the block. */
- block = xmalloc (pool->block_size);
+ block = XNEWVEC (char, pool->block_size);
block_header = (alloc_pool_list) block;
block += align_eight (sizeof (struct alloc_pool_list_def));
#ifdef GATHER_STATISTICS
else
count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
- connected = xcalloc (n_traces, sizeof (bool));
+ connected = XCNEWVEC (bool, n_traces);
last_trace = -1;
current_pass = 1;
current_partition = BB_PARTITION (traces[0].first);
/* We need to know some information for each basic block. */
array_size = GET_ARRAY_SIZE (last_basic_block);
- bbd = xmalloc (array_size * sizeof (bbro_basic_block_data));
+ bbd = XNEWVEC (bbro_basic_block_data, array_size);
for (i = 0; i < array_size; i++)
{
bbd[i].start_of_trace = -1;
bbd[i].node = NULL;
}
- traces = xmalloc (n_basic_blocks * sizeof (struct trace));
+ traces = XNEWVEC (struct trace, n_basic_blocks);
n_traces = 0;
find_traces (&n_traces, traces);
connect_traces (n_traces, traces);
if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return;
- crossing_edges = xcalloc (max_edges, sizeof (edge));
+ crossing_edges = XCNEWVEC (edge, max_edges);
cfg_layout_initialize (0);
build_btr_def_use_webs (fibheap_t all_btr_defs)
{
const int max_uid = get_max_uid ();
- btr_def *def_array = xcalloc (max_uid, sizeof (btr_def));
- btr_user *use_array = xcalloc (max_uid, sizeof (btr_user));
+ btr_def *def_array = XCNEWVEC (btr_def, max_uid);
+ btr_user *use_array = XCNEWVEC (btr_user, max_uid);
sbitmap *btr_defset = sbitmap_vector_alloc (
(last_btr - first_btr) + 1, max_uid);
sbitmap *bb_gen = sbitmap_vector_alloc (n_basic_blocks, max_uid);
- HARD_REG_SET *btrs_written = xcalloc (n_basic_blocks, sizeof (HARD_REG_SET));
+ HARD_REG_SET *btrs_written = XCNEWVEC (HARD_REG_SET, n_basic_blocks);
sbitmap *bb_kill;
sbitmap *bb_out;
{
basic_block *worklist, *tos;
- tos = worklist = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
if (dominated_by_p (CDI_DOMINATORS, new_bb, head_bb))
{
= (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
- args[i].aligned_regs = xmalloc (sizeof (rtx) * args[i].n_aligned_regs);
+ args[i].aligned_regs = XNEWVEC (rtx, args[i].n_aligned_regs);
/* Structures smaller than a word are normally aligned to the
least significant byte. On a BYTES_BIG_ENDIAN machine,
#endif
if (stack_usage_map_buf)
free (stack_usage_map_buf);
- stack_usage_map_buf = xmalloc (highest_outgoing_arg_in_use);
+ stack_usage_map_buf = XNEWVEC (char, highest_outgoing_arg_in_use);
stack_usage_map = stack_usage_map_buf;
if (initial_highest_arg_in_use)
/* Make a new map for the new argument list. */
if (stack_usage_map_buf)
free (stack_usage_map_buf);
- stack_usage_map_buf = xmalloc (highest_outgoing_arg_in_use);
+ stack_usage_map_buf = XNEWVEC (char, highest_outgoing_arg_in_use);
stack_usage_map = stack_usage_map_buf;
memset (stack_usage_map, 0, highest_outgoing_arg_in_use);
highest_outgoing_arg_in_use = 0;
highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use,
needed);
#endif
- stack_usage_map_buf = xmalloc (highest_outgoing_arg_in_use);
+ stack_usage_map_buf = XNEWVEC (char, highest_outgoing_arg_in_use);
stack_usage_map = stack_usage_map_buf;
if (initial_highest_arg_in_use)
bool found = false;
/* Allocate the preorder and postorder number arrays. */
- pre = xcalloc (last_basic_block, sizeof (int));
- post = xcalloc (last_basic_block, sizeof (int));
+ pre = XCNEWVEC (int, last_basic_block);
+ post = XCNEWVEC (int, last_basic_block);
/* Allocate stack for back-tracking up CFG. */
- stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
+ stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
edge_iterator ei;
basic_block *tos, *worklist, bb;
- tos = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks);
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks);
/* Clear all the reachability flags. */
num_edges += EDGE_COUNT (bb->succs);
}
- elist = xmalloc (sizeof (struct edge_list));
+ elist = XNEW (struct edge_list);
elist->num_blocks = block_count;
elist->num_edges = num_edges;
- elist->index_to_edge = xmalloc (sizeof (edge) * num_edges);
+ elist->index_to_edge = XNEWVEC (edge, num_edges);
num_edges = 0;
post_order[post_order_num++] = EXIT_BLOCK;
/* Allocate stack for back-tracking up CFG. */
- stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
+ stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
- stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
+ stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
sp = 0;
if (include_entry_exit)
flow_dfs_compute_reverse_init (depth_first_search_ds data)
{
/* Allocate stack for back-tracking up CFG. */
- data->stack = xmalloc (n_basic_blocks * sizeof (basic_block));
+ data->stack = XNEWVEC (basic_block, n_basic_blocks);
data->sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
v_size = size;
}
- st = xcalloc (rslt_max, sizeof (basic_block));
+ st = XCNEWVEC (basic_block, rslt_max);
rslt[tv++] = st[sp++] = bb;
MARK_VISITED (bb);
while (sp)
if (t)
{
if (!threaded_edges)
- threaded_edges = xmalloc (sizeof (*threaded_edges)
- * n_basic_blocks);
+ threaded_edges = XNEWVEC (edge, n_basic_blocks);
else
{
int i;
basic_block *last_visited;
timevar_push (TV_CFG_VERIFY);
- last_visited = xcalloc (last_basic_block, sizeof (basic_block));
- edge_checksum = xcalloc (last_basic_block, sizeof (size_t));
+ last_visited = XCNEWVEC (basic_block, last_basic_block);
+ edge_checksum = XCNEWVEC (size_t, last_basic_block);
/* Check bb chain & numbers. */
last_bb_seen = ENTRY_BLOCK_PTR;
if (loop->latch->loop_father != loop)
{
- stack = xmalloc (n_basic_blocks * sizeof (basic_block));
+ stack = XNEWVEC (basic_block, n_basic_blocks);
sp = 0;
num_nodes++;
stack[sp++] = loop->latch;
if (loop->pred)
free (loop->pred);
- loop->pred = xmalloc (sizeof (struct loop *) * loop->depth);
+ loop->pred = XNEWVEC (struct loop *, loop->depth);
memcpy (loop->pred, father->pred, sizeof (struct loop *) * father->depth);
loop->pred[father->depth] = father;
}
/* Allocate loop structures. */
- loops->parray = xcalloc (num_loops + 1, sizeof (struct loop *));
+ loops->parray = XCNEWVEC (struct loop *, num_loops + 1);
/* Dummy loop containing whole function. */
- loops->parray[0] = xcalloc (1, sizeof (struct loop));
+ loops->parray[0] = XCNEW (struct loop);
loops->parray[0]->next = NULL;
loops->parray[0]->inner = NULL;
loops->parray[0]->outer = NULL;
{
/* Compute depth first search order of the CFG so that outer
natural loops will be found before inner natural loops. */
- dfs_order = xmalloc (n_basic_blocks * sizeof (int));
- rc_order = xmalloc (n_basic_blocks * sizeof (int));
+ dfs_order = XNEWVEC (int, n_basic_blocks);
+ rc_order = XNEWVEC (int, n_basic_blocks);
pre_and_rev_post_order_compute (dfs_order, rc_order, false);
/* Save CFG derived information to avoid recomputing it. */
header = BASIC_BLOCK (rc_order[b]);
- loop = loops->parray[num_loops] = xcalloc (1, sizeof (struct loop));
+ loop = loops->parray[num_loops] = XCNEW (struct loop);
loop->header = header;
loop->num = num_loops;
gcc_assert (loop->num_nodes);
- tovisit = xcalloc (loop->num_nodes, sizeof (basic_block));
+ tovisit = XCNEWVEC (basic_block, loop->num_nodes);
tovisit[tv++] = loop->header;
if (loop->latch == EXIT_BLOCK_PTR)
gcc_assert (loop->num_nodes);
- tovisit = xcalloc (loop->num_nodes, sizeof (basic_block));
+ tovisit = XCNEWVEC (basic_block, loop->num_nodes);
gcc_assert (loop->latch != EXIT_BLOCK_PTR);
gcc_assert (loop->num_nodes);
gcc_assert (loop->latch != EXIT_BLOCK_PTR);
- blocks = xcalloc (loop->num_nodes, sizeof (basic_block));
+ blocks = XCNEWVEC (basic_block, loop->num_nodes);
visited = BITMAP_ALLOC (NULL);
bb = loop->header;
FOR_EACH_EDGE (e, ei, body[i]->succs)
if (!flow_bb_inside_loop_p (loop, e->dest))
n++;
- edges = xmalloc (n * sizeof (edge));
+ edges = XNEWVEC (edge, n);
*num_edges = n;
n = 0;
for (i = 0; i < loop->num_nodes; i++)
edge e;
/* Check sizes. */
- sizes = xcalloc (loops->num, sizeof (int));
+ sizes = XCNEWVEC (unsigned, loops->num);
sizes[0] = 2;
FOR_EACH_BB (bb)
static struct graph *
new_graph (int n_vertices)
{
- struct graph *g = xmalloc (sizeof (struct graph));
+ struct graph *g = XNEW (struct graph);
g->n_vertices = n_vertices;
- g->vertices = xcalloc (n_vertices, sizeof (struct vertex));
+ g->vertices = XCNEWVEC (struct vertex, n_vertices);
return g;
}
edge_iterator ei;
int i, src, dest;
struct graph *g;
- int *queue1 = xmalloc ((last_basic_block + loops->num) * sizeof (int));
- int *queue2 = xmalloc ((last_basic_block + loops->num) * sizeof (int));
+ int *queue1 = XNEWVEC (int, last_basic_block + loops->num);
+ int *queue2 = XNEWVEC (int, last_basic_block + loops->num);
int nq, depth;
struct loop *cloop;
gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
/* Find bbs in the path. */
- *bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
+ *bbs = XCNEWVEC (basic_block, n_basic_blocks);
return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
n_basic_blocks, e->dest);
}
/* Prevent us from going out of the base_loop. */
SET_BIT (in_queue, base_loop->header->index);
- queue = xmalloc ((base_loop->num_nodes + 1) * sizeof (basic_block));
+ queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
qtop = queue + base_loop->num_nodes + 1;
qbeg = queue;
qend = queue + 1;
on_stack = sbitmap_alloc (last_basic_block);
sbitmap_zero (on_stack);
SET_BIT (on_stack, from->index);
- stack = xmalloc (from->loop_father->num_nodes * sizeof (basic_block));
+ stack = XNEWVEC (basic_block, from->loop_father->num_nodes);
stack[0] = from;
stack_top = 1;
else
{
num_edges = EDGE_COUNT (bb->succs);
- edges = xmalloc (num_edges * sizeof (edge));
+ edges = XNEWVEC (edge, num_edges);
FOR_EACH_EDGE (e, ei, bb->succs)
edges[ei.index] = e;
}
nrem = find_path (e, &rem_bbs);
n_bord_bbs = 0;
- bord_bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
+ bord_bbs = XCNEWVEC (basic_block, n_basic_blocks);
seen = sbitmap_alloc (last_basic_block);
sbitmap_zero (seen);
from = e->src;
deleted = loop_delete_branch_edge (e, 1);
gcc_assert (deleted);
- dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
+ dom_bbs = XCNEWVEC (basic_block, n_basic_blocks);
/* Cancel loops contained in the path. */
for (i = 0; i < nrem; i++)
loop->level = 1;
/* Find its nodes. */
- bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
+ bbs = XCNEWVEC (basic_block, n_basic_blocks);
n = dfs_enumerate_from (loop->latch, 1, alp_enum_p,
bbs, n_basic_blocks, loop->header);
basic_block *dom_bbs, *body;
unsigned n_dom_bbs, i;
sbitmap seen;
- struct loop *loop = xcalloc (1, sizeof (struct loop));
+ struct loop *loop = XCNEW (struct loop);
struct loop *outer = succ_bb->loop_father->outer;
int freq, prob, tot_prob;
gcov_type cnt;
scale_loop_frequencies (succ_bb->loop_father, tot_prob - prob, tot_prob);
/* Update dominators of blocks outside of LOOP. */
- dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
+ dom_bbs = XCNEWVEC (basic_block, n_basic_blocks);
n_dom_bbs = 0;
seen = sbitmap_alloc (last_basic_block);
sbitmap_zero (seen);
duplicate_loop (struct loops *loops, struct loop *loop, struct loop *target)
{
struct loop *cloop;
- cloop = xcalloc (1, sizeof (struct loop));
+ cloop = XCNEW (struct loop);
place_new_loop (loops, cloop);
/* Initialize copied loop. */
free (bbs);
return false;
}
- new_bbs = xmalloc (sizeof (basic_block) * loop->num_nodes);
+ new_bbs = XNEWVEC (basic_block, loop->num_nodes);
/* In case we are doing loop peeling and the loop is in the middle of
an irreducible region, the peeled copies will be inside it too. */
prob_pass_wont_exit =
RDIV (REG_BR_PROB_BASE * (freq_le + freq_out_orig), freq_in);
- scale_step = xmalloc (ndupl * sizeof (int));
+ scale_step = XNEWVEC (int, ndupl);
for (i = 1; i <= ndupl; i++)
scale_step[i - 1] = TEST_BIT (wont_exit, i)
n_orig_loops = 0;
for (aloop = loop->inner; aloop; aloop = aloop->next)
n_orig_loops++;
- orig_loops = xcalloc (n_orig_loops, sizeof (struct loop *));
+ orig_loops = XCNEWVEC (struct loop *, n_orig_loops);
for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
orig_loops[i] = aloop;
loop->copy = target;
- first_active = xmalloc (n * sizeof (basic_block));
+ first_active = XNEWVEC (basic_block, n);
if (is_latch)
{
memcpy (first_active, bbs, n * sizeof (basic_block));
free_dominance_info (CDI_DOMINATORS);
if (loops.num > 1)
{
- last = xcalloc (loops.num, sizeof (basic_block));
+ last = XCNEWVEC (basic_block, loops.num);
FOR_EACH_BB (bb)
{
last[loop->num] = bb;
}
- first = xcalloc (loops.num, sizeof (basic_block));
- stack = xcalloc (loops.num, sizeof (struct loop *));
+ first = XCNEWVEC (basic_block, loops.num);
+ stack = XCNEWVEC (struct loop *, loops.num);
top = stack;
FOR_EACH_BB (bb)
{
enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
int max_uid = get_max_uid ();
- basic_block *start = xcalloc (max_uid, sizeof (basic_block));
- basic_block *end = xcalloc (max_uid, sizeof (basic_block));
- enum bb_state *in_bb_p = xcalloc (max_uid, sizeof (enum bb_state));
+ basic_block *start = XCNEWVEC (basic_block, max_uid);
+ basic_block *end = XCNEWVEC (basic_block, max_uid);
+ enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
basic_block bb;
int err = 0;
basic_block bb;
- bb_info = xcalloc (max_uid, sizeof (basic_block));
+ bb_info = XCNEWVEC (basic_block, max_uid);
FOR_EACH_BB_REVERSE (bb)
{
cgraph_expand_all_functions (void)
{
struct cgraph_node *node;
- struct cgraph_node **order =
- xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
+ struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
int order_pos = 0, new_order_pos = 0;
int i;
len += strlen (HOST_EXECUTABLE_SUFFIX);
#endif
- temp = xmalloc (len);
+ temp = XNEWVEC (char, len);
/* Determine the filename to execute (special case for absolute paths). */
if (len > pprefix->max_len)
pprefix->max_len = len;
- pl = xmalloc (sizeof (struct prefix_list));
+ pl = XNEW (struct prefix_list);
pl->prefix = xstrdup (prefix);
if (*prev)
prefix_from_string (const char *p, struct path_prefix *pprefix)
{
const char *startp, *endp;
- char *nstore = xmalloc (strlen (p) + 3);
+ char *nstore = XNEWVEC (char, strlen (p) + 3);
if (debug)
fprintf (stderr, "Convert string '%s' into prefixes, separator = '%c'\n", p, PATH_SEPARATOR);
/* Strip now if it was requested on the command line. */
if (strip_flag)
{
- char **real_strip_argv = xcalloc (sizeof (char *), 3);
+ char **real_strip_argv = XCNEWVEC (char *, 3);
const char ** strip_argv = (const char **) real_strip_argv;
strip_argv[0] = strip_file_name;
}
}
/* q points to the null at the end of the string (or the . of the .so version). */
- prefix = xmalloc (q - p + 1);
+ prefix = XNEWVEC (char, q - p + 1);
strncpy (prefix, p, q - p);
prefix[q - p] = 0;
for (r = prefix; *r; r++)
if (undobuf.frees)
buf = undobuf.frees, undobuf.frees = buf->next;
else
- buf = xmalloc (sizeof (struct undo));
+ buf = XNEW (struct undo);
buf->kind = UNDO_RTX;
buf->where.r = into;
if (undobuf.frees)
buf = undobuf.frees, undobuf.frees = buf->next;
else
- buf = xmalloc (sizeof (struct undo));
+ buf = XNEW (struct undo);
buf->kind = UNDO_INT;
buf->where.i = into;
if (undobuf.frees)
buf = undobuf.frees, undobuf.frees = buf->next;
else
- buf = xmalloc (sizeof (struct undo));
+ buf = XNEW (struct undo);
buf->kind = UNDO_MODE;
buf->where.r = into;
rtl_hooks = combine_rtl_hooks;
- reg_stat = xcalloc (nregs, sizeof (struct reg_stat));
+ reg_stat = XCNEWVEC (struct reg_stat, nregs);
init_recog_no_volatile ();
if (INSN_UID (insn) > i)
i = INSN_UID (insn);
- uid_cuid = xmalloc ((i + 1) * sizeof (int));
+ uid_cuid = XNEWVEC (int, i + 1);
max_uid_cuid = i;
nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
sbitmap_zero (refresh_blocks);
/* Allocate array of current insn_rtx_costs. */
- uid_insn_cost = xcalloc (max_uid_cuid + 1, sizeof (int));
+ uid_insn_cost = XCNEWVEC (int, max_uid_cuid + 1);
last_insn_cost = max_uid_cuid;
for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
any existing entry. Otherwise, we insert the new fix before
MAX_MP and, if necessary, adjust the constraints on the other
entries. */
- mp = xmalloc (sizeof (* mp));
+ mp = XNEW (Mnode);
mp->fix_size = fix->fix_size;
mp->mode = fix->mode;
mp->value = fix->value;
}
/* We need to create a new entry. */
- mp = xmalloc (sizeof (* mp));
+ mp = XNEW (Mnode);
mp->fix_size = fix->fix_size;
mp->mode = fix->mode;
mp->value = fix->value;
static void
push_field_alignment (int bit_alignment)
{
- align_stack *entry = (align_stack *) xmalloc (sizeof (align_stack));
+ align_stack *entry = XNEW (align_stack);
entry->alignment = maximum_field_alignment;
entry->prev = field_align_stack;
frameworks_in_use = xrealloc (frameworks_in_use,
max_frameworks*sizeof(*frameworks_in_use));
}
- dir_name = xmalloc (len + 1);
+ dir_name = XNEWVEC (char, len + 1);
memcpy (dir_name, name, len);
dir_name[len] = '\0';
frameworks_in_use[num_frameworks].name = dir_name;
if (fast_dir && dir != fast_dir)
return 0;
- frname = xmalloc (strlen (fname) + dir->len + 2
+ frname = XNEWVEC (char, strlen (fname) + dir->len + 2
+ strlen(".framework/") + strlen("PrivateHeaders"));
strncpy (&frname[0], dir->name, dir->len);
frname_len = dir->len;
into
sfrname = /System/Library/Frameworks/Foundation.framework/Frameworks/CarbonCore.framework/Headers/OSUtils.h */
- sfrname = (char *) xmalloc (strlen (pname) + strlen (fname) + 2 +
+ sfrname = XNEWVEC (char, strlen (pname) + strlen (fname) + 2 +
strlen ("Frameworks/") + strlen (".framework/")
+ strlen ("PrivateHeaders"));
int cxx_aware = 1;
cpp_dir *p;
- p = xmalloc (sizeof (cpp_dir));
+ p = XNEW (cpp_dir);
p->next = NULL;
p->name = path;
p->sysp = 1 + !cxx_aware;
{
cpp_dir *p;
- p = xmalloc (sizeof (cpp_dir));
+ p = XNEW (cpp_dir);
p->next = NULL;
p->name = path;
p->sysp = 0;
if (! for_eh)
suffix = ".eh1";
- lab = xmalloc (strlen (prefix)
+ lab = XNEWVEC (char, strlen (prefix)
+ base_len + strlen (suffix) + quotes_len + 1);
lab[0] = '\0';
conflict_graph
conflict_graph_new (int num_regs)
{
- conflict_graph graph = xmalloc (sizeof (struct conflict_graph_def));
+ conflict_graph graph = XNEW (struct conflict_graph_def);
graph->num_regs = num_regs;
/* Set up the hash table. No delete action is specified; memory
obstack_init (&graph->arc_obstack);
/* Create and zero the lookup table by register number. */
- graph->neighbor_heads = xcalloc (num_regs, sizeof (conflict_graph_arc));
+ graph->neighbor_heads = XCNEWVEC (conflict_graph_arc, num_regs);
return graph;
}
entry = *slot;
if (!entry)
{
- *slot = entry = xcalloc (1, sizeof (counts_entry_t));
+ *slot = entry = XCNEW (counts_entry_t);
entry->ident = elt.ident;
entry->ctr = elt.ctr;
entry->checksum = checksum;
entry->summary.num = n_counts;
- entry->counts = xcalloc (n_counts, sizeof (gcov_type));
+ entry->counts = XCNEWVEC (gcov_type, n_counts);
}
else if (entry->checksum != checksum)
{
{
struct function_list *item;
- item = xmalloc (sizeof (struct function_list));
+ item = XNEW (struct function_list);
*functions_tail = item;
functions_tail = &item->next;
int len = strlen (filename);
/* Name of da file. */
- da_file_name = xmalloc (len + strlen (GCOV_DATA_SUFFIX) + 1);
+ da_file_name = XNEWVEC (char, len + strlen (GCOV_DATA_SUFFIX) + 1);
strcpy (da_file_name, filename);
strcat (da_file_name, GCOV_DATA_SUFFIX);
/* Name of bbg file. */
- bbg_file_name = xmalloc (len + strlen (GCOV_NOTE_SUFFIX) + 1);
+ bbg_file_name = XNEWVEC (char, len + strlen (GCOV_NOTE_SUFFIX) + 1);
strcpy (bbg_file_name, filename);
strcat (bbg_file_name, GCOV_NOTE_SUFFIX);
return;
/* One more slot for a terminating null. */
- new_argv = xmalloc ((new_argc + 1) * sizeof(char *));
+ new_argv = XNEWVEC (const char *, new_argc + 1);
new_argv[0] = argv[0];
j = 1;
/* Reallocate the table with NEW_SIZE entries. */
if (cse_reg_info_table)
free (cse_reg_info_table);
- cse_reg_info_table = xmalloc (sizeof (struct cse_reg_info)
- * new_size);
+ cse_reg_info_table = XNEWVEC (struct cse_reg_info, new_size);
cse_reg_info_table_size = new_size;
cse_reg_info_table_first_uninitialized = 0;
}
if (elt)
free_element_chain = elt->next_same_hash;
else
- elt = xmalloc (sizeof (struct table_elt));
+ elt = XNEW (struct table_elt);
elt->exp = x;
elt->canon_exp = NULL_RTX;
init_cse_reg_info (nregs);
- val.path = xmalloc (sizeof (struct branch_path)
- * PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
+ val.path = XNEWVEC (struct branch_path, PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
cse_jumps_altered = 0;
recorded_label_ref = 0;
init_recog ();
init_alias_analysis ();
- reg_eqv_table = xmalloc (nregs * sizeof (struct reg_eqv_elem));
+ reg_eqv_table = XNEWVEC (struct reg_eqv_elem, nregs);
/* Find the largest uid. */
max_uid = get_max_uid ();
- uid_cuid = xcalloc (max_uid + 1, sizeof (int));
+ uid_cuid = XCNEWVEC (int, max_uid + 1);
/* Compute the mapping from uids to cuids.
CUIDs are numbers assigned to insns, like uids,
int no_conflict = 0;
/* Allocate the space needed by qty_table. */
- qty_table = xmalloc (max_qty * sizeof (struct qty_table_elem));
+ qty_table = XNEWVEC (struct qty_table_elem, max_qty);
new_basic_block ();
following branches in this case. */
to_usage = 0;
val.path_size = 0;
- val.path = xmalloc (sizeof (struct branch_path)
- * PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
+ val.path = XNEWVEC (struct branch_path, PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH));
cse_end_of_basic_block (insn, &val, 0, 0);
free (val.path);
timevar_push (TV_DELETE_TRIVIALLY_DEAD);
/* First count the number of times each register is used. */
- counts = xcalloc (nreg, sizeof (int));
+ counts = XCNEWVEC (int, nreg);
for (insn = insns; insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
count_reg_usage (insn, counts, NULL_RTX, 1);
/* Some space for newly emit instructions so we don't end up
reallocating in between passes. */
reg_values_size = cselib_nregs + (63 + cselib_nregs) / 16;
- reg_values = xcalloc (reg_values_size, sizeof (reg_values));
+ reg_values = XCNEWVEC (struct elt_list *, reg_values_size);
}
- used_regs = xmalloc (sizeof (*used_regs) * cselib_nregs);
+ used_regs = XNEWVEC (unsigned int, cselib_nregs);
n_used_regs = 0;
cselib_hash_table = htab_create (31, get_value_hash,
entry_and_rtx_equal_p, NULL);
next_type_number = 1;
#ifdef DBX_USE_BINCL
- current_file = xmalloc (sizeof *current_file);
+ current_file = XNEW (struct dbx_file);
current_file->next = NULL;
current_file->file_number = 0;
current_file->next_type_number = 1;
const char *filename ATTRIBUTE_UNUSED)
{
#ifdef DBX_USE_BINCL
- struct dbx_file *n = xmalloc (sizeof *n);
+ struct dbx_file *n = XNEW (struct dbx_file);
n->next = current_file;
n->next_type_number = 1;
struct df *
df_init (int flags)
{
- struct df *df = xcalloc (1, sizeof (struct df));
+ struct df *df = XCNEW (struct df);
df->flags = flags;
/* This is executed once per compilation to initialize platform
return dflow;
/* Make a new one and add it to the end. */
- dflow = xcalloc (1, sizeof (struct dataflow));
+ dflow = XCNEW (struct dataflow);
dflow->df = df;
dflow->problem = problem;
df->problems_in_order[df->num_problems_defined++] = dflow;
void
df_analyze (struct df *df)
{
- int *postorder = xmalloc (sizeof (int) *last_basic_block);
+ int *postorder = XNEWVEC (int, last_basic_block);
bitmap current_all_blocks = BITMAP_ALLOC (NULL);
int n_blocks;
int i;
}
else
{
- struct df_ru_problem_data *problem_data =
- xmalloc (sizeof (struct df_ru_problem_data));
+ struct df_ru_problem_data *problem_data = XNEW (struct df_ru_problem_data);
dflow->problem_data = problem_data;
- problem_data->use_sites = xcalloc (reg_size, sizeof (bitmap));
+ problem_data->use_sites = XCNEWVEC (bitmap, reg_size);
problem_data->use_sites_size = reg_size;
problem_data->sparse_invalidated_by_call = BITMAP_ALLOC (NULL);
problem_data->dense_invalidated_by_call = BITMAP_ALLOC (NULL);
}
else
{
- struct df_rd_problem_data *problem_data =
- xmalloc (sizeof (struct df_rd_problem_data));
+ struct df_rd_problem_data *problem_data = XNEW (struct df_rd_problem_data);
dflow->problem_data = problem_data;
- problem_data->def_sites = xcalloc (reg_size, sizeof (bitmap));
+ problem_data->def_sites = XCNEWVEC (bitmap, reg_size);
problem_data->def_sites_size = reg_size;
problem_data->sparse_invalidated_by_call = BITMAP_ALLOC (NULL);
problem_data->dense_invalidated_by_call = BITMAP_ALLOC (NULL);
if (!dflow->problem_data)
{
- problem_data = xmalloc (sizeof (struct df_urec_problem_data));
+ problem_data = XNEW (struct df_urec_problem_data);
dflow->problem_data = problem_data;
}
problem_data->earlyclobbers_found = false;
df_chain_add_problem (struct df *df, int flags)
{
struct df_chain_problem_data *problem_data =
- xmalloc (sizeof (struct df_chain_problem_data));
+ XNEW (struct df_chain_problem_data);
struct dataflow *dflow = df_add_problem (df, &problem_CHAIN);
dflow->problem_data = problem_data;
if (!dflow->problem_data)
{
- struct df_ri_problem_data *problem_data =
- xmalloc (sizeof (struct df_ri_problem_data));
+ struct df_ri_problem_data *problem_data = XNEW (struct df_ri_problem_data);
dflow->problem_data = problem_data;
}
sizeof (struct df_scan_bb_info),
block_size);
- problem_data = xmalloc (sizeof (struct df_scan_problem_data));
+ problem_data = XNEW (struct df_scan_problem_data);
dflow->problem_data = problem_data;
problem_data->ref_pool
{
/* Allocate a basic pretty-printer. Clients will replace this with a
much more elaborate pretty-printer if they wish. */
- context->printer = xmalloc (sizeof (pretty_printer));
+ context->printer = XNEW (pretty_printer);
pp_construct (context->printer, NULL, 0);
/* By default, diagnostics are sent to stderr. */
context->printer->buffer->stream = stderr;
{ \
unsigned int i = 1; /* Catch content == i. */ \
if (! (content)) \
- (var) = xcalloc ((num), sizeof (type)); \
+ (var) = XCNEWVEC (type, num); \
else \
{ \
- (var) = xmalloc ((num) * sizeof (type)); \
+ (var) = XNEWVEC (type, (num)); \
for (i = 0; i < num; i++) \
(var)[i] = (content); \
} \
/* Ending block. */
basic_block ex_block;
- stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
+ stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
sp = 0;
/* Initialize our border blocks, and the first edge. */
for (ason = son->right, n = 1; ason != son; ason = ason->right)
n++;
- *bbs = xmalloc (n * sizeof (basic_block));
+ *bbs = XNEWVEC (basic_block, n);
(*bbs)[0] = son->data;
for (ason = son->right, n = 1; ason != son; ason = ason->right)
(*bbs)[n++] = ason->data;
static inline char *
stripattributes (const char *s)
{
- char *stripped = xmalloc (strlen (s) + 2);
+ char *stripped = XNEWVEC (char, strlen (s) + 2);
char *p = stripped;
*p++ = '*';
return 1;
}
- entry = xcalloc (1, sizeof (struct cu_hash_table_entry));
+ entry = XCNEW (struct cu_hash_table_entry);
entry->cu = cu;
entry->min_comdat_num = *sym_num = last->max_comdat_num;
entry->next = *slot;
{
/* Filter value is a 1 based table index. */
- n = xmalloc (sizeof (*n));
+ n = XNEW (struct ttypes_filter);
n->t = type;
n->filter = VEC_length (tree, cfun->eh->ttype_data) + 1;
*slot = n;
{
/* Filter value is a -1 based byte index into a uleb128 buffer. */
- n = xmalloc (sizeof (*n));
+ n = XNEW (struct ttypes_filter);
n->t = list;
n->filter = -(VARRAY_ACTIVE_SIZE (cfun->eh->ehspec_data) + 1);
*slot = n;
{
struct sjlj_lp_info *lp_info;
- lp_info = xcalloc (cfun->eh->last_region_number + 1,
- sizeof (struct sjlj_lp_info));
+ lp_info = XCNEWVEC (struct sjlj_lp_info, cfun->eh->last_region_number + 1);
if (sjlj_find_directly_reachable_regions (lp_info))
{
max_labelno = max_label_num ();
min_labelno = get_first_label_num ();
- label_align = xcalloc (max_labelno - min_labelno + 1,
- sizeof (struct label_alignment));
+ label_align = XCNEWVEC (struct label_alignment, max_labelno - min_labelno + 1);
/* If not optimizing or optimizing for size, don't assign any alignments. */
if (! optimize || optimize_size)
/* Free uid_shuid before reallocating it. */
free (uid_shuid);
- uid_shuid = xmalloc (max_uid * sizeof *uid_shuid);
+ uid_shuid = XNEWVEC (int, max_uid);
if (max_labelno != max_label_num ())
{
#ifdef HAVE_ATTR_length
/* Allocate the rest of the arrays. */
- insn_lengths = xmalloc (max_uid * sizeof (*insn_lengths));
+ insn_lengths = XNEWVEC (int, max_uid);
insn_lengths_max_uid = max_uid;
/* Syntax errors can lead to labels being outside of the main insn stream.
Initialize insn_addresses, so that we get reproducible results. */
INSN_ADDRESSES_ALLOC (max_uid);
- varying_length = xcalloc (max_uid, sizeof (char));
+ varying_length = XCNEWVEC (char, max_uid);
/* Initialize uid_align. We scan instructions
from end to start, and keep in align_tab[n] the last seen insn
that does an alignment of at least n+1, i.e. the successor
in the alignment chain for an insn that does / has a known
alignment of n. */
- uid_align = xcalloc (max_uid, sizeof *uid_align);
+ uid_align = XCNEWVEC (rtx, max_uid);
for (i = MAX_CODE_ALIGN; --i >= 0;)
align_tab[i] = NULL_RTX;
exit (FATAL_EXIT_CODE);
}
inf_size = sbuf.st_size;
- inf_buffer = xmalloc (inf_size + 2);
+ inf_buffer = XNEWVEC (char, inf_size + 2);
inf_ptr = inf_buffer;
to_read = inf_size;
ndead = 0;
if ((prop_flags & PROP_REG_INFO) && !reg_deaths)
- reg_deaths = xcalloc (sizeof (*reg_deaths), max_regno);
+ reg_deaths = XCNEWVEC (int, max_regno);
timevar_push ((extent == UPDATE_LIFE_LOCAL || blocks)
? TV_LIFE_UPDATE : TV_LIFE);
SET_REGNO_REG_SET (invalidated_by_call, i);
/* Allocate space for the sets of local properties. */
- local_sets = xcalloc (last_basic_block, sizeof (regset));
- cond_local_sets = xcalloc (last_basic_block, sizeof (regset));
+ local_sets = XCNEWVEC (bitmap, last_basic_block);
+ cond_local_sets = XCNEWVEC (bitmap, last_basic_block);
/* Create a worklist. Allocate an extra slot, because otherwise the
`head == tail' style test for an empty queue would also match a full queue. */
- queue = xmalloc ((n_basic_blocks + 1) * sizeof (*queue));
+ queue = XNEWVEC (basic_block, n_basic_blocks + 1);
qtail = queue;
qhead = qend = queue + n_basic_blocks;
}
}
- block_accesses = xcalloc (last_basic_block, sizeof (int));
+ block_accesses = XCNEWVEC (int, last_basic_block);
/* We clean aux when we remove the initially-enqueued bbs, but we
don't enqueue ENTRY and EXIT initially, so clean them upfront and
max_regno = max_reg_num ();
gcc_assert (!reg_deaths);
- reg_deaths = xcalloc (sizeof (*reg_deaths), max_regno);
+ reg_deaths = XCNEWVEC (int, max_regno);
/* Recalculate the register space, in case it has grown. Old style
vector oriented regsets would set regset_{size,bytes} here also. */
init_propagate_block_info (basic_block bb, regset live, regset local_set,
regset cond_local_set, int flags)
{
- struct propagate_block_info *pbi = xmalloc (sizeof (*pbi));
+ struct propagate_block_info *pbi = XNEW (struct propagate_block_info);
pbi->bb = bb;
pbi->reg_live = live;
pbi->insn_num = 0;
if (flags & (PROP_LOG_LINKS | PROP_AUTOINC))
- pbi->reg_next_use = xcalloc (max_reg_num (), sizeof (rtx));
+ pbi->reg_next_use = XCNEWVEC (rtx, max_reg_num ());
else
pbi->reg_next_use = NULL;
struct reg_cond_life_info *rcli;
rtx cond;
- rcli = xmalloc (sizeof (*rcli));
+ rcli = XNEW (struct reg_cond_life_info);
if (REGNO_REG_SET_P (bb_true->il.rtl->global_live_at_start,
i))
/* The register was unconditionally live previously.
Record the current condition as the condition under
which it is dead. */
- rcli = xmalloc (sizeof (*rcli));
+ rcli = XNEW (struct reg_cond_life_info);
rcli->condition = cond;
rcli->stores = cond;
rcli->orig_condition = const0_rtx;
{
/* The register was not previously live at all. Record
the condition under which it is still dead. */
- rcli = xmalloc (sizeof (*rcli));
+ rcli = XNEW (struct reg_cond_life_info);
rcli->condition = not_reg_cond (cond);
rcli->stores = const0_rtx;
rcli->orig_condition = const0_rtx;
if (argv[i][2] == '\0')
{
- p = xmalloc (strlen (argv[i + 1]) + 2);
+ p = XNEWVEC (char, strlen (argv[i + 1]) + 2);
p[0] = '-';
p[1] = 'J';
strcpy (&p[2], argv[i + 1]);
}
else
{
- p = xmalloc (strlen (argv[i]) + 1);
+ p = XNEWVEC (char, strlen (argv[i]) + 1);
strcpy (p, argv[i]);
}
append_arg (p);
tree *block_vector;
*n_blocks_p = all_blocks (block, NULL);
- block_vector = xmalloc (*n_blocks_p * sizeof (tree));
+ block_vector = XNEWVEC (tree, *n_blocks_p);
all_blocks (block, block_vector);
return block_vector;
if (!sl)
{
/* Not found - make it. */
- sl = xmalloc (sizeof (struct spec_list));
+ sl = XNEW (struct spec_list);
sl->name = xstrdup (name);
sl->name_len = name_len;
sl->ptr_spec = &sl->ptr;
alloc_args (void)
{
argbuf_length = 10;
- argbuf = xmalloc (argbuf_length * sizeof (const char *));
+ argbuf = XNEWVEC (const char *, argbuf_length);
}
/* Clear out the vector of arguments (after a command is executed). */
pfatal_with_name (filename);
/* Read contents of file into BUFFER. */
- buffer = xmalloc ((unsigned) statbuf.st_size + 1);
+ buffer = XNEWVEC (char, statbuf.st_size + 1);
readlen = read (desc, buffer, (unsigned) statbuf.st_size);
if (readlen < 0)
pfatal_with_name (filename);
buffer[readlen] = 0;
close (desc);
- specs = xmalloc (readlen + 1);
+ specs = XNEWVEC (char, readlen + 1);
specs_p = specs;
for (buffer_p = buffer; buffer_p && *buffer_p; buffer_p++)
{
if (! strcmp (name, temp->name))
goto already1;
- temp = xmalloc (sizeof (struct temp_file));
+ temp = XNEW (struct temp_file);
temp->next = always_delete_queue;
temp->name = name;
always_delete_queue = temp;
if (! strcmp (name, temp->name))
goto already2;
- temp = xmalloc (sizeof (struct temp_file));
+ temp = XNEW (struct temp_file);
temp->next = failure_delete_queue;
temp->name = name;
failure_delete_queue = temp;
len += suffix_len;
else
len += multi_os_dir_len;
- path = xmalloc (len);
+ path = XNEWVEC (char, len);
}
for (pl = paths->plist; pl != 0; pl = pl->next)
if (len > pprefix->max_len)
pprefix->max_len = len;
- pl = xmalloc (sizeof (struct prefix_list));
+ pl = XNEW (struct prefix_list);
pl->prefix = prefix;
pl->require_machine_suffix = require_machine_suffix;
pl->priority = priority;
n_preprocessor_options++;
if (! preprocessor_options)
- preprocessor_options = xmalloc (n_preprocessor_options * sizeof (char *));
+ preprocessor_options = XNEWVEC (char *, n_preprocessor_options);
else
preprocessor_options = xrealloc (preprocessor_options,
n_preprocessor_options * sizeof (char *));
n_assembler_options++;
if (! assembler_options)
- assembler_options = xmalloc (n_assembler_options * sizeof (char *));
+ assembler_options = XNEWVEC (char *, n_assembler_options);
else
assembler_options = xrealloc (assembler_options,
n_assembler_options * sizeof (char *));
n_linker_options++;
if (! linker_options)
- linker_options = xmalloc (n_linker_options * sizeof (char *));
+ linker_options = XNEWVEC (char *, n_linker_options);
else
linker_options = xrealloc (linker_options,
n_linker_options * sizeof (char *));
}
else if (strcmp (argv[i], "-specs") == 0)
{
- struct user_specs *user = xmalloc (sizeof (struct user_specs));
+ struct user_specs *user = XNEW (struct user_specs);
if (++i >= argc)
fatal ("argument to '-specs' is missing");
}
else if (strncmp (argv[i], "-specs=", 7) == 0)
{
- struct user_specs *user = xmalloc (sizeof (struct user_specs));
+ struct user_specs *user = XNEW (struct user_specs);
if (strlen (argv[i]) == 7)
fatal ("argument to '-specs=' is missing");
if (! IS_DIR_SEPARATOR (value [len - 1])
&& is_directory (value, false))
{
- char *tmp = xmalloc (len + 2);
+ char *tmp = XNEWVEC (char, len + 2);
strcpy (tmp, value);
tmp[len] = DIR_SEPARATOR;
tmp[++ len] = 0;
/* Then create the space for the vectors and scan again. */
- switches = xmalloc ((n_switches + 1) * sizeof (struct switchstr));
- infiles = xmalloc ((n_infiles + 1) * sizeof (struct infile));
+ switches = XNEWVEC (struct switchstr, n_switches + 1);
+ infiles = XNEWVEC (struct infile, n_infiles + 1);
n_switches = 0;
n_infiles = 0;
last_language_n_infiles = -1;
if (i + n_args >= argc)
fatal ("argument to '-%s' is missing", p);
switches[n_switches].args
- = xmalloc ((n_args + 1) * sizeof(const char *));
+ = XNEWVEC (const char *, n_args + 1);
while (j < n_args)
switches[n_switches].args[j++] = argv[++i];
/* Null-terminate the vector. */
{
/* On some systems, ld cannot handle some options without
a space. So split the option from its argument. */
- char *part1 = xmalloc (2);
+ char *part1 = XNEWVEC (char, 2);
part1[0] = c;
part1[1] = '\0';
switches[n_switches].part1 = part1;
- switches[n_switches].args = xmalloc (2 * sizeof (const char *));
+ switches[n_switches].args = XNEWVEC (const char *, 2);
switches[n_switches].args[0] = xstrdup (p+1);
switches[n_switches].args[1] = 0;
}
else
{
saved_suffix
- = xmalloc (suffix_length
+ = XNEWVEC (char, suffix_length
+ strlen (TARGET_OBJECT_SUFFIX));
strncpy (saved_suffix, suffix, suffix_length);
strcpy (saved_suffix + suffix_length,
i = n_infiles;
i += lang_specific_extra_outfiles;
- outfiles = xcalloc (i, sizeof (char *));
+ outfiles = XCNEWVEC (const char *, i);
/* Record which files were specified explicitly as link input. */
- explicit_link_files = xcalloc (1, n_infiles);
+ explicit_link_files = XCNEWVEC (bool, n_infiles);
if (combine_flag)
combine_inputs = true;
static char *
save_string (const char *s, int len)
{
- char *result = xmalloc (len + 1);
+ char *result = XNEWVEC (char, len + 1);
memcpy (result, s, len);
result[len] = 0;
xmalloc from calling fatal, and prevents us from re-executing this
block of code. */
mswitches
- = xmalloc (sizeof (struct mswitchstr)
- * (n_mdswitches + (n_switches ? n_switches : 1)));
+ = XNEWVEC (struct mswitchstr, n_mdswitches + (n_switches ? n_switches : 1));
for (i = 0; i < n_switches; i++)
if (switches[i].live_cond != SWITCH_IGNORE)
{
{
int i = 0;
- mdswitches = xmalloc (sizeof (struct mdswitchstr) * n_mdswitches);
+ mdswitches = XNEWVEC (struct mdswitchstr, n_mdswitches);
for (start = multilib_defaults; *start != '\0'; start = end + 1)
{
while (*start == ' ' || *start == '\t')
if (this_path_len != 1
|| this_path[0] != '.')
{
- char *new_multilib_dir = xmalloc (this_path_len + 1);
+ char *new_multilib_dir = XNEWVEC (char, this_path_len + 1);
char *q;
strncpy (new_multilib_dir, this_path, this_path_len);
q++;
if (q < end)
{
- char *new_multilib_os_dir = xmalloc (end - q);
+ char *new_multilib_os_dir = XNEWVEC (char, end - q);
memcpy (new_multilib_os_dir, q + 1, end - q - 1);
new_multilib_os_dir[end - q - 1] = '\0';
multilib_os_dir = new_multilib_os_dir;
if (shared_libgcc)
{
/* Make sure to have room for the trailing NULL argument. */
- arglist = xmalloc ((argc+2) * sizeof (char *));
+ arglist = XNEWVEC (const char *, argc + 2);
i = 0;
do
for (fn = functions; fn; fn = fn->next)
solve_flow_graph (fn);
for (src = sources; src; src = src->next)
- src->lines = xcalloc (src->num_lines, sizeof (line_t));
+ src->lines = XCNEWVEC (line_t, src->num_lines);
for (fn = functions; fn; fn = fn->next)
{
coverage_t coverage;
struct stat status;
length += strlen (object_directory) + 2;
- name = xmalloc (length);
+ name = XNEWVEC (char, length);
name[0] = 0;
base = !stat (object_directory, &status) && S_ISDIR (status.st_mode);
}
else
{
- name = xmalloc (length + 1);
+ name = XNEWVEC (char, length + 1);
name[0] = 0;
base = 1;
}
length = strlen (name);
- bbg_file_name = xmalloc (length + strlen (GCOV_NOTE_SUFFIX) + 1);
+ bbg_file_name = XNEWVEC (char, length + strlen (GCOV_NOTE_SUFFIX) + 1);
strcpy (bbg_file_name, name);
strcpy (bbg_file_name + length, GCOV_NOTE_SUFFIX);
- da_file_name = xmalloc (length + strlen (GCOV_DATA_SUFFIX) + 1);
+ da_file_name = XNEWVEC (char, length + strlen (GCOV_DATA_SUFFIX) + 1);
strcpy (da_file_name, name);
strcpy (da_file_name + length, GCOV_DATA_SUFFIX);
if (!strcmp (file_name, src->name))
return src;
- src = xcalloc (1, sizeof (source_t));
+ src = XCNEW (source_t);
src->name = xstrdup (file_name);
src->coverage.name = src->name;
src->index = sources ? sources->index + 1 : 1;
src = find_source (gcov_read_string ());
lineno = gcov_read_unsigned ();
- fn = xcalloc (1, sizeof (function_t));
+ fn = XCNEW (function_t);
fn->name = function_name;
fn->ident = ident;
fn->checksum = checksum;
unsigned ix, num_blocks = GCOV_TAG_BLOCKS_NUM (length);
fn->num_blocks = num_blocks;
- fn->blocks = xcalloc (fn->num_blocks, sizeof (block_t));
+ fn->blocks = XCNEWVEC (block_t, fn->num_blocks);
for (ix = 0; ix != num_blocks; ix++)
fn->blocks[ix].flags = gcov_read_unsigned ();
}
if (dest >= fn->num_blocks)
goto corrupt;
- arc = xcalloc (1, sizeof (arc_t));
+ arc = XCNEW (arc_t);
arc->dst = &fn->blocks[dest];
arc->src = &fn->blocks[src];
else if (fn && tag == GCOV_TAG_LINES)
{
unsigned blockno = gcov_read_unsigned ();
- unsigned *line_nos = xcalloc (length - 1, sizeof (unsigned));
+ unsigned *line_nos = XCNEWVEC (unsigned, length - 1);
if (blockno >= fn->num_blocks || fn->blocks[blockno].u.line.encoding)
goto corrupt;
goto mismatch;
if (!fn->counts)
- fn->counts = xcalloc (fn->num_counts, sizeof (gcov_type));
+ fn->counts = XCNEWVEC (gcov_type, fn->num_counts);
for (ix = 0; ix != fn->num_counts; ix++)
fn->counts[ix] += gcov_read_counter ();
make_gcov_file_name (const char *input_name, const char *src_name)
{
char *cptr;
- char *name = xmalloc (strlen (src_name) + strlen (input_name) + 10);
+ char *name = XNEWVEC (char, strlen (src_name) + strlen (input_name) + 10);
name[0] = 0;
if (flag_long_names && strcmp (src_name, input_name))
local_cprop_pass (cprop_jumps);
/* Determine implicit sets. */
- implicit_sets = xcalloc (last_basic_block, sizeof (rtx));
+ implicit_sets = XCNEWVEC (rtx, last_basic_block);
find_implicit_sets ();
alloc_hash_table (max_cuid, &set_hash_table, 1);
pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
{
int rval;
- char *visited = xcalloc (last_basic_block, 1);
+ char *visited = XCNEWVEC (char, last_basic_block);
rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
/* Compute a mapping from expression number (`bitmap_index') to
hash table entry. */
- index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
+ index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
for (i = 0; i < expr_hash_table.size; i++)
for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
index_map[expr->bitmap_index] = expr;
if (visited == NULL)
{
visited_allocated_locally = 1;
- visited = xcalloc (last_basic_block, 1);
+ visited = XCNEWVEC (char, last_basic_block);
}
FOR_EACH_EDGE (pred, ei, bb->preds)
/* Compute a mapping from expression number (`bitmap_index') to
hash table entry. */
- index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
+ index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
for (i = 0; i < expr_hash_table.size; i++)
for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
index_map[expr->bitmap_index] = expr;
if (*slot)
return (struct ls_expr *)*slot;
- ptr = xmalloc (sizeof (struct ls_expr));
+ ptr = XNEW (struct ls_expr);
ptr->next = pre_ldst_mems;
ptr->expr = NULL;
pre_ldst_mems = 0;
pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
pre_ldst_expr_eq, NULL);
- last_set_in = xcalloc (max_gcse_regno, sizeof (int));
- already_set = xmalloc (sizeof (int) * max_gcse_regno);
+ last_set_in = XCNEWVEC (int, max_gcse_regno);
+ already_set = XNEWVEC (int, max_gcse_regno);
/* Find all the stores we care about. */
FOR_EACH_BB (bb)
transp = sbitmap_vector_alloc (last_basic_block, num_stores);
sbitmap_vector_zero (transp, last_basic_block);
- regs_set_in_block = xmalloc (sizeof (int) * max_gcse_regno);
+ regs_set_in_block = XNEWVEC (int, max_gcse_regno);
FOR_EACH_BB (bb)
{
rtx last, insn, note;
rtx mem = smexpr->pattern;
- stack = xmalloc (sizeof (edge_iterator) * n_basic_blocks);
+ stack = XNEWVEC (edge_iterator, n_basic_blocks);
sp = 0;
ei = ei_start (bb->succs);
rtx defval = XEXP (exp, 1);
rtx new_defval = XEXP (exp, 1);
int len = XVECLEN (exp, 0);
- rtx *tests = xmalloc (len * sizeof (rtx));
+ rtx *tests = XNEWVEC (rtx, len);
int allsame = 1;
rtx ret;
return;
/* Make 2 extra elements, for "code" values -2 and -1. */
- insn_code_values = xcalloc ((insn_code_number + 2),
- sizeof (struct attr_value_list *));
+ insn_code_values = XCNEWVEC (struct attr_value_list *, insn_code_number + 2);
/* Offset the table address so we can index by -2 or -1. */
insn_code_values += 2;
- iv = ivbuf = xmalloc (num_insn_ents * sizeof (struct attr_value_list));
+ iv = ivbuf = XNEWVEC (struct attr_value_list, num_insn_ents);
for (i = 0; i < MAX_ATTRS_INDEX; i++)
for (attr = attrs[i]; attr; attr = attr->next)
int i;
if (description->units_num)
- units_array = xmalloc (description->units_num * sizeof (unit_decl_t));
+ units_array = XNEWVEC (unit_decl_t, description->units_num);
else
units_array = 0;
process_state_for_insn_equiv_partition (state_t state)
{
arc_t arc;
- arc_t *insn_arcs_array = xcalloc (description->insns_num, sizeof(arc_t));
+ arc_t *insn_arcs_array = XCNEWVEC (arc_t, description->insns_num);
/* Process insns of the arcs. */
for (arc = first_out_arc (state); arc != NULL; arc = next_out_arc (arc))
if (description->units_num == 0)
return;
estimation_bound = estimate_one_automaton_bound ();
- unit_decls = xmalloc (description->units_num * sizeof (unit_decl_t));
+ unit_decls = XNEWVEC (unit_decl_t, description->units_num);
for (i = 0, j = 0; i < description->decls_num; i++)
if (description->decls[i]->mode == dm_unit)
if (i != XVECLEN (insn, 1) - 1)
{
struct clobber_pat *p;
- struct clobber_ent *link = xmalloc (sizeof (struct clobber_ent));
+ struct clobber_ent *link = XNEW (struct clobber_ent);
int j;
link->code_number = insn_code_number;
if (p == 0)
{
- p = xmalloc (sizeof (struct clobber_pat));
+ p = XNEW (struct clobber_pat);
p->insns = 0;
p->pattern = insn;
max_operand_vec (split, 2);
operands = MAX (max_opno, MAX (max_dup_opno, max_scratch_opno)) + 1;
unused = (operands == 0 ? " ATTRIBUTE_UNUSED" : "");
- used = xcalloc (1, operands);
+ used = XCNEWVEC (char, operands);
/* Output the prototype, function name and argument declarations. */
if (GET_CODE (split) == DEFINE_PEEPHOLE2)
VEC_char_to_string (VEC(char,heap) *v)
{
size_t n = VEC_length (char, v);
- char *s = xmalloc (n + 1);
+ char *s = XNEWVEC (char, n + 1);
memcpy (s, VEC_address (char, v), n);
s[n] = '\0';
return s;
else if (GET_CODE (desc) == DEFINE_PEEPHOLE)
{
- struct code_ptr *link = xmalloc (sizeof (struct code_ptr));
+ struct code_ptr *link = XNEW (struct code_ptr);
link->insn_code = insn_code_number;
link->next = peepholes;
static void
gen_insn (rtx insn, int lineno)
{
- struct data *d = xmalloc (sizeof (struct data));
+ struct data *d = XNEW (struct data);
int i;
d->code_number = next_code_number;
static void
gen_peephole (rtx peep, int lineno)
{
- struct data *d = xmalloc (sizeof (struct data));
+ struct data *d = XNEW (struct data);
int i;
d->code_number = next_code_number;
static void
gen_expand (rtx insn, int lineno)
{
- struct data *d = xmalloc (sizeof (struct data));
+ struct data *d = XNEW (struct data);
int i;
d->code_number = next_code_number;
static void
gen_split (rtx split, int lineno)
{
- struct data *d = xmalloc (sizeof (struct data));
+ struct data *d = XNEW (struct data);
int i;
d->code_number = next_code_number;
if (s == 0)
return 0;
- p = q = xmalloc (strlen (s) + 1);
+ p = q = XNEWVEC (char, strlen (s) + 1);
while ((ch = *s++) != '\0')
if (! ISSPACE (ch))
*p++ = ch;
struct decision_test **place = *pplace;
struct decision_test *test;
- test = xmalloc (sizeof (*test));
+ test = XNEW (struct decision_test);
test->next = *place;
test->type = type;
*place = test;
ggc_pch_this_base (state.d, mmi.preferred_base);
- state.ptrs = xmalloc (state.count * sizeof (*state.ptrs));
+ state.ptrs = XNEWVEC (struct ptr_data *, state.count);
state.ptrs_i = 0;
htab_traverse (saving_htab, call_alloc, &state);
qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
const char *name, int line, const char *function)
{
struct loc_descriptor *loc = loc_descriptor (name, line, function);
- struct ptr_hash_entry *p = xmalloc (sizeof (struct ptr_hash_entry));
+ struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
PTR *slot;
p->ptr = ptr;
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
- base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+ base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
base[L1][L2] = entry;
}
the data, but instead verify that the data is *actually* not
reachable the next time we collect. */
{
- struct free_object *fo = xmalloc (sizeof (struct free_object));
+ struct free_object *fo = XNEW (struct free_object);
fo->object = p;
fo->next = G.free_object_list;
G.free_object_list = fo;
}
/* We have a good page, might as well hold onto it... */
- e = xcalloc (1, sizeof (struct page_entry));
+ e = XCNEW (struct page_entry);
e->bytes = G.pagesize;
e->page = p;
e->next = G.free_pages;
G.depth_in_use = 0;
G.depth_max = 10;
- G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
+ G.depth = XNEWVEC (unsigned int, G.depth_max);
G.by_depth_in_use = 0;
G.by_depth_max = INITIAL_PTE_COUNT;
- G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
+ G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}
/* Start a new GGC zone. */
struct ggc_pch_data *
init_ggc_pch (void)
{
- return xcalloc (sizeof (struct ggc_pch_data), 1);
+ return XCNEW (struct ggc_pch_data);
}
void
page_entry **new_by_depth;
unsigned long **new_save_in_use;
- new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
- new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
+ new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
+ new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
memcpy (&new_by_depth[0],
&G.by_depth[count_old_page_tables],
/* Establish mappings from register number to allocation number
and vice versa. In the process, count the allocnos. */
- reg_allocno = xmalloc (max_regno * sizeof (int));
+ reg_allocno = XNEWVEC (int, max_regno);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
reg_allocno[i] = -1;
/* Initialize the shared-hard-reg mapping
from the list of pairs that may share. */
- reg_may_share = xcalloc (max_regno, sizeof (int));
+ reg_may_share = XCNEWVEC (int, max_regno);
for (x = regs_may_share; x; x = XEXP (XEXP (x, 1), 1))
{
int r1 = REGNO (XEXP (x, 0));
else
reg_allocno[i] = -1;
- allocno = xcalloc (max_allocno, sizeof (struct allocno));
+ allocno = XCNEWVEC (struct allocno, max_allocno);
for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++)
if (reg_allocno[i] >= 0)
/* We used to use alloca here, but the size of what it would try to
allocate would occasionally cause it to exceed the stack limit and
cause unpredictable core dumps. Some examples were > 2Mb in size. */
- conflicts = xcalloc (max_allocno * allocno_row_words, sizeof (INT_TYPE));
+ conflicts = XCNEWVEC (INT_TYPE, max_allocno * allocno_row_words);
- allocnos_live = xmalloc (allocno_row_words * sizeof (INT_TYPE));
+ allocnos_live = XNEWVEC (INT_TYPE, allocno_row_words);
/* If there is work to be done (at least one reg to allocate),
perform global conflict analysis and allocate the regs. */
/* Determine the order to allocate the remaining pseudo registers. */
- allocno_order = xmalloc (max_allocno * sizeof (int));
+ allocno_order = XNEWVEC (int, max_allocno);
for (i = 0; i < (size_t) max_allocno; i++)
allocno_order[i] = i;
int *block_start_allocnos;
/* Make a vector that mark_reg_{store,clobber} will store in. */
- regs_set = xmalloc (max_parallel * sizeof (rtx) * 2);
+ regs_set = XNEWVEC (rtx, max_parallel * 2);
- block_start_allocnos = xmalloc (max_allocno * sizeof (int));
+ block_start_allocnos = XNEWVEC (int, max_allocno);
FOR_EACH_BB (b)
{
{
int i;
int num;
- int *allocno_to_order = xmalloc (max_allocno * sizeof (int));
+ int *allocno_to_order = XNEWVEC (int, max_allocno);
/* Scan least most important to most important.
For each allocno, remove from preferences registers that cannot be used,
int i;
int *rts_order;
- rts_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
+ rts_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
post_order_compute (rts_order, false);
for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
BB_INFO_BY_INDEX (rts_order [i])->rts_number = i;
{
enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
int max_uid = get_max_uid ();
- int *start = xmalloc (max_uid * sizeof (int));
- int *end = xmalloc (max_uid * sizeof (int));
- enum bb_state *in_bb_p = xmalloc (max_uid * sizeof (enum bb_state));
+ int *start = XNEWVEC (int, max_uid);
+ int *end = XNEWVEC (int, max_uid);
+ enum bb_state *in_bb_p = XNEWVEC (enum bb_state, max_uid);
basic_block bb;
int i;
/* Allocate the ready list. */
ready.veclen = rgn_n_insns + 1 + issue_rate;
ready.first = ready.veclen - 1;
- ready.vec = xmalloc (ready.veclen * sizeof (rtx));
+ ready.vec = XNEWVEC (rtx, ready.veclen);
ready.n_ready = 0;
/* It is used for first cycle multipass scheduling. */
temp_state = alloca (dfa_state_size);
- ready_try = xcalloc ((rgn_n_insns + 1), sizeof (char));
- choice_stack = xmalloc ((rgn_n_insns + 1)
- * sizeof (struct choice_entry));
+ ready_try = XCNEWVEC (char, rgn_n_insns + 1);
+ choice_stack = XNEWVEC (struct choice_entry, rgn_n_insns + 1);
for (i = 0; i <= rgn_n_insns; i++)
choice_stack[i].state = xmalloc (dfa_state_size);
pseudos which do not cross calls. */
old_max_uid = get_max_uid () + 1;
- h_i_d = xcalloc (old_max_uid, sizeof (*h_i_d));
+ h_i_d = XCNEWVEC (struct haifa_insn_data, old_max_uid);
for (i = 0; i < old_max_uid; i++)
h_i_d [i].cost = -1;
{
rtx line;
- line_note_head = xcalloc (last_basic_block, sizeof (rtx));
+ line_note_head = XCNEWVEC (rtx, last_basic_block);
/* Save-line-note-head:
Determine the line-number at the start of each basic block.
ipcp_formal_create (struct cgraph_node *mt)
{
IPA_NODE_REF (mt)->ipcp_cval =
- xcalloc (ipa_method_formal_count (mt), sizeof (struct ipcp_formal));
+ XCNEWVEC (struct ipcp_formal, ipa_method_formal_count (mt));
}
/* Set cval structure of I-th formal of MT to CVAL. */
struct ipa_replace_map *replace_map;
tree const_val;
- replace_map = xcalloc (1, sizeof (struct ipa_replace_map));
+ replace_map = XCNEW (struct ipa_replace_map);
gcc_assert (ipcp_type_is_const (type));
if (type == CONST_VALUE_REF)
{
struct cgraph_node *node;
int nnodes;
struct cgraph_node **order =
- xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
+ XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
int old_insns = 0;
int i;
struct cgraph_node *node;
int nnodes;
struct cgraph_node **order =
- xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
+ XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
int i;
if (sorrycount || errorcount)
ipa_method_tree_map_create (struct cgraph_node *mt)
{
IPA_NODE_REF (mt)->ipa_param_tree =
- xcalloc (ipa_method_formal_count (mt), sizeof (tree));
+ XCNEWVEC (tree, ipa_method_formal_count (mt));
}
/* Create modify structure for MT. */
ipa_method_modify_create (struct cgraph_node *mt)
{
((struct ipa_node *) mt->aux)->ipa_mod =
- xcalloc (ipa_method_formal_count (mt), sizeof (bool));
+ XCNEWVEC (bool, ipa_method_formal_count (mt));
}
/* Set modify of I-th formal of MT to VAL. */
ipa_callsite_param_map_create (struct cgraph_edge *cs)
{
IPA_EDGE_REF (cs)->ipa_param_map =
- xcalloc (ipa_callsite_param_count (cs), sizeof (struct ipa_jump_func));
+ XCNEWVEC (struct ipa_jump_func, ipa_callsite_param_count (cs));
}
/* Return the call expr tree related to callsite CS. */
static void
analyze_function (struct cgraph_node *fn)
{
- funct_state l = xcalloc (1, sizeof (struct funct_state_d));
+ funct_state l = XCNEW (struct funct_state_d);
tree decl = fn->decl;
struct ipa_dfs_info * w_info = fn->aux;
struct cgraph_node *node;
struct cgraph_node *w;
struct cgraph_node **order =
- xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
+ XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
int order_pos = ipa_utils_reduced_inorder (order, true, false);
int i;
struct ipa_dfs_info * w_info;
static tree
discover_unique_type (tree type)
{
- struct type_brand_s * brand = xmalloc (sizeof (struct type_brand_s));
+ struct type_brand_s * brand = XNEW (struct type_brand_s);
int i = 0;
splay_tree_node result;
struct cgraph_node *node;
struct searchc_env env;
splay_tree_node result;
- env.stack = xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
+ env.stack = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
env.stack_size = 0;
env.result = order;
env.order_pos = 0;
struct cgraph_edge *edge, last;
struct cgraph_node **stack =
- xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *));
+ XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
/* We have to deal with cycles nicely, so use a depth first traversal
output algorithm. Ignore the fact that some functions won't need
{
if (size < 120)
size = 120;
- bufp->data = xmalloc (size);
+ bufp->data = XNEWVEC (unsigned char, size);
bufp->ptr = bufp->data;
}
else
if (NULL == root)
{
- root = xmalloc (sizeof (class_flag_node));
+ root = XNEW (class_flag_node);
root->ident = "";
root->value = 0;
root->sibling = NULL;
else
{
/* Insert new node into the tree. */
- node = xmalloc (sizeof (class_flag_node));
+ node = XNEW (class_flag_node);
node->ident = xstrdup (ident);
node->value = value;
}
/* Create a new range. */
- h = xmalloc (sizeof (struct eh_range));
+ h = XNEW (struct eh_range);
h->start_pc = pc;
h->end_pc = range->end_pc;
}
/* Create the new range. */
- h = xmalloc (sizeof (struct eh_range));
+ h = XNEW (struct eh_range);
first_child = &h->first_child;
h->start_pc = start_pc;
are `$'. */
if (i == length)
{
- char *dup = xmalloc (2 + length - min_length + kwl);
+ char *dup = XNEWVEC (char, 2 + length - min_length + kwl);
strcpy (dup, cxx_keywords[mid]);
for (i = kwl; i < length + 1; ++i)
dup[i] = '$';
return NULL;
}
- override = xmalloc (length + 3);
+ override = XNEWVEC (char, length + 3);
memcpy (override, name, length);
strcpy (override + length, "__");
}
{
struct method_name *nn;
- nn = xmalloc (sizeof (struct method_name));
- nn->name = xmalloc (length);
+ nn = XNEW (struct method_name);
+ nn->name = XNEWVEC (unsigned char, length);
memcpy (nn->name, str, length);
nn->length = length;
nn->next = method_name_list;
nn->sig_length = JPOOL_UTF_LENGTH (jcf, sig_index);
- nn->signature = xmalloc (nn->sig_length);
+ nn->signature = XNEWVEC (unsigned char, nn->sig_length);
nn->is_native = METHOD_IS_NATIVE (flags);
memcpy (nn->signature, JPOOL_UTF_DATA (jcf, sig_index),
nn->sig_length);
for (length = 0; clname[length] != ';' && clname[length] != '\0'; ++length)
;
- current = ALLOC (length + 1);
+ current = XNEWVEC (unsigned char, length + 1);
for (i = 0; i < length; ++i)
current[i] = clname[i] == '/' ? '.' : clname[i];
current[length] = '\0';
jcf_parse_class (&jcf);
tmp = (unsigned char *) super_class_name (&jcf, &super_length);
- super = ALLOC (super_length + 1);
+ super = XNEWVEC (unsigned char, super_length + 1);
memcpy (super, tmp, super_length);
super[super_length] = '\0';
return;
}
- incl = xmalloc (sizeof (struct include));
- incl->name = xmalloc (len + 1);
+ incl = XNEW (struct include);
+ incl->name = XNEWVEC (char, len + 1);
strncpy (incl->name, (const char *) utf8, len);
incl->name[len] = '\0';
incl->next = all_includes;
if (n == NULL)
{
- n = xmalloc (sizeof (struct namelet));
- n->name = xmalloc (p - name + 1);
+ n = XNEW (struct namelet);
+ n->name = XNEWVEC (char, p - name + 1);
strncpy (n->name, (const char *) name, p - name);
n->name[p - name] = '\0';
n->is_class = (p == name_limit);
if (len > 6 && ! strcmp (&jcf->classname[len - 6], ".class"))
len -= 6;
/* Turn the class name into a file name. */
- name = xmalloc (len + 1);
+ name = XNEWVEC (char, len + 1);
for (i = 0; i < len; ++i)
name[i] = jcf->classname[i] == '.' ? '/' : jcf->classname[i];
name[i] = '\0';
case OPT_PREPEND:
if (prepend_count == 0)
- prepend_specs = ALLOC (argc * sizeof (char*));
+ prepend_specs = XNEWVEC (char *, argc);
prepend_specs[prepend_count++] = optarg;
break;
case OPT_FRIEND:
if (friend_count == 0)
- friend_specs = ALLOC (argc * sizeof (char*));
+ friend_specs = XNEWVEC (char *, argc);
friend_specs[friend_count++] = optarg;
break;
case OPT_ADD:
if (add_count == 0)
- add_specs = ALLOC (argc * sizeof (char*));
+ add_specs = XNEWVEC (char *, argc);
add_specs[add_count++] = optarg;
break;
case OPT_APPEND:
if (append_count == 0)
- append_specs = ALLOC (argc * sizeof (char*));
+ append_specs = XNEWVEC (char *, argc);
append_specs[append_count++] = optarg;
break;
{
int dir_len = strlen (output_directory);
int i, classname_length = strlen (classname);
- current_output_file = ALLOC (dir_len + classname_length + 5);
+ current_output_file = XNEWVEC (char, dir_len + classname_length + 5);
strcpy (current_output_file, output_directory);
if (dir_len > 0 && output_directory[dir_len-1] != '/')
current_output_file[dir_len++] = '/';
read_zip_member (JCF *jcf, ZipDirectory *zipd, ZipFile *zipf)
{
jcf->filbuf = jcf_unexpected_eof;
- jcf->zipd = (void *)zipd;
+ jcf->zipd = zipd;
if (zipd->compression_method == Z_NO_COMPRESSION)
{
- jcf->buffer = ALLOC (zipd->size);
+ jcf->buffer = XNEWVEC (unsigned char, zipd->size);
jcf->buffer_end = jcf->buffer + zipd->size;
jcf->read_ptr = jcf->buffer;
jcf->read_end = jcf->buffer_end;
d_stream.zfree = (free_func) 0;
d_stream.opaque = (voidpf) 0;
- jcf->buffer = ALLOC (zipd->uncompressed_size);
+ jcf->buffer = XNEWVEC (unsigned char, zipd->uncompressed_size);
d_stream.next_out = jcf->buffer;
d_stream.avail_out = zipd->uncompressed_size;
jcf->buffer_end = jcf->buffer + zipd->uncompressed_size;
jcf->read_ptr = jcf->buffer;
jcf->read_end = jcf->buffer_end;
- buffer = ALLOC (zipd->size);
+ buffer = XNEWVEC (char, zipd->size);
d_stream.next_in = (unsigned char *) buffer;
d_stream.avail_in = zipd->size;
if (lseek (zipf->fd, zipd->filestart, 0) < 0
inflateInit2 (&d_stream, -MAX_WBITS);
inflate (&d_stream, Z_NO_FLUSH);
inflateEnd (&d_stream);
- FREE (buffer);
+ free (buffer);
}
return 0;
if (dep_name != NULL)
jcf_dependency_add_file (dep_name, 0);
JCF_ZERO (jcf);
- jcf->buffer = ALLOC (stat_buf.st_size);
+ jcf->buffer = XNEWVEC (unsigned char, stat_buf.st_size);
jcf->buffer_end = jcf->buffer + stat_buf.st_size;
jcf->read_ptr = jcf->buffer;
jcf->read_end = jcf->buffer_end;
if (!*slot)
{
/* We have not already scanned this directory; scan it now. */
- dent = ((memoized_dirlist_entry *)
- ALLOC (sizeof (memoized_dirlist_entry)));
+ dent = XNEW (memoized_dirlist_entry);
dent->dir = xstrdup (filename);
/* Unfortunately, scandir is not fully standardized. In
particular, the type of the function pointer passed as the
/* Length of prefix, not counting final dot. */
int i = dot - class_name;
/* Concatenate current package prefix with new sfname. */
- char *buf = xmalloc (i + new_len + 2); /* Space for '.' and '\0'. */
+ char *buf = XNEWVEC (char, i + new_len + 2); /* Space for '.' and '\0'. */
strcpy (buf + i + 1, sfname);
/* Copy package from class_name, replacing '.' by DIR_SEPARATOR.
Note we start at the end with the final package dot. */
finput = fopen (main_input_filename, "r");
if (finput == NULL)
fatal_error ("can't open %s: %m", input_filename);
- list = xmalloc(avail);
+ list = XNEWVEC (char, avail);
next = list;
for (;;)
{
}
filename_length -= strlen (".class");
- class_name = ALLOC (filename_length + 1);
+ class_name = XNEWVEC (char, filename_length + 1);
memcpy (class_name, class_name_in_zip_dir, filename_length);
class_name [filename_length] = '\0';
if (TYPE_SIZE (current_class) != error_mark_node)
{
parse_class_file ();
- FREE (current_jcf->buffer); /* No longer necessary */
+ free (current_jcf->buffer); /* No longer necessary */
/* Note: there is a way to free this buffer right after a
class seen in a zip file has been parsed. The idea is the
set its jcf in such a way that buffer will be reallocated
{
char *file_name, *class_name_in_zip_dir, *buffer;
JCF *jcf;
- file_name = ALLOC (zdir->filename_length + 1);
+ file_name = XNEWVEC (char, zdir->filename_length + 1);
class_name_in_zip_dir = ZIPDIR_FILENAME (zdir);
strncpy (file_name, class_name_in_zip_dir, zdir->filename_length);
file_name[zdir->filename_length] = '\0';
- jcf = ALLOC (sizeof (JCF));
+ jcf = XNEW (JCF);
JCF_ZERO (jcf);
jcf->read_state = finput;
jcf->filbuf = jcf_filbuf_from_stdio;
if (read_zip_member (jcf, zdir, localToFile) < 0)
fatal_error ("error while reading %s from zip file", file_name);
- buffer = ALLOC (zdir->filename_length + 1 +
+ buffer = XNEWVEC (char, zdir->filename_length + 1 +
(jcf->buffer_end - jcf->buffer));
strcpy (buffer, file_name);
/* This is not a typo: we overwrite the trailing \0 of the
compile_resource_data (file_name, buffer,
jcf->buffer_end - jcf->buffer);
JCF_FINISH (jcf);
- FREE (jcf);
- FREE (buffer);
+ free (jcf);
+ free (buffer);
}
break;
continue;
class_name = compute_class_name (zdir);
- file_name = ALLOC (zdir->filename_length+1);
+ file_name = XNEWVEC (char, zdir->filename_length + 1);
jcf = ggc_alloc (sizeof (JCF));
JCF_ZERO (jcf);
int len;
struct entry *n;
- n = ALLOC (sizeof (struct entry));
+ n = XNEW (struct entry);
n->flags = is_system ? FLAG_SYSTEM : 0;
n->next = NULL;
unsigned HOST_WIDE_INT delta;
/* Copy the chain of relocs into a sorted array. */
struct jcf_relocation **relocs
- = xmalloc (sw_state.num_cases * sizeof (struct jcf_relocation *));
+ = XNEWVEC (struct jcf_relocation *, sw_state.num_cases);
/* The relocs array is a buffer with a gap.
The assumption is that cases will normally come in "runs". */
int gap_start = 0;
slash = dname + strlen (dname);
}
- r = xmalloc (slash - dname + strlen (cname) + 2);
+ r = XNEWVEC (char, slash - dname + strlen (cname) + 2);
strncpy (r, dname, slash - dname);
r[slash - dname] = sep;
strcpy (&r[slash - dname + 1], cname);
#define JCF_FINISH(JCF) { \
CPOOL_FINISH(&(JCF)->cpool); \
- if ((JCF)->buffer) FREE ((JCF)->buffer); \
- if ((JCF)->filename) FREE ((char *) (JCF)->filename); \
- if ((JCF)->classname) FREE ((char *) (JCF)->classname); \
+ if ((JCF)->buffer) free ((JCF)->buffer); \
+ if ((JCF)->filename) free ((char *) (JCF)->filename); \
+ if ((JCF)->classname) free ((char *) (JCF)->classname); \
(JCF)->finished = 1; }
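A note on the java/ hunks: jcf.h's local ALLOC/FREE convenience macros were thin wrappers around xmalloc/free (see the `#define ALLOC xmalloc' removal in the zextract.c hunk below), so the conversion replaces FREE with plain free and ALLOC with a typed XNEWVEC call. A minimal sketch of the resulting pattern, using a hypothetical helper and length:

#include <stdlib.h>
#include <string.h>
#include "libiberty.h"	/* xmalloc, XNEWVEC and friends.  */

/* Hypothetical helper illustrating the rewrite; LEN stands in for the
   various length expressions seen in the hunks above.  */
static unsigned char *
copy_bytes (const unsigned char *src, size_t len)
{
  /* Was: buf = ALLOC (len + 1);  i.e. an untyped xmalloc underneath.  */
  unsigned char *buf = XNEWVEC (unsigned char, len + 1);
  memcpy (buf, src, len);
  buf[len] = '\0';
  /* Was: FREE (buf);  callers now release with plain free ().  */
  return buf;
}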
#define CPOOL_INIT(CPOOL) \
int x;
struct stat sb;
- spec = xmalloc (strlen (dir) + sizeof (SPEC_FILE)
+ spec = XNEWVEC (char, strlen (dir) + sizeof (SPEC_FILE)
+ sizeof ("-specs=") + 4);
strcpy (spec, "-specs=");
x = strlen (spec);
argv = *in_argv;
added_libraries = *in_added_libraries;
- args = xcalloc (argc, sizeof (int));
+ args = XCNEWVEC (int, argc);
for (i = 1; i < argc; i++)
{
num_args += shared_libgcc;
- arglist = xmalloc ((num_args + 1) * sizeof (char *));
+ arglist = XNEWVEC (const char *, num_args + 1);
j = 0;
arglist[j++] = argv[0];
if (decl_buf == NULL)
{
decl_buflen = len + 100;
- decl_buf = xmalloc (decl_buflen);
+ decl_buf = XNEWVEC (char, decl_buflen);
}
else
{
error ("couldn't determine target name for dependency tracking");
else
{
- char *buf = xmalloc (dot - filename +
+ char *buf = XNEWVEC (char, dot - filename +
3 + sizeof (TARGET_OBJECT_SUFFIX));
strncpy (buf, filename, dot - filename);
java_lexer *
java_new_lexer (FILE *finput, const char *encoding)
{
- java_lexer *lex = xmalloc (sizeof (java_lexer));
+ java_lexer *lex = XNEW (java_lexer);
int enc_error = 0;
lex->finput = finput;
/* Special ways to report error on numeric literals */
#define JAVA_FLOAT_RANGE_ERROR(m) \
{ \
- char *msg = xmalloc (100 + strlen (m)); \
+ char *msg = XNEWVEC (char, 100 + strlen (m)); \
sprintf (msg, "Floating point literal exceeds range of `%s'", (m)); \
JAVA_RANGE_ERROR(msg); \
free (msg); \
};
#define NEW_METHOD_DECLARATOR(D,N,A) \
{ \
- (D) = xmalloc (sizeof (struct method_declarator)); \
+ (D) = XNEW (struct method_declarator); \
(D)->method_name = (N); \
(D)->args = (A); \
}
if (bracket_count)
{
int i;
- char *n = xmalloc (bracket_count + 1 + strlen ($$));
+ char *n = XNEWVEC (char, bracket_count + 1 + strlen ($$));
for (i = 0; i < bracket_count; ++i)
n[i] = '[';
strcpy (n + bracket_count, $$);
if (bracket_count)
{
int i;
- char *n = xmalloc (bracket_count + 1 + strlen ($2));
+ char *n = XNEWVEC (char, bracket_count + 1 + strlen ($2));
for (i = 0; i < bracket_count; ++i)
n[i] = '[';
strcpy (n + bracket_count, $2);
void
java_push_parser_context (void)
{
- struct parser_ctxt *new = xcalloc (1, sizeof (struct parser_ctxt));
+ struct parser_ctxt *tmp = XCNEW (struct parser_ctxt);
- new->next = ctxp;
- ctxp = new;
+ tmp->next = ctxp;
+ ctxp = tmp;
}
static void
{
struct class_context *ctx;
- ctx = xmalloc (sizeof (struct class_context));
+ ctx = XNEW (struct class_context);
ctx->name = (char *) name;
ctx->next = current_class_context;
current_class_context = ctx;
perror ("Could not figure length of resource file");
return;
}
- buffer = xmalloc (strlen (name) + stat_buf.st_size);
+ buffer = XNEWVEC (char, strlen (name) + stat_buf.st_size);
strcpy (buffer, name);
read (fd, buffer + strlen (name), stat_buf.st_size);
close (fd);
}
zipf->count = makeword((const uch *) &buffer[TOTAL_ENTRIES_CENTRAL_DIR]);
zipf->dir_size = makelong((const uch *) &buffer[SIZE_CENTRAL_DIRECTORY]);
-#define ALLOC xmalloc
/* Allocate 1 more to allow appending '\0' to last filename. */
- zipf->central_directory = ALLOC (zipf->dir_size+1);
+ zipf->central_directory = XNEWVEC (char, zipf->dir_size + 1);
if (lseek (zipf->fd, -(zipf->dir_size+ECREC_SIZE+4), SEEK_CUR) < 0)
return -2;
if (read (zipf->fd, zipf->central_directory, zipf->dir_size) < 0)
tree globals = lang_hooks.decls.getdecls ();
int len = list_length (globals);
- tree *vec = xmalloc (sizeof (tree) * len);
+ tree *vec = XNEWVEC (tree, len);
int i;
tree decl;
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks);
+ qin = qout = worklist = XNEWVEC (basic_block, n_basic_blocks);
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = xmalloc (sizeof (basic_block) * n_basic_blocks);
+ = XNEWVEC (basic_block, n_basic_blocks);
/* Initialize a mapping from each edge to its index. */
for (i = 0; i < num_edges; i++)
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist =
- xmalloc (sizeof (basic_block) * (n_basic_blocks - NUM_FIXED_BLOCKS));
+ XNEWVEC (basic_block, n_basic_blocks - NUM_FIXED_BLOCKS);
/* We want a maximal solution. */
sbitmap_vector_ones (avout, last_basic_block);
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- tos = worklist = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
+ tos = worklist = XNEWVEC (basic_block, n_basic_blocks + 1);
/* Initialize NEARER for each edge and build a mapping from an edge to
its index. */
See the declarations of these variables, above,
for what they mean. */
- qty = xmalloc (max_qty * sizeof (struct qty));
- qty_phys_copy_sugg = xmalloc (max_qty * sizeof (HARD_REG_SET));
- qty_phys_num_copy_sugg = xmalloc (max_qty * sizeof (short));
- qty_phys_sugg = xmalloc (max_qty * sizeof (HARD_REG_SET));
- qty_phys_num_sugg = xmalloc (max_qty * sizeof (short));
+ qty = XNEWVEC (struct qty, max_qty);
+ qty_phys_copy_sugg = XNEWVEC (HARD_REG_SET, max_qty);
+ qty_phys_num_copy_sugg = XNEWVEC (short, max_qty);
+ qty_phys_sugg = XNEWVEC (HARD_REG_SET, max_qty);
+ qty_phys_num_sugg = XNEWVEC (short, max_qty);
- reg_qty = xmalloc (max_regno * sizeof (int));
- reg_offset = xmalloc (max_regno * sizeof (char));
- reg_next_in_qty = xmalloc (max_regno * sizeof (int));
+ reg_qty = XNEWVEC (int, max_regno);
+ reg_offset = XNEWVEC (char, max_regno);
+ reg_next_in_qty = XNEWVEC (int, max_regno);
/* Determine which pseudo-registers can be allocated by local-alloc.
In general, these are the registers used only in a single block and
regset_head cleared_regs;
int clear_regnos = 0;
- reg_equiv = xcalloc (max_regno, sizeof *reg_equiv);
+ reg_equiv = XCNEWVEC (struct equivalence, max_regno);
INIT_REG_SET (&cleared_regs);
reg_equiv_init = ggc_alloc_cleared (max_regno * sizeof (rtx));
reg_equiv_init_size = max_regno;
/* +2 to leave room for a post_mark_life at the last insn and for
the birth of a CLOBBER in the first insn. */
- regs_live_at = xcalloc ((2 * insn_count + 2), sizeof (HARD_REG_SET));
+ regs_live_at = XCNEWVEC (HARD_REG_SET, 2 * insn_count + 2);
/* Initialize table of hardware registers currently live. */
number of suggested registers they need so we allocate those with
the most restrictive needs first. */
- qty_order = xmalloc (next_qty * sizeof (int));
+ qty_order = XNEWVEC (int, next_qty);
for (i = 0; i < next_qty; i++)
qty_order[i] = i;
struct loops *
loop_optimizer_init (FILE *dumpfile)
{
- struct loops *loops = xcalloc (1, sizeof (struct loops));
+ struct loops *loops = XCNEW (struct loops);
edge e;
edge_iterator ei;
static bool first_time = true;
if (entry)
return entry->inv;
- entry = xmalloc (sizeof (struct invariant_expr_entry));
+ entry = XNEW (struct invariant_expr_entry);
entry->inv = inv;
entry->expr = expr;
entry->mode = mode;
create_new_invariant (struct def *def, rtx insn, bitmap depends_on,
bool always_executed)
{
- struct invariant *inv = xmalloc (sizeof (struct invariant));
+ struct invariant *inv = XNEW (struct invariant);
rtx set = single_set (insn);
inv->def = def;
static void
record_use (struct def *def, rtx *use, rtx insn)
{
- struct use *u = xmalloc (sizeof (struct use));
+ struct use *u = XNEW (struct use);
if (GET_CODE (*use) == SUBREG)
use = &SUBREG_REG (*use);
}
if (simple)
- def = xcalloc (1, sizeof (struct def));
+ def = XCNEW (struct def);
else
def = NULL;
static void
record_iv (struct df_ref *def, struct rtx_iv *iv)
{
- struct rtx_iv *recorded_iv = xmalloc (sizeof (struct rtx_iv));
+ struct rtx_iv *recorded_iv = XNEW (struct rtx_iv);
*recorded_iv = *iv;
DF_REF_IV_SET (def, recorded_iv);
static void
record_biv (rtx def, struct rtx_iv *iv)
{
- struct biv_entry *biv = xmalloc (sizeof (struct biv_entry));
+ struct biv_entry *biv = XNEW (struct biv_entry);
void **slot = htab_find_slot_with_hash (bivs, def, REGNO (def), INSERT);
biv->regno = REGNO (def);
if (desc)
return desc;
- desc = xmalloc (sizeof (struct niter_desc));
+ desc = XNEW (struct niter_desc);
iv_analysis_loop_init (loop);
find_simple_exit (loop, desc);
loop->aux = desc;
if (desc->noloop_assumptions)
RESET_BIT (wont_exit, 1);
- remove_edges = xcalloc (npeel, sizeof (edge));
+ remove_edges = XCNEWVEC (edge, npeel);
n_remove_edges = 0;
if (flag_split_ivs_in_unroller)
wont_exit = sbitmap_alloc (max_unroll + 1);
sbitmap_ones (wont_exit);
- remove_edges = xcalloc (max_unroll + exit_mod + 1, sizeof (edge));
+ remove_edges = XCNEWVEC (edge, max_unroll + exit_mod + 1);
n_remove_edges = 0;
if (flag_split_ivs_in_unroller
|| flag_variable_expansion_in_unroller)
opt_info = analyze_insns_in_loop (loop);
/* Remember blocks whose dominators will have to be updated. */
- dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
+ dom_bbs = XCNEWVEC (basic_block, n_basic_blocks);
n_dom_bbs = 0;
body = get_loop_body (loop);
/* Precondition the loop. */
loop_split_edge_with (loop_preheader_edge (loop), init_code);
- remove_edges = xcalloc (max_unroll + n_peel + 1, sizeof (edge));
+ remove_edges = XCNEWVEC (edge, max_unroll + n_peel + 1);
n_remove_edges = 0;
wont_exit = sbitmap_alloc (max_unroll + 2);
return NULL;
/* Record the accumulator to expand. */
- ves = xmalloc (sizeof (struct var_to_expand));
+ ves = XNEW (struct var_to_expand);
ves->insn = insn;
ves->var_expansions = VEC_alloc (rtx, heap, 1);
ves->reg = copy_rtx (dest);
return NULL;
/* Record the insn to split. */
- ivts = xmalloc (sizeof (struct iv_to_split));
+ ivts = XNEW (struct iv_to_split);
ivts->insn = insn;
ivts->base_var = NULL_RTX;
ivts->step = iv.step;
{
basic_block *body, bb;
unsigned i, num_edges = 0;
- struct opt_info *opt_info = xcalloc (1, sizeof (struct opt_info));
+ struct opt_info *opt_info = XCNEW (struct opt_info);
rtx insn;
struct iv_to_split *ivts = NULL;
struct var_to_expand *ves = NULL;
Leave some space for labels allocated by find_and_verify_loops. */
max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
- uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
- uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));
+ uid_luid = XCNEWVEC (int, max_uid_for_loop);
+ uid_loop = XCNEWVEC (struct loop *, max_uid_for_loop);
/* Allocate storage for array of loops. */
- loops->array = xcalloc (loops->num, sizeof (struct loop));
+ loops->array = XCNEWVEC (struct loop, loops->num);
/* Find and process each loop.
First, find them, and record them in order of their beginnings. */
find_and_verify_loops (f, loops);
/* Allocate and initialize auxiliary loop information. */
- loops_info = xcalloc (loops->num, sizeof (struct loop_info));
+ loops_info = XCNEWVEC (struct loop_info, loops->num);
for (i = 0; i < (int) loops->num; i++)
loops->array[i].aux = loops_info + i;
continue;
}
- m = xmalloc (sizeof (struct movable));
+ m = XNEW (struct movable);
m->next = 0;
m->insn = p;
m->set_src = src;
if (regs->array[regno].set_in_loop == 2)
{
struct movable *m;
- m = xmalloc (sizeof (struct movable));
+ m = XNEW (struct movable);
m->next = 0;
m->insn = p;
m->set_dest = SET_DEST (set);
combine_movables (struct loop_movables *movables, struct loop_regs *regs)
{
struct movable *m;
- char *matched_regs = xmalloc (regs->num);
+ char *matched_regs = XNEWVEC (char, regs->num);
enum machine_mode mode;
/* Regs that are set more than once are not allowed to match
/* Map of pseudo-register replacements to handle combining
when we move several insns that load the same value
into different pseudo-registers. */
- rtx *reg_map = xcalloc (nregs, sizeof (rtx));
- char *already_moved = xcalloc (nregs, sizeof (char));
+ rtx *reg_map = XCNEWVEC (rtx, nregs);
+ char *already_moved = XCNEWVEC (char, nregs);
for (m = movables->head; m; m = m->next)
{
}
else if (m->insert_temp)
{
- rtx *reg_map2 = xcalloc (REGNO (newreg),
- sizeof(rtx));
+ rtx *reg_map2 = XCNEWVEC (rtx, REGNO (newreg));
reg_map2 [m->regno] = newreg;
i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
addr_placeholder = gen_reg_rtx (Pmode);
ivs->n_regs = max_reg_before_loop;
- ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
+ ivs->regs = XCNEWVEC (struct iv, ivs->n_regs);
/* Find all BIVs in loop. */
loop_bivs_find (loop);
Some givs might have been made from biv increments, so look at
ivs->reg_iv_type for a suitable size. */
reg_map_size = ivs->n_regs;
- reg_map = xcalloc (reg_map_size, sizeof (rtx));
+ reg_map = XCNEWVEC (rtx, reg_map_size);
/* Examine each iv class for feasibility of strength reduction/induction
variable elimination. */
/* It is a possible basic induction variable.
Create and initialize an induction structure for it. */
- struct induction *v = xmalloc (sizeof (struct induction));
+ struct induction *v = XNEW (struct induction);
record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
not_every_iteration, maybe_multiple);
&add_val, &mult_val, &ext_val,
&last_consec_insn))))
{
- struct induction *v = xmalloc (sizeof (struct induction));
+ struct induction *v = XNEW (struct induction);
/* If this is a library call, increase benefit. */
if (find_reg_note (p, REG_RETVAL, NULL_RTX))
GET_MODE (x)))
{
/* Found one; record it. */
- struct induction *v = xmalloc (sizeof (struct induction));
+ struct induction *v = XNEW (struct induction);
record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
add_val, ext_val, benefit, DEST_ADDR,
{
/* Create and initialize new iv_class. */
- bl = xmalloc (sizeof (struct iv_class));
+ bl = XNEW (struct iv_class);
bl->regno = REGNO (dest_reg);
bl->biv = 0;
if (!g1->ignore)
giv_array[i++] = g1;
- stats = xcalloc (giv_count, sizeof (*stats));
- can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
+ stats = XCNEWVEC (struct combine_givs_stats, giv_count);
+ can_combine = XCNEWVEC (rtx, giv_count * giv_count);
for (i = 0; i < giv_count; i++)
{
regs->array[i].single_usage = NULL_RTX;
}
- last_set = xcalloc (regs->num, sizeof (rtx));
+ last_set = XCNEWVEC (rtx, regs->num);
/* Scan the loop, recording register usage. */
for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live)
{
struct seginfo *ptr;
- ptr = xmalloc (sizeof (struct seginfo));
+ ptr = XNEW (struct seginfo);
ptr->mode = mode;
ptr->insn_ptr = insn;
ptr->bbnum = bb;
entry_exit_extra = 3;
#endif
bb_info[n_entities]
- = xcalloc (last_basic_block + entry_exit_extra, sizeof **bb_info);
+ = XCNEWVEC (struct bb_info, last_basic_block + entry_exit_extra);
entity_map[n_entities++] = e;
if (num_modes[e] > max_num_modes)
max_num_modes = num_modes[e];
static struct loops *
build_loops_structure (FILE *dumpfile)
{
- struct loops *loops = xcalloc (1, sizeof (struct loops));
+ struct loops *loops = XCNEW (struct loops);
/* Find the loops. */
/* Allocate memory to hold the DDG array one entry for each loop.
We use loop->num as index into this array. */
- g_arr = xcalloc (loops->num, sizeof (ddg_ptr));
+ g_arr = XCNEWVEC (ddg_ptr, loops->num);
/* Build DDGs for all the relevant loops and hold them in G_ARR
fprintf (stats_file, "\n");
}
- node_order = (int *) xmalloc (sizeof (int) * g->num_nodes);
+ node_order = XNEWVEC (int, g->num_nodes);
mii = 1; /* Need to pass some estimate of mii. */
rec_mii = sms_order_nodes (g, mii, node_order);
static partial_schedule_ptr
create_partial_schedule (int ii, ddg_ptr g, int history)
{
- partial_schedule_ptr ps = (partial_schedule_ptr)
- xmalloc (sizeof (struct partial_schedule));
+ partial_schedule_ptr ps = XNEW (struct partial_schedule);
ps->rows = (ps_insn_ptr *) xcalloc (ii, sizeof (ps_insn_ptr));
ps->ii = ii;
ps->history = history;
static ps_insn_ptr
create_ps_insn (ddg_node_ptr node, int rest_count, int cycle)
{
- ps_insn_ptr ps_i = xmalloc (sizeof (struct ps_insn));
+ ps_insn_ptr ps_i = XNEW (struct ps_insn);
ps_i->node = node;
ps_i->next_in_row = NULL;
(mangled[1] == 'i' || mangled[1] == 'c') &&
mangled[2] == '_')
{
- cp = demangled = xmalloc(strlen(mangled) + 2);
+ cp = demangled = XNEWVEC (char, strlen (mangled) + 2);
if (mangled[1] == 'i')
*cp++ = '-'; /* for instance method */
else
gcc_obstack_init (&util_obstack);
util_firstobj = (char *) obstack_finish (&util_obstack);
- errbuf = (char *) xmalloc (1024 * 10);
+ errbuf = XNEWVEC (char, 1024 * 10);
hash_init ();
synth_module_prologue ();
}
if (mask & (1U << n))
len += strlen (lang_name) + 1;
- result = xmalloc (len);
+ result = XNEWVEC (char, len);
len = 0;
for (n = 0; (lang_name = lang_names[n]) != 0; n++)
if (mask & (1U << n))
/* Drop the "no-" from negative switches. */
size_t len = strlen (opt) - 3;
- dup = xmalloc (len + 1);
+ dup = XNEWVEC (char, len + 1);
dup[0] = '-';
dup[1] = opt[1];
memcpy (dup + 2, opt + 5, len - 2 + 1);
{
char *new_option;
int option_index;
- new_option = (char *) xmalloc (strlen (arg) + 2);
+ new_option = XNEWVEC (char, strlen (arg) + 2);
new_option[0] = 'W';
strcpy (new_option+1, arg);
option_index = find_opt (new_option, lang_mask);
rtx insn;
/* Find the largest UID and create a mapping from UIDs to CUIDs. */
- uid_cuid = xcalloc (get_max_uid () + 1, sizeof (int));
+ uid_cuid = XCNEWVEC (int, get_max_uid () + 1);
i = 1;
FOR_EACH_BB (bb)
FOR_BB_INSNS (bb, insn)
destination. */
min_labelno = get_first_label_num ();
n_labels = max_label_num () - min_labelno;
- label_live = xmalloc (n_labels * sizeof (HARD_REG_SET));
+ label_live = XNEWVEC (HARD_REG_SET, n_labels);
CLEAR_HARD_REG_SET (ever_live_at_start);
FOR_EACH_BB_REVERSE (bb)
basic_block bb;
int *heads;
- heads = xmalloc (sizeof (int) * last_basic_block);
+ heads = XNEWVEC (int, last_basic_block);
memset (heads, ENTRY_BLOCK, sizeof (int) * last_basic_block);
heads[ENTRY_BLOCK_PTR->next_bb->index] = last_basic_block;
pp_construct (pretty_printer *pp, const char *prefix, int maximum_length)
{
memset (pp, 0, sizeof (pretty_printer));
- pp->buffer = xcalloc (1, sizeof (output_buffer));
+ pp->buffer = XCNEW (output_buffer);
obstack_init (&pp->buffer->chunk_obstack);
obstack_init (&pp->buffer->formatted_obstack);
pp->buffer->obstack = &pp->buffer->formatted_obstack;
void
debug_tree (tree node)
{
- table = xcalloc (HASH_SIZE, sizeof (struct bucket *));
+ table = XCNEWVEC (struct bucket *, HASH_SIZE);
print_node (stderr, "", node, 0);
free (table);
table = 0;
}
/* Add this node to the table. */
- b = xmalloc (sizeof (struct bucket));
+ b = XNEW (struct bucket);
b->node = node;
b->next = table[hash];
table[hash] = b;
hist->hvalue.next = ann->histograms;
ann->histograms = hist;
- hist->hvalue.counters =
- xmalloc (sizeof (gcov_type) * hist->n_counters);
+ hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
for (j = 0; j < hist->n_counters; j++)
hist->hvalue.counters[j] = aact_count[j];
}
is only processed after all its predecessors. The number of predecessors
of every block has already been computed. */
- stack = xmalloc (sizeof (*stack) * n_basic_blocks);
+ stack = XNEWVEC (basic_block, n_basic_blocks);
sp = stack;
*sp++ = block;
init_recog ();
- costs = xmalloc (nregs * sizeof (struct costs));
+ costs = XNEWVEC (struct costs, nregs);
#ifdef FORBIDDEN_INC_DEC_CLASSES
- in_inc_dec = xmalloc (nregs);
+ in_inc_dec = XNEWVEC (char, nregs);
#endif /* FORBIDDEN_INC_DEC_CLASSES */
{
VARRAY_REG_INIT (reg_n_info, regno_allocated, "reg_n_info");
renumber = xmalloc (size_renumber);
- reg_pref_buffer = xmalloc (regno_allocated
- * sizeof (struct reg_pref));
+ reg_pref_buffer = XNEWVEC (struct reg_pref, regno_allocated);
}
else
{
free ((char *) renumber);
free ((char *) reg_pref);
renumber = xmalloc (size_renumber);
- reg_pref_buffer = xmalloc (regno_allocated
- * sizeof (struct reg_pref));
+ reg_pref_buffer = XNEWVEC (struct reg_pref, regno_allocated);
}
else
{
renumber = xrealloc (renumber, size_renumber);
- reg_pref_buffer = xrealloc (reg_pref_buffer,
- regno_allocated
- * sizeof (struct reg_pref));
+ reg_pref_buffer = XRESIZEVEC (struct reg_pref, reg_pref_buffer,
+ regno_allocated);
}
node = *slot;
if (node == NULL)
{
- node = xcalloc (1, sizeof (*node));
+ node = XCNEW (struct subregs_of_mode_node);
node->block = regno & -8;
*slot = node;
}
can suppress some optimizations in those zones. */
mark_flags_life_zones (discover_flags_reg ());
- regno_src_regno = xmalloc (sizeof *regno_src_regno * nregs);
+ regno_src_regno = XNEWVEC (int, nregs);
for (i = nregs; --i >= 0; ) regno_src_regno[i] = -1;
- regmove_bb_head = xmalloc (sizeof (int) * (old_max_uid + 1));
+ regmove_bb_head = XNEWVEC (int, old_max_uid + 1);
for (i = old_max_uid; i >= 0; i--) regmove_bb_head[i] = -1;
FOR_EACH_BB (bb)
regmove_bb_head[INSN_UID (BB_HEAD (bb))] = bb->index;
{
struct csa_memlist *ml;
- ml = xmalloc (sizeof (*ml));
+ ml = XNEW (struct csa_memlist);
if (XEXP (*mem, 0) == stack_pointer_rtx)
ml->sp_offset = 0;
need_refresh = false;
- all_vd = xmalloc (sizeof (struct value_data) * last_basic_block);
+ all_vd = XNEWVEC (struct value_data, last_basic_block);
visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (visited);
Record memory equivalents in reg_mem_equiv so they can
be substituted eventually by altering the REG-rtx's. */
- reg_equiv_constant = xcalloc (max_regno, sizeof (rtx));
- reg_equiv_invariant = xcalloc (max_regno, sizeof (rtx));
- reg_equiv_mem = xcalloc (max_regno, sizeof (rtx));
- reg_equiv_address = xcalloc (max_regno, sizeof (rtx));
- reg_max_ref_width = xcalloc (max_regno, sizeof (int));
- reg_old_renumber = xcalloc (max_regno, sizeof (short));
+ reg_equiv_constant = XCNEWVEC (rtx, max_regno);
+ reg_equiv_invariant = XCNEWVEC (rtx, max_regno);
+ reg_equiv_mem = XCNEWVEC (rtx, max_regno);
+ reg_equiv_address = XCNEWVEC (rtx, max_regno);
+ reg_max_ref_width = XCNEWVEC (unsigned int, max_regno);
+ reg_old_renumber = XCNEWVEC (short, max_regno);
memcpy (reg_old_renumber, reg_renumber, max_regno * sizeof (short));
- pseudo_forbidden_regs = xmalloc (max_regno * sizeof (HARD_REG_SET));
- pseudo_previous_regs = xcalloc (max_regno, sizeof (HARD_REG_SET));
+ pseudo_forbidden_regs = XNEWVEC (HARD_REG_SET, max_regno);
+ pseudo_previous_regs = XCNEWVEC (HARD_REG_SET, max_regno);
CLEAR_HARD_REG_SET (bad_spill_regs_global);
/* We used to use alloca here, but the size of what it would try to
allocate would occasionally cause it to exceed the stack limit and
cause a core dump. */
- offsets_known_at = xmalloc (num_labels);
- offsets_at = xmalloc (num_labels * NUM_ELIMINABLE_REGS * sizeof (HOST_WIDE_INT));
+ offsets_known_at = XNEWVEC (char, num_labels);
+ offsets_at = (HOST_WIDE_INT (*)[NUM_ELIMINABLE_REGS])
+ xmalloc (num_labels * NUM_ELIMINABLE_REGS * sizeof (HOST_WIDE_INT));
/* Alter each pseudo-reg rtx to contain its hard reg number.
Assign stack slots to the pseudos that lack hard regs or equivalents.
memset (spill_reg_rtx, 0, sizeof spill_reg_rtx);
memset (spill_reg_store, 0, sizeof spill_reg_store);
- reg_last_reload_reg = xcalloc (max_regno, sizeof (rtx));
- reg_has_output_reload = xmalloc (max_regno);
+ reg_last_reload_reg = XCNEWVEC (rtx, max_regno);
+ reg_has_output_reload = XNEWVEC (char, max_regno);
CLEAR_HARD_REG_SET (reg_reloaded_valid);
CLEAR_HARD_REG_SET (reg_reloaded_call_part_clobbered);
{
/* Allocate a place to put our results and chain it into the
hash table. */
- tinfo = xmalloc (sizeof (struct target_info));
+ tinfo = XNEW (struct target_info);
tinfo->uid = INSN_UID (target);
tinfo->block = b;
tinfo->next
}
/* Allocate and initialize the tables used by mark_target_live_regs. */
- target_hash_table = xcalloc (TARGET_HASH_PRIME, sizeof (struct target_info *));
- bb_ticks = xcalloc (last_basic_block, sizeof (int));
+ target_hash_table = XCNEWVEC (struct target_info *, TARGET_HASH_PRIME);
+ bb_ticks = XCNEWVEC (int, last_basic_block);
}
\f
/* Free up the resources allocated to mark_target_live_regs (). This
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
deps->max_reg = max_reg;
- deps->reg_last = xcalloc (max_reg, sizeof (struct deps_reg));
+ deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
INIT_REG_SET (&deps->reg_last_in_use);
INIT_REG_SET (&deps->reg_conditional_sets);
if (luid / n_basic_blocks > 100 * 5)
{
int i;
- true_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
- anti_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
- output_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
+ true_dependency_cache = XNEWVEC (bitmap_head, luid);
+ anti_dependency_cache = XNEWVEC (bitmap_head, luid);
+ output_dependency_cache = XNEWVEC (bitmap_head, luid);
#ifdef ENABLE_CHECKING
- forward_dependency_cache = xmalloc (luid * sizeof (bitmap_head));
+ forward_dependency_cache = XNEWVEC (bitmap_head, luid);
#endif
for (i = 0; i < luid; i++)
{
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = xmalloc (last_basic_block * sizeof (int));
- dfs_nr = xcalloc (last_basic_block, sizeof (int));
- stack = xmalloc (n_edges * sizeof (edge_iterator));
+ max_hdr = XNEWVEC (int, last_basic_block);
+ dfs_nr = XCNEWVEC (int, last_basic_block);
+ stack = XNEWVEC (edge_iterator, n_edges);
inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
/* Second traversal: find reducible inner loops and topologically sort
block of each region. */
- queue = xmalloc (n_basic_blocks * sizeof (int));
+ queue = XNEWVEC (int, n_basic_blocks);
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
/* Prepare current target block info. */
if (current_nr_blocks > 1)
{
- candidate_table = xmalloc (current_nr_blocks * sizeof (candidate));
+ candidate_table = XNEWVEC (candidate, current_nr_blocks);
bblst_last = 0;
/* bblst_table holds split blocks and update blocks for each block after
the TO blocks of region edges, so there can be at most rgn_nr_edges
of them. */
bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
- bblst_table = xmalloc (bblst_size * sizeof (basic_block));
+ bblst_table = XNEWVEC (basic_block, bblst_size);
edgelst_last = 0;
- edgelst_table = xmalloc (rgn_nr_edges * sizeof (edge));
+ edgelst_table = XNEWVEC (edge, rgn_nr_edges);
compute_trg_info (target_bb);
}
init_deps_global ();
/* Initializations for region data dependence analysis. */
- bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks);
+ bb_deps = XNEWVEC (struct deps, current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb);
/* Compute interblock info: probabilities, split-edges, dominators, etc. */
if (current_nr_blocks > 1)
{
- prob = xmalloc ((current_nr_blocks) * sizeof (float));
+ prob = XNEWVEC (float, current_nr_blocks);
dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
sbitmap_vector_zero (dom, current_nr_blocks);
SET_EDGE_TO_BIT (e, rgn_nr_edges++);
}
- rgn_edges = xmalloc (rgn_nr_edges * sizeof (edge));
+ rgn_edges = XNEWVEC (edge, rgn_nr_edges);
rgn_nr_edges = 0;
FOR_EACH_BB (block)
{
int rgn;
nr_regions = 0;
- rgn_table = xmalloc ((n_basic_blocks) * sizeof (region));
- rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = xmalloc ((last_basic_block) * sizeof (int));
- containing_rgn = xmalloc ((last_basic_block) * sizeof (int));
+ rgn_table = XNEWVEC (region, n_basic_blocks);
+ rgn_bb_table = XNEWVEC (int, n_basic_blocks);
+ block_to_bb = XNEWVEC (int, last_basic_block);
+ containing_rgn = XNEWVEC (int, last_basic_block);
/* Compute regions for scheduling. */
if (reload_completed
if (CHECK_DEAD_NOTES)
{
blocks = sbitmap_alloc (last_basic_block);
- deaths_in_region = xmalloc (sizeof (int) * nr_regions);
+ deaths_in_region = XNEWVEC (int, nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
{
unused_stack_instances = unused_stack_instances->next;
}
else
- context = xmalloc (sizeof (struct timevar_stack_def));
+ context = XNEW (struct timevar_stack_def);
/* Fill it in and put it on the stack. */
context->timevar = tv;
if (*e == NULL)
{
struct symbol_hash_entry *v;
- *e = v = xcalloc (1, sizeof (*v));
+ *e = v = XCNEW (struct symbol_hash_entry);
v->key = xstrdup (string);
}
return *e;
if (*e == NULL)
{
struct file_hash_entry *v;
- *e = v = xcalloc (1, sizeof (*v));
+ *e = v = XCNEW (struct file_hash_entry);
v->key = xstrdup (string);
}
return *e;
if (*e == NULL)
{
struct demangled_hash_entry *v;
- *e = v = xcalloc (1, sizeof (*v));
+ *e = v = XCNEW (struct demangled_hash_entry);
v->key = xstrdup (string);
}
return *e;
static void
tail_duplicate (void)
{
- fibnode_t *blocks = xcalloc (last_basic_block, sizeof (fibnode_t));
- basic_block *trace = xmalloc (sizeof (basic_block) * n_basic_blocks);
- int *counts = xmalloc (sizeof (int) * last_basic_block);
+ fibnode_t *blocks = XCNEWVEC (fibnode_t, last_basic_block);
+ basic_block *trace = XNEWVEC (basic_block, n_basic_blocks);
+ int *counts = XNEWVEC (int, last_basic_block);
int ninsns = 0, nduplicated = 0;
gcov_type weighted_insns = 0, traced_insns = 0;
fibheap_t heap = fibheap_new ();
{
basic_block bb;
bool changed = false;
- basic_block *worklist = xmalloc (sizeof (basic_block) * n_basic_blocks);
+ basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
basic_block *current = worklist;
FOR_EACH_BB (bb)
static void
merge_phi_nodes (void)
{
- basic_block *worklist = xmalloc (sizeof (basic_block) * n_basic_blocks);
+ basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
basic_block *current = worklist;
basic_block bb;
struct int_tree_map *h;
void **loc;
- h = xmalloc (sizeof (struct int_tree_map));
+ h = XNEW (struct int_tree_map);
h->uid = uid;
h->to = to;
loc = htab_find_slot_with_hash (complex_variable_components, h,
fprintf (dump_file, ")\n");
}
- res = xmalloc (sizeof (struct data_reference));
+ res = XNEW (struct data_reference);
DR_STMT (res) = stmt;
DR_REF (res) = ref;
fprintf (dump_file, ")\n");
}
- res = xmalloc (sizeof (struct data_reference));
+ res = XNEW (struct data_reference);
DR_STMT (res) = stmt;
DR_REF (res) = ref;
bool differ_p;
unsigned int i;
- res = xmalloc (sizeof (struct data_dependence_relation));
+ res = XNEW (struct data_dependence_relation);
DDR_A (res) = a;
DDR_B (res) = b;
{
struct subscript *subscript;
- subscript = xmalloc (sizeof (struct subscript));
+ subscript = XNEW (struct subscript);
SUB_CONFLICTS_IN_A (subscript) = chrec_dont_know;
SUB_CONFLICTS_IN_B (subscript) = chrec_dont_know;
SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
struct data_reference *res;
insert_dont_know_node:;
- res = xmalloc (sizeof (struct data_reference));
+ res = XNEW (struct data_reference);
DR_STMT (res) = NULL_TREE;
DR_REF (res) = NULL_TREE;
DR_BASE_OBJECT (res) = NULL;
di->free_list = dq->next;
}
else
- dq = xmalloc (sizeof (struct dump_queue));
+ dq = XNEW (struct dump_queue);
/* Create a new entry in the splay-tree. */
- dni = xmalloc (sizeof (struct dump_node_info));
+ dni = XNEW (struct dump_node_info);
dni->index = index;
dni->binfo_p = ((flags & DUMP_BINFO) != 0);
dq->node = splay_tree_insert (di->nodes, (splay_tree_key) t,
gcc_assert (loop->num_nodes);
gcc_assert (loop->latch != EXIT_BLOCK_PTR);
- blocks = xcalloc (loop->num_nodes, sizeof (basic_block));
+ blocks = XCNEWVEC (basic_block, loop->num_nodes);
visited = BITMAP_ALLOC (NULL);
blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);
E.g. p = &buf[0]; while (cond) p = p + 4; */
if (object_size_type & 2)
{
- osi.depths = xcalloc (num_ssa_names, sizeof (unsigned int));
- osi.stack = xmalloc (num_ssa_names * sizeof (unsigned int));
+ osi.depths = XCNEWVEC (unsigned int, num_ssa_names);
+ osi.stack = XNEWVEC (unsigned int, num_ssa_names);
osi.tos = osi.stack;
osi.pass = 1;
/* collect_object_sizes_for is changing
for (object_size_type = 0; object_size_type <= 3; object_size_type++)
{
- object_sizes[object_size_type]
- = xmalloc (num_ssa_names * sizeof (HOST_WIDE_INT));
+ object_sizes[object_size_type]
+ = XNEWVEC (unsigned HOST_WIDE_INT, num_ssa_names);
computed[object_size_type] = BITMAP_ALLOC (NULL);
}
verify_loop_structure (loops);
#endif
- bbs = xmalloc (sizeof (basic_block) * n_basic_blocks);
- copied_bbs = xmalloc (sizeof (basic_block) * n_basic_blocks);
+ bbs = XNEWVEC (basic_block, n_basic_blocks);
+ copied_bbs = XNEWVEC (basic_block, n_basic_blocks);
bbs_size = n_basic_blocks;
for (i = 1; i < loops->num; i++)
&& def_bb->loop_father == loop)
data->cost += LIM_DATA (def_stmt)->cost;
- dep = xmalloc (sizeof (struct depend));
+ dep = XNEW (struct depend);
dep->stmt = def_stmt;
dep->next = data->depends;
data->depends = dep;
static void
record_mem_ref_loc (struct mem_ref_loc **mem_refs, tree stmt, tree *ref)
{
- struct mem_ref_loc *aref = xmalloc (sizeof (struct mem_ref_loc));
+ struct mem_ref_loc *aref = XNEW (struct mem_ref_loc);
aref->stmt = stmt;
aref->ref = ref;
ref = *slot;
else
{
- ref = xmalloc (sizeof (struct mem_ref));
+ ref = XNEW (struct mem_ref);
ref->mem = *mem;
ref->hash = hash;
ref->locs = NULL;
if (n_unroll)
{
sbitmap wont_exit;
- edge *edges_to_remove = xmalloc (sizeof (edge *) * n_unroll);
+ edge *edges_to_remove = XNEWVEC (edge, n_unroll);
unsigned int n_to_remove = 0;
old_cond = COND_EXPR_COND (cond);
unsigned i;
data->version_info_size = 2 * num_ssa_names;
- data->version_info = xcalloc (data->version_info_size,
- sizeof (struct version_info));
+ data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
data->relevant = BITMAP_ALLOC (NULL);
data->important_candidates = BITMAP_ALLOC (NULL);
data->max_inv_id = 0;
static struct iv *
alloc_iv (tree base, tree step)
{
- struct iv *iv = xcalloc (1, sizeof (struct iv));
+ struct iv *iv = XCNEW (struct iv);
if (step && integer_zerop (step))
step = NULL_TREE;
record_use (struct ivopts_data *data, tree *use_p, struct iv *iv,
tree stmt, enum use_type use_type)
{
- struct iv_use *use = xcalloc (1, sizeof (struct iv_use));
+ struct iv_use *use = XCNEW (struct iv_use);
use->id = n_iv_uses (data);
use->type = use_type;
}
iv->have_use_for = true;
- civ = xmalloc (sizeof (struct iv));
+ civ = XNEW (struct iv);
*civ = *iv;
stmt = SSA_NAME_DEF_STMT (op);
return;
}
- civ = xmalloc (sizeof (struct iv));
+ civ = XNEW (struct iv);
*civ = zero_p (iv0->step) ? *iv1 : *iv0;
record_use (data, cond_p, civ, stmt, USE_COMPARE);
}
if (i == n_iv_cands (data))
{
- cand = xcalloc (1, sizeof (struct iv_cand));
+ cand = XCNEW (struct iv_cand);
cand->id = i;
if (!base && !step)
}
use->n_map_members = size;
- use->cost_map = xcalloc (size, sizeof (struct cost_pair));
+ use->cost_map = XCNEWVEC (struct cost_pair, size);
}
}
if (*cached)
return (*cached)->cost;
- *cached = xmalloc (sizeof (struct mbc_entry));
+ *cached = XNEW (struct mbc_entry);
(*cached)->mode = mode;
(*cached)->cst = cst;
iv_ca_delta_add (struct iv_use *use, struct cost_pair *old_cp,
struct cost_pair *new_cp, struct iv_ca_delta *next_change)
{
- struct iv_ca_delta *change = xmalloc (sizeof (struct iv_ca_delta));
+ struct iv_ca_delta *change = XNEW (struct iv_ca_delta);
change->use = use;
change->old_cp = old_cp;
static struct iv_ca *
iv_ca_new (struct ivopts_data *data)
{
- struct iv_ca *nw = xmalloc (sizeof (struct iv_ca));
+ struct iv_ca *nw = XNEW (struct iv_ca);
nw->upto = 0;
nw->bad_uses = 0;
- nw->cand_for_use = xcalloc (n_iv_uses (data), sizeof (struct cost_pair *));
- nw->n_cand_uses = xcalloc (n_iv_cands (data), sizeof (unsigned));
+ nw->cand_for_use = XCNEWVEC (struct cost_pair *, n_iv_uses (data));
+ nw->n_cand_uses = XCNEWVEC (unsigned, n_iv_cands (data));
nw->cands = BITMAP_ALLOC (NULL);
nw->n_cands = 0;
nw->n_regs = 0;
nw->cand_use_cost = 0;
nw->cand_cost = 0;
- nw->n_invariant_uses = xcalloc (data->max_inv_id + 1, sizeof (unsigned));
+ nw->n_invariant_uses = XCNEWVEC (unsigned, data->max_inv_id + 1);
nw->cost = 0;
return nw;
{
data->version_info_size = 2 * num_ssa_names;
free (data->version_info);
- data->version_info = xcalloc (data->version_info_size,
- sizeof (struct version_info));
+ data->version_info = XCNEWVEC (struct version_info, data->version_info_size);
}
data->max_inv_id = 0;
update_ssa (update_flag);
old_num_ssa_names = num_ssa_names;
- use_blocks = xcalloc (old_num_ssa_names, sizeof (bitmap));
+ use_blocks = XCNEWVEC (bitmap, old_num_ssa_names);
/* Find the uses outside loops. */
find_uses_to_rename (changed_bbs, use_blocks, names_to_rename);
RVUSE_IN[BB] = Union of RVUSE_OUT of predecessors.
RVUSE_OUT[BB] = RVUSE_GEN[BB] U (RVUSE_IN[BB] - RVUSE_KILL[BB])
*/
- postorder = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
+ postorder = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
pre_and_rev_post_order_compute (NULL, postorder, false);
changed = true;
computing ANTIC, either, even though it's plenty fast. */
if (!do_fre && n_basic_blocks < 4000)
{
- vuse_names = xcalloc (num_ssa_names, sizeof (bitmap));
+ vuse_names = XCNEWVEC (bitmap, num_ssa_names);
compute_rvuse ();
compute_antic ();
insert ();
int i;
unsigned int rank = 2;
tree param;
- int *bbs = xmalloc ((last_basic_block + 1) * sizeof (int));
+ int *bbs = XNEWVEC (int, last_basic_block + 1);
memset (&reassociate_stats, 0, sizeof (reassociate_stats));
/* Reverse RPO (Reverse Post Order) will give us something where
deeper loops come later. */
pre_and_rev_post_order_compute (NULL, bbs, false);
- bb_rank = xcalloc (last_basic_block + 1, sizeof (unsigned int));
+ bb_rank = XCNEWVEC (unsigned int, last_basic_block + 1);
operand_rank = htab_create (511, operand_entry_hash,
operand_entry_eq, 0);
int i = 0;
constraint_t c;
- graph = xmalloc (sizeof (struct constraint_graph));
- graph->succs = xcalloc (VEC_length (varinfo_t, varmap) + 1,
- sizeof (*graph->succs));
- graph->preds = xcalloc (VEC_length (varinfo_t, varmap) + 1,
- sizeof (*graph->preds));
- graph->zero_weight_succs = xcalloc (VEC_length (varinfo_t, varmap) + 1,
- sizeof (*graph->zero_weight_succs));
- graph->zero_weight_preds = xcalloc (VEC_length (varinfo_t, varmap) + 1,
- sizeof (*graph->zero_weight_preds));
+ graph = XNEW (struct constraint_graph);
+ graph->succs = XCNEWVEC (VEC(constraint_edge_t,heap) *,
+ VEC_length (varinfo_t, varmap) + 1);
+ graph->preds = XCNEWVEC (VEC(constraint_edge_t,heap) *,
+ VEC_length (varinfo_t, varmap) + 1);
+ graph->zero_weight_succs = XCNEWVEC (bitmap,
+ VEC_length (varinfo_t, varmap) + 1);
+ graph->zero_weight_preds = XCNEWVEC (bitmap,
+ VEC_length (varinfo_t, varmap) + 1);
for (i = 0; VEC_iterate (constraint_t, constraints, i, c); i++)
{
init_topo_info (void)
{
size_t size = VEC_length (varinfo_t, varmap);
- struct topo_info *ti = xmalloc (sizeof (struct topo_info));
+ struct topo_info *ti = XNEW (struct topo_info);
ti->visited = sbitmap_alloc (size);
sbitmap_zero (ti->visited);
ti->topo_order = VEC_alloc (unsigned, heap, 1);
static struct scc_info *
init_scc_info (void)
{
- struct scc_info *si = xmalloc (sizeof (struct scc_info));
+ struct scc_info *si = XNEW (struct scc_info);
size_t size = VEC_length (varinfo_t, varmap);
si->current_index = 0;
sbitmap_zero (si->visited);
si->in_component = sbitmap_alloc (size);
sbitmap_ones (si->in_component);
- si->visited_index = xcalloc (sizeof (unsigned int), size + 1);
+ si->visited_index = XCNEWVEC (unsigned int, size + 1);
si->scc_stack = VEC_alloc (unsigned, heap, 1);
si->unification_queue = VEC_alloc (unsigned, heap, 1);
return si;
finder.t = t;
slot = htab_find_slot (id_for_tree, &finder, INSERT);
gcc_assert (*slot == NULL);
- new_pair = xmalloc (sizeof (struct tree_id));
+ new_pair = XNEW (struct tree_id);
new_pair->t = t;
new_pair->id = id;
*slot = (void *)new_pair;
/* Build a hash table element so we can see if E is already
in the table. */
- elt = xmalloc (sizeof (struct redirection_data));
+ elt = XNEW (struct redirection_data);
elt->outgoing_edge = e;
elt->dup_block = NULL;
elt->do_not_duplicate = false;
if (*slot == NULL)
{
*slot = (void *)elt;
- elt->incoming_edges = xmalloc (sizeof (struct el));
+ elt->incoming_edges = XNEW (struct el);
elt->incoming_edges->e = incoming_edge;
elt->incoming_edges->next = NULL;
return elt;
to the list of incoming edges associated with E. */
if (insert)
{
- struct el *el = xmalloc (sizeof (struct el));
+ struct el *el = XNEW (struct el);
el->next = elt->incoming_edges;
el->e = incoming_edge;
elt->incoming_edges = el;
if (! dominated_by_p (CDI_DOMINATORS, va_arg_bb, va_start_bb))
return false;
- stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge));
+ stack = XNEWVEC (edge, n_basic_blocks + 1);
sp = 0;
visited = sbitmap_alloc (last_basic_block);
{
unsigned int i;
- si->offsets = xmalloc (num_ssa_names * sizeof (int));
+ si->offsets = XNEWVEC (int, num_ssa_names);
for (i = 0; i < num_ssa_names; ++i)
si->offsets[i] = -1;
}
exit_dest) == loop->header ?
true : false);
- new_bbs = xmalloc (sizeof (basic_block) * loop->num_nodes);
+ new_bbs = XNEWVEC (basic_block, loop->num_nodes);
copy_bbs (bbs, loop->num_nodes, new_bbs,
&loop->single_exit, 1, &new_loop->single_exit, NULL,
return vr;
/* Create a default value range. */
- vr_value[ver] = vr = xmalloc (sizeof (*vr));
+ vr_value[ver] = vr = XNEW (value_range_t);
memset (vr, 0, sizeof (*vr));
/* Allocate an equivalence set. */
/* If we didn't find an assertion already registered for
NAME COMP_CODE VAL, add a new one at the end of the list of
assertions associated with NAME. */
- n = xmalloc (sizeof (*n));
+ n = XNEW (struct assert_locus_d);
n->bb = dest_bb;
n->e = e;
n->si = si;
sbitmap_zero (blocks_visited);
need_assert_for = BITMAP_ALLOC (NULL);
- asserts_for = xmalloc (num_ssa_names * sizeof (assert_locus_t));
+ asserts_for = XNEWVEC (assert_locus_t, num_ssa_names);
memset (asserts_for, 0, num_ssa_names * sizeof (assert_locus_t));
calculate_dominance_info (CDI_DOMINATORS);
{
basic_block bb;
- vr_value = xmalloc (num_ssa_names * sizeof (value_range_t *));
+ vr_value = XNEWVEC (value_range_t *, num_ssa_names);
memset (vr_value, 0, num_ssa_names * sizeof (value_range_t *));
FOR_EACH_BB (bb)
/* We may have ended with ranges that have exactly one value. Those
values can be substituted as any other copy/const propagated
value using substitute_and_fold. */
- single_val_range = xmalloc (num_ssa_names * sizeof (*single_val_range));
+ single_val_range = XNEWVEC (prop_value_t, num_ssa_names);
memset (single_val_range, 0, num_ssa_names * sizeof (*single_val_range));
do_value_subst_p = false;
static void
pushlevel (int ignore ATTRIBUTE_UNUSED)
{
- struct binding_level *newlevel = xmalloc (sizeof (struct binding_level));
+ struct binding_level *newlevel = XNEW (struct binding_level);
*newlevel = clear_binding_level;
VTI (ENTRY_BLOCK_PTR)->out.stack_adjust = INCOMING_FRAME_SP_OFFSET;
/* Allocate stack for back-tracking up CFG. */
- stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
+ stack = XNEWVEC (edge_iterator, n_basic_blocks + 1);
sp = 0;
/* Push the first edge on to the stack. */
dst_l = 0;
for (node = dst->var_part[j].loc_chain; node; node = node->next)
dst_l++;
- vui = xcalloc (src_l + dst_l, sizeof (struct variable_union_info));
+ vui = XCNEWVEC (struct variable_union_info, src_l + dst_l);
/* Fill in the locations from DST. */
for (node = dst->var_part[j].loc_chain, jj = 0; node;
/* Compute reverse completion order of depth first search of the CFG
so that the data-flow runs faster. */
- rc_order = xmalloc ((n_basic_blocks - NUM_FIXED_BLOCKS) * sizeof (int));
- bb_order = xmalloc (last_basic_block * sizeof (int));
+ rc_order = XNEWVEC (int, n_basic_blocks - NUM_FIXED_BLOCKS);
+ bb_order = XNEWVEC (int, last_basic_block);
pre_and_rev_post_order_compute (NULL, rc_order, false);
for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
bb_order[rc_order[i]] = i;
}
/* Add the micro-operations to the array. */
- VTI (bb)->mos = xmalloc (VTI (bb)->n_mos
- * sizeof (struct micro_operation_def));
+ VTI (bb)->mos = XNEWVEC (micro_operation, VTI (bb)->n_mos);
VTI (bb)->n_mos = 0;
for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb));
insn = NEXT_INSN (insn))
df_reorganize_refs (&df->def_info);
df_reorganize_refs (&df->use_info);
- def_entry = xcalloc (DF_DEFS_SIZE (df), sizeof (struct web_entry));
- use_entry = xcalloc (DF_USES_SIZE (df), sizeof (struct web_entry));
- used = xcalloc (max, sizeof (char));
+ def_entry = XCNEWVEC (struct web_entry, DF_DEFS_SIZE (df));
+ use_entry = XCNEWVEC (struct web_entry, DF_USES_SIZE (df));
+ used = XCNEWVEC (char, max);
if (dump_file)
df_dump (df, dump_file);
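For reference, the typed allocators these hunks rely on live in include/libiberty.h; their definitions are essentially the following (an abridged sketch, the header itself is authoritative):

/* XNEW/XNEWVEC return uninitialized storage (xmalloc); the XC variants
   zero it (xcalloc), which is why the xcalloc call sites above map onto
   them.  XRESIZEVEC is the xrealloc-flavored member of the family.  */
#define XNEW(T)             ((T *) xmalloc (sizeof (T)))
#define XCNEW(T)            ((T *) xcalloc (1, sizeof (T)))
#define XNEWVEC(T, N)       ((T *) xmalloc (sizeof (T) * (N)))
#define XCNEWVEC(T, N)      ((T *) xcalloc ((N), sizeof (T)))
#define XRESIZEVEC(T, P, N) ((T *) xrealloc ((void *) (P), sizeof (T) * (N)))

The casts make each result directly assignable to the declared pointer type, so a mismatch between the element type and the allocation size shows up as a compile-time diagnostic (including under a C++ bootstrap) rather than a silent under-allocation.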