/* This type represents a reference to a memory region. */
struct asan_mem_ref
{
- /* The expression of the begining of the memory region. */
+ /* The expression of the beginning of the memory region. */
tree start;
/* The size of the access (can be 1, 2, 4, 8, 16 for now). */
After removing the best edge, the final result will be ABCD/ ACBD.
It does not add jump compared with the previous order. But it
- reduces the possiblity of long jumps. */
+ reduces the possibility of long jumps. */
if (best_edge && for_size
&& (EDGE_COUNT (best_edge->dest->succs) > 1
|| EDGE_COUNT (best_edge->dest->preds) > 1))
+2013-07-21 Ondřej Bílka <neleai@seznam.cz>
+
+ * c-common.c: Fix typos.
+ * c-common.h: Likewise.
+
2013-07-13 Lubos Lunak <l.lunak@suse.cz>
PR c++/55203
{
int size;
tree type = TREE_TYPE ((*params)[x]);
- /* __atomic_compare_exchange has a bool in the 4th postion, skip it. */
+ /* __atomic_compare_exchange has a bool in the 4th position, skip it. */
if (n_param == 6 && x == 3)
continue;
if (!POINTER_TYPE_P (type))
- /* Possibe cases of scalar_to_vector conversion. */
+ /* Possible cases of scalar_to_vector conversion. */
enum stv_conv {
- stv_error, /* Error occured. */
+ stv_error, /* Error occurred. */
stv_nothing, /* Nothing happened. */
stv_firstarg, /* First argument must be expanded. */
stv_secondarg /* Second argument must be expanded. */
/* True when alias is a weakref. */
unsigned weakref : 1;
/* C++ frontend produce same body aliases and extra name aliases for
- virutal functions and vtables that are obviously equivalent.
+ virtual functions and vtables that are obviously equivalent.
Those aliases are bit special, especially because C++ frontend
visibility code is so ugly it can not get them right at first time
and their visibility needs to be copied from their "masters" at
b) early small interprocedural passes.
Those are interprocedural passes executed only at compilation
- time. These include, for exmaple, transational memory lowering,
+ time. These include, for example, transactional memory lowering,
unreachable code removal and other simple transformations.
c) IP analysis stage. All interprocedural passes do their
MEMMODEL_LAST = 6
};
-/* Suppose that higher bits are target dependant. */
+/* Suppose that higher bits are target dependent. */
#define MEMMODEL_MASK ((1<<16)-1)
/* Support for user-provided GGC and PCH markers. The first parameter
{
if (MEM_P (*x))
{
- /* Visit all MEMs in *PAT and check indepedence. */
+ /* Visit all MEMs in *PAT and check independence. */
if (for_each_rtx (pat, (rtx_function) walk_mems_2, *x))
/* Indicate that dependence was determined and stop traversal. */
return 1;
DATAFLOW is problem we are solving, PENDING is worklist of basic blocks we
need to visit.
BLOCK_IN_POSTORDER is array of size N_BLOCKS specifying postorder in BBs and
- BBINDEX_TO_POSTORDER is array mapping back BB->index to postorder possition.
+ BBINDEX_TO_POSTORDER is array mapping back BB->index to postorder position.
PENDING will be freed.
The worklists are bitmaps indexed by postorder positions.
block. The id field in the ref is used to index into these sets.
See df.h for details.
- If the DF_RD_PRUNE_DEAD_DEFS changable flag is set, only DEFs reaching
+ If the DF_RD_PRUNE_DEAD_DEFS changeable flag is set, only DEFs reaching
existing uses are included in the global reaching DEFs set, or in other
words only DEFs that are still live. This is a kind of pruned version
of the traditional reaching definitions problem that is much less
bool operator == (double_int cst2) const;
bool operator != (double_int cst2) const;
- /* Please migrate away from using these member variables publically. */
+ /* Please migrate away from using these member variables publicly. */
unsigned HOST_WIDE_INT low;
HOST_WIDE_INT high;
of exprloc or after DW_OP_{,bit_}piece, and val_addr can't be
resolved. Replace it (both DW_OP_addr and DW_OP_stack_value)
with DW_OP_GNU_implicit_pointer if possible
- and return true, if unsuccesful, return false. */
+ and return true, if unsuccessful, return false. */
static bool
optimize_one_addr_into_implicit_ptr (dw_loc_descr_ref loc)
}
}
else
- /* Punt, no point hoisting a single occurence. */
+ /* Punt, no point hoisting a single occurrence. */
occrs_to_hoist.release ();
if (flag_ira_hoist_pressure
printf ("typedef void *state_t;\n\n");
}
- /* Special-purpose atributes should be tested with if, not #ifdef. */
+ /* Special-purpose attributes should be tested with if, not #ifdef. */
const char * const special_attrs[] = { "length", "enabled", 0 };
for (const char * const *p = special_attrs; *p; p++)
{
}
- /* Write the beginning of a new s-expresion e.g. "(!foo "
- The writer automatically adds whitespace to show the hierachical
+ /* Write the beginning of a new s-expression e.g. "(!foo "
+ The writer automatically adds whitespace to show the hierarchical
structure of the expressions, so each one starts on a new line,
and any within it will be at an increased indentation level. */
void
{
tree init = ctor_for_folding (base);
- /* Our semantic is exact oposite of ctor_for_folding;
+ /* Our semantic is exact opposite of ctor_for_folding;
NULL means unknown, while error_mark_node is 0. */
if (init == error_mark_node)
return NULL_TREE;
tileMap = isl_basic_map_add_constraint(tileMap, c);
}
- /* Project out auxilary dimensions.
+ /* Project out auxiliary dimensions.
- The auxilary dimensions are transformed into existentially quantified ones.
+ The auxiliary dimensions are transformed into existentially quantified ones.
This reduces the number of visible scattering dimensions and allows Cloog
- to produces better code. */
+ to produce better code. */
tileMap = isl_basic_map_project_out(tileMap, isl_dim_out,
- to do so because it enables loop optimizatoins. */
+ to do so because it enables loop optimizations. */
INLINE_HINT_loop_iterations = 2,
INLINE_HINT_loop_stride = 4,
- /* Inlining withing same strongly connected component of callgraph is often
+ /* Inlining within same strongly connected component of callgraph is often
a loss due to increased stack frame usage and prologue setup costs. */
INLINE_HINT_same_scc = 8,
/* Inlining functions in strongly connected component is not such a great
*/
INLINE_HINT_cross_module = 64,
/* If array indexes of loads/stores become known there may be room for
- futher optimization. */
+ further optimization. */
INLINE_HINT_array_index = 128
};
typedef int inline_hints;
using up-to-date value in the badness metric mean a lot of extra
expenses. */
int growth;
- /* Number of SCC on the beggining of inlining process. */
+ /* Number of SCC on the beginning of inlining process. */
int scc_no;
};
return true;
}
-/* Return true if a load from a formal parameter PARM_LOAD is known to retreive
+/* Return true if a load from a formal parameter PARM_LOAD is known to retrieve
a value known not to be modified in this function before reaching the
statement STMT. PARM_AINFO is a pointer to a structure containing temporary
information about the parameter. */
the size of the type) is clobbered with an unknown value. When
agg_preserved is false or the type of the containing jump function is
different, all unlisted parts are assumed to be unknown and all values must
- fullfill is_gimple_ip_invariant. */
+ fulfill is_gimple_ip_invariant. */
typedef struct GTY(()) ipa_agg_jf_item
{
} ipa_jump_func_t;
-/* Return the offset of the component that is decribed by a known type jump
+/* Return the offset of the component that is described by a known type jump
function JFUNC. */
static inline HOST_WIDE_INT
/* Return true if NODE is self recursive function.
- ??? self recursive and indirectly recursive funcions should
- be the same, so this function seems unnecesary. */
+ ??? self recursive and indirectly recursive functions should
+ be the same, so this function seems unnecessary. */
static bool
self_recursive_p (struct cgraph_node *node)